Dataset fields:

| field           | type                 | range               |
|-----------------|----------------------|---------------------|
| query           | string               | lengths 9 to 3.4k   |
| document        | string               | lengths 9 to 87.4k  |
| metadata        | dict                 | n/a                 |
| negatives       | sequence             | lengths 4 to 101    |
| negative_scores | sequence             | lengths 4 to 101    |
| document_score  | string               | lengths 3 to 10     |
| document_rank   | string (categorical) | 102 distinct values |
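Each row pairs a natural-language query with a positive code document, a list of negative code snippets, and their retrieval scores, plus the positive document's own score and rank; the entries reproduced below are sample rows. As a rough sketch of how the fields can be inspected, the snippet below loads the data with the Hugging Face `datasets` library; the repository id `user/code-search-triplets` and the `train` split name are placeholders rather than this dataset's actual identifiers.

```python
# Minimal sketch: load the dataset and inspect one row.
# NOTE: "user/code-search-triplets" is a hypothetical repo id used for illustration.
from datasets import load_dataset

ds = load_dataset("user/code-search-triplets", split="train")

row = ds[0]
print(row["query"])                  # natural-language docstring / query
print(row["document"][:200])         # positive code snippet (truncated for display)
print(len(row["negatives"]))         # between 4 and 101 negative snippets
print(len(row["negative_scores"]))   # one similarity score per negative
print(row["document_score"], row["document_rank"])
```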
Return a data point and its metadata information.

    def __getitem__(self, index):
        A_path = self.A_paths[index]
        A_img = np.array(Image.open(A_path).convert('RGB'))
        A_img = self.stack(A_img)
        A_img = resize(A_img,(256, 256))
        A_img = np.einsum('abc->cab', A_img)
        # A_img = hsi_loader(A_path)
        # print(np.max(A_img))
        A = hsi_normalize(A_img, max_=1)
        #A = self.transform(A_img)
        return {'A': A, 'A_paths': A_path}

{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def point(self):\n return self.x, self.y, self.z", "def to_data(self):\n point = {\n 'point': [self.pt[0], self.pt[1], self.pt[2]],\n 'layer_height': self.layer_height,\n\n 'mesh_normal': self.mesh_normal.to_data(),\n 'up_vector': self.up_vector.to_data(),\n 'frame': self.frame.to_data(),\n\n 'extruder_toggle': self.extruder_toggle,\n 'velocity': self.velocity,\n 'wait_time': self.wait_time,\n 'blend_radius': self.blend_radius,\n\n 'closest_support_pt': self.closest_support_pt.to_data() if self.closest_support_pt else None,\n 'distance_to_support': self.distance_to_support,\n\n 'is_feasible': self.is_feasible,\n\n 'attributes': utils.get_jsonable_attributes(self.attributes)\n }\n return point", "def point(self):\n return collections.namedtuple(self.name, [self.x, self.y, self.units])", "def get_point(self):\n return self._x, self._y", "def get_point(self):\n return self.point", "def get_data_point(self, n, columnname):\n c_index = self._get_header_index(columnname)\n \n return self.data[n][c_index]", "def getPoint(self):\n return self.point", "def getPoint(self):\n return self.point", "def point(self):\n return shapely.geometry.Point(self._x[0], self._x[1])", "def getPTData(*args):\n return args[0].Data.PTData.pt_data", "def _point(self):\n raise NotImplementedError", "def getPoint(self):\n return self._point", "def soleDataPoint(self):\n dps = self.datapoints()\n if dps:\n return dps[0]", "def _datapoint(self,\n datapoint_param: DataPointParam) \\\n -> Optional[List[DataPointResult]]:\n logger.debug('getting datapoint {}'.format(datapoint_param))\n # block until token refreshed. Make sure it is a valid token\n with self.data.tokenManager.valid_token_ctx() as token:\n res = jGetter.get_data_points(self.auth, token,\n datapoint_param)\n return res", "def get_draw_data(self):\n x, y = self._coordinates\n return x, y, self.__heading", "def dehydrate_point(value):\n dim = len(value)\n if dim == 2:\n return Structure(ord(b\"X\"), value.srid, *value)\n elif dim == 3:\n return Structure(ord(b\"Y\"), value.srid, *value)\n else:\n raise ValueError(\"Cannot dehydrate Point with %d dimensions\" % dim)", "def get_data_on_points(self, varname, record, points):\n res = float('nan')*np.ones((len(points)), dtype=np.float64)\n if len(np.shape(np.array(points))) != 2:\n raise TelemacException('Warning problem with the list of '\\\n 'extraction points')\n # dimension of the computation result\n dim = np.shape(np.array(points))[1]\n if dim == 2:\n res = self._get_data_on_2d_points(varname, record, points)\n elif dim == 3:\n res = self._get_data_on_3d_points(varname, record, points)\n else:\n raise TelemacException('Warning problem with the dimension of '\\\n 'extraction points')\n return res", "def get_point_data(self,data,special_idx):\n if len(special_idx) == 0:\n return {'ra':np.nan,\n 'dec':np.nan,\n 'az':np.nan,\n 'el':np.nan}\n\n top = special_idx[0]\n ra_point, dec_point = Coordinates.h2e_full(data['level1/spectrometer/pixel_pointing/pixel_az'][0,:top],\n data['level1/spectrometer/pixel_pointing/pixel_el'][0,:top],\n data['level1/spectrometer/MJD'][:top],\n Coordinates.comap_longitude,\n Coordinates.comap_latitude)\n point_data = {'ra':np.nanmean(ra_point),\n 'dec':np.nanmean(dec_point),\n 'az':np.nanmean(data['level1/spectrometer/pixel_pointing/pixel_az'][0,:top]),\n 'el':np.nanmean(data['level1/spectrometer/pixel_pointing/pixel_el'][0,:top])}\n return point_data", "def point(self) -> Point:\n return self._point", "def get_data_coordinates(self, x, y):\n return x/self.sx - self.tx, y/self.sy - 
self.ty", "def getPointValues(a, Y, x):\n raise NotImplementedError('getPoint not implemented')", "def spatialData(self):\n return self.__spatial_data", "def _so(self):\n example_point = self.points[0]\n d = OrderedDict()\n d['format'] = _TPC_FORMAT\n d.update(self.metadata)\n d['dimension'] = len(example_point.v)\n d['attributes'] = sorted(example_point.a.keys())\n\n ser = lambda p:point_so(d['attributes'],p)\n d['points'] = [ser(p) for p in self.points]\n return d", "def get_dataset(self):\n return self._X, self._y", "def getPointValues(self, *args, **kwargs):\n ...", "def point(self):\n return Point(self._x, self._y)", "def data_xy(position) -> dict:\n\n return {\"x\": position[0], \"y\": position[1]}", "def get_points(self):\r\n return self.points", "def getDataCoordinates(self):\n coord = np.zeros((self.dataset.shape[0], 2))\n for i in range(len(self.dataset)):\n coord[i, 0] = self.dataset[i][0]\n coord[i, 1] = self.dataset[i][1]\n return coord", "def getData(self):\n # organize the points into segments\n # 1. make sure there is an on curve\n haveOnCurve = False\n for point in self._points:\n if point.segmentType is not None:\n haveOnCurve = True\n break\n # 2. move the off curves to front of the list\n if haveOnCurve:\n _prepPointsForSegments(self._points)\n # 3. ignore double points on start and end\n firstPoint = self._points[0]\n lastPoint = self._points[-1]\n if firstPoint.segmentType is not None and lastPoint.segmentType is not None:\n if firstPoint.coordinates == lastPoint.coordinates:\n if (firstPoint.segmentType in [\"line\", \"move\"]):\n del self._points[0]\n else:\n raise AssertionError(\"Unhandled point type sequence\")\n # done\n return self._points", "def get_data(self, line_id):\n # check\n if line_id not in self._lineDict:\n raise KeyError('Line ID %s does not exist.' % str(line_id))\n\n # get line\n line = self._lineDict[line_id]\n if line is None:\n raise RuntimeError('Line ID %s has been removed.' % line_id)\n\n return line.get_xdata(), line.get_ydata()", "def get_point_info(self,id):\n id = str(id)\n if not self.cacheRedis.exists(\"point_info:id:\" + id):\n point_info = self.db.get(\"SELECT * FROM fs_point WHERE id=%s and status=0\",id)\n self.cacheRedis.hmset('point_info:id:' + id,point_info)\n point_info = self.cacheRedis.hgetall('point_info:id:' + id)\n return point_info", "def __getitem__(self, index):\n return self.points[index]", "def getPoint(self):\n return Point(*self.position)", "def get_points(self):\n\t\treturn self.points", "def get_coords(data, id):\n return data[id]['lat'], data[id]['lon']", "def get_pex_info(entry_point):\r\n from . 
import pex_info\r\n\r\n pex_info_content = read_pex_info_content(entry_point)\r\n if pex_info_content:\r\n return pex_info.PexInfo.from_json(pex_info_content)\r\n raise ValueError('Invalid entry_point: %s' % entry_point)", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def create_data_points(self, time_series, metric_descriptor):\n data_points = []\n for point in time_series.points:\n # TODO: Possibly encode namespace in name\n data_point = DataPoint(ns=metric_descriptor.name,\n name=metric_descriptor.name,\n value=point.value.value)\n data_points.append(data_point)\n return data_points", "def preprocess_point(point):\n # TODO: Replace IDs with actual information available at the\n # reference schema\n \n return {\n 'uuid': point['UUID'],\n 'operator_id': point['OperatorID'],\n 'usage_type_id': point['UsageTypeID'],\n 'country': point['AddressInfo']['Country']['Title'],\n 'address': ', '.join(filter(lambda item: item is not None, [\n point['AddressInfo']['Title'],\n point['AddressInfo']['AddressLine1'],\n point['AddressInfo']['AddressLine2'],\n point['AddressInfo']['Town'],\n ])),\n 'latitude': point['AddressInfo']['Latitude'],\n 'longitude': point['AddressInfo']['Longitude'],\n 'num_points': point['NumberOfPoints'],\n **aggregate_points(point),\n }", "def data(self):\n return self.d", "def _get_data(self, event):\n if event.xdata is None:\n return None, None\n xdata = np.clip(event.xdata, *self.ax.get_xbound())\n ydata = np.clip(event.ydata, *self.ax.get_ybound())\n return xdata, ydata", "def get_point(self, params):\n spm = self.spm\n cpoint = self.current_point\n x = params.get('X',cpoint[0])\n y = params.get('Y',cpoint[1])\n z = params.get('Z',cpoint[2])\n a = cpoint[3]\n b = cpoint[4]\n if 'E' in params:\n a = params.get('E',cpoint[3])\n elif 'A' in params:\n a = params.get('A',cpoint[3])\n elif 'B' in params:\n b = params.get('B',cpoint[4])\n point = [x,y,z,a,b]\n return point", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. 
Call help() for details.\n ''')", "def get(self):\n return self.x, self.y", "def get_spatial_info(cube, add_cyclic=False):\n\n if add_cyclic:\n data, x = add_cyclic_point(cube.data, coord=cube.coord('longitude').points)\n else:\n data = cube.data\n x = cube.coord('longitude').points\n\n y = cube.coord('latitude').points\n inproj = input_projections[cube.attributes['input_projection']]\n\n return data, x, y, inproj", "def getData(self):\r\n return personData(\r\n self.title.getVal(),\r\n self.first.getVal(),\r\n self.middle.getVal(),\r\n self.last.getVal(),\r\n self.suffix.getVal(),\r\n self.phone.getVal(),\r\n self.ext.getVal(),\r\n self.email.getVal(),\r\n self.affiliation.getVal())", "def point_coords(geom):\n # Return a tuple with the x/y point coordinate for a GeoDataFrame geometry\n return list(geom.coords)[0] # Just get first tuple in list, since it's a point", "def GetPoints(self):\n if not self.VTKObject.GetPoints():\n return None\n array = vtkDataArrayToVTKArray(\n self.VTKObject.GetPoints().GetData(), self)\n array.Association = ArrayAssociation.POINT\n return array", "def _extract_data_points_from_series(series: dict) -> List[dict]:\n data_points = series[\"generic:Obs\"]\n if type(data_points) != list:\n data_points = [data_points]\n return data_points", "def get_fields_point(self):\n self.set_definition(sps21point)\n return self.get_fields()", "def get_points(self):\n\t\treturn self._points", "def __get_data_at_points__(self, points):\n file_data = self.reader.GetOutput()\n probe = vtk.vtkProbeFilter()\n probe.SetInputConnection(points.GetOutputPort())\n probe.SetSourceData(file_data)\n probe.Update()\n\n b = vnp.vtk_to_numpy(probe.GetOutput().GetPointData().GetArray(self.vector))\n return b", "def addDataPoints(self):\n pass", "def getData(self, slice=None):\n\t\traise NotImplementedError", "def points(self):\r\n return self._structure.points", "def _check_data_point(cube, metadata):\n point_index = []\n\n for dim_length in cube.shape:\n point_index.append(int(random.random() * dim_length))\n\n point_index = tuple(point_index)\n\n try:\n point_cube = cube[point_index]\n _data_point = point_cube.data\n except Exception:\n msg = 'Unable to extract data point {} from file: {}'.format(\n point_index, metadata['basename'])\n raise FileValidationError(msg)\n else:\n return True", "def extract_data(data_file):\n\n data_points = set()\n points_to_coordinates = {}\n x_coor_dict = {}\n # need this since we need to iterate through the first point indices in a fixed order\n first_point_indices = []\n\n with open(data_file) as input_file:\n # consume the first line, it only tells us how many points there are\n input_file.readline()\n point_number = 0\n current_x_coor = inf\n # point that's used as the indexing point for a list of points with the same x coordinates\n first_point_index = 1\n\n for line in input_file:\n point_number += 1\n line = line.strip('\\n')\n split_line = line.split(\" \")\n print(split_line)\n x_coor = float(split_line[1])\n # if we're seeing this x coordinate for the first time\n if current_x_coor != x_coor:\n first_point_index = point_number\n first_point_indices.append(first_point_index)\n x_coor_dict[first_point_index] = []\n current_x_coor = x_coor\n x_coor_dict[first_point_index].append(point_number)\n new_point = Point(x_coor, float(split_line[2]))\n points_to_coordinates[point_number] = new_point\n data_points.add(point_number)\n\n return data_points, points_to_coordinates, x_coor_dict, first_point_indices", "def GetPoint1(self):\n ...", "def 
GetPoint1(self):\n ...", "def GetStructuredData(self): # real signature unknown; restored from __doc__\n pass", "def getDataPoint(self, inputFile, pointDateTime, quiet=False):\n \n try:\n with open(inputFile, 'r') as f:\n # Get the header\n header = \"\"\n data = \"\"\n for x in range(4):\n header += f.readline()\n \n flag = False\n \n while not(flag):\n \n try:\n line = f.readline()\n \n except EOFError:\n flag = True\n found = False\n \n if line != '':\n words = line.split(',')\n \n date_time = words[0]\n sample_number = words[1]\n e_field = words[3]\n \n if pointDateTime in date_time:\n flag = True\n found = True \n else:\n flag = True\n found = False \n \n if found:\n if not(quiet):\n print(\" Found %s on %s\" %\n (pointDateTime, inputFile))\n \n data = line\n \n else:\n if not(quiet):\n print(\" Did not find %s on %s\" % (pointDateTime,\n inputFile))\n\n return header, data\n \n except IOError as e:\n print(str(e))", "def build_data(self, points):\n return NotImplemented", "def get_data(name, xarray=False, metadata=False):\n\n if name not in pytplot.data_quants.keys():\n print(\"That name is currently not in pytplot\")\n return\n \n temp_data_quant = pytplot.data_quants[name]\n\n if isinstance(temp_data_quant, dict):\n # non-record varying variables are stored as dicts\n return temp_data_quant['data']\n\n if xarray:\n return temp_data_quant\n\n if metadata:\n return temp_data_quant.attrs\n\n if 'v1' in temp_data_quant.coords.keys() and 'v2' in temp_data_quant.coords.keys() and 'v3' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v1', 'v2', 'v3'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v1'].values, temp_data_quant.coords['v2'].values, temp_data_quant.coords['v3'].values)\n elif 'v1' in temp_data_quant.coords.keys() and 'v2' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v1', 'v2'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v1'].values, temp_data_quant.coords['v2'].values)\n elif 'v1' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v1'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v1'].values)\n elif 'v' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v'].values)\n elif 'spec_bins' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['spec_bins'].values)\n variable = namedtuple('variable', ['times', 'y'])\n\n return variable(temp_data_quant.time.values, temp_data_quant.data)", "def get_points(self):\n return self._points", "def get_points(self):\n return self._points", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)", "def get_values(self):\n return (self.x,self.y)", "def point(self) -> Point3D:\n return self._point.copy()", "def process_point(meta, data):\n point = dfparser.Point()\n point.ParseFromString(data)\n\n sample_freq = meta['params']['sample_freq']\n threshold = meta['process_params']['threshold']\n\n events_all = []\n for channel in point.channels:\n for i, block in enumerate(channel.blocks):\n SOCKETIO.emit('progress',\n {'val': 
int((i/len(channel.blocks))*100)})\n eventlet.sleep(0)\n events = []\n for event in block.events:\n data = np.frombuffer(event.data, np.int16)\n events.append(extract_amps_approx2(data, event.time,\n threshold,\n sample_freq)[0])\n events = np.hstack(events)[0::2]\n events_all.append(events)\n\n events_all = np.hstack(events_all)\n return events_all", "def data(self):\r\n if self.metadata is None:\r\n self.refresh()\r\n return self.metadata", "def get_point(self, topic, **kwargs):\n topic = topic.strip('/')\n _log.debug('handle_get: {topic}'.format(topic=topic))\n path, point_name = topic.rsplit('/', 1)\n return self.vip.rpc.call(self.driver_vip_identity, 'get_point', path,\n point_name, **kwargs).get()", "def __getitem__(self,point):\n point=point.normalize(self.size)\n return self.terrain[point.y][point.x]", "def __getitem__(self, index):\n return self._timeseriesData[index]", "def getFirstData(self) -> ghidra.program.model.listing.Data:\n ...", "def get_data_float(self):\n if self.__data is None:\n return None\n return self.__data.copy()", "def _convert_point(cls, feature):\n lon, lat = feature['geometry']['coordinates']\n popup = feature['properties'].get('name', '')\n return cls(lat, lon)", "def data(self) -> datetime:\n return self._data", "def get_point(self, uuid):\n\n try:\n return Point.from_point(self._points[uuid])\n except KeyError:\n error_str = \"Trying to get an non-existing point with uuid: {}\"\n raise ValueError(error_str.format(uuid))", "def __getitem__(self, key):\n return self.points.__getitem__(key)", "def data(self):\n return getXarray(self.__mdsnode__,strict=self.__strict__)", "def __get_data(self, universe:list, factor_pool:list, ret_label:str, start_date, end_date)->tuple:\n X = None\n y = None\n return (X, y)", "def _get_data(self):\n raise NotImplementedError()", "def _basic_data_info(X, y):\n num_samples, num_feats = X.shape # start with X properties\n\n # Compute distribution\n classes, counts, percs = _class_distribution(y)\n num_classes = classes.size\n\n # Return data info dictionary\n output_dic = {\n \"Num_samples\": num_samples,\n \"Num_feats\": num_feats,\n \"Num_classes\": num_classes,\n \"classes\": classes,\n \"counts\": counts,\n \"percs\": percs\n }\n\n return output_dic", "def points(self) -> record.PackedPointRecord:\n return self._points", "def __get_points_object__(xyz):\n # TODO: Need to fix this to handle all points, not just the first\n source = vtk.vtkPointSource()\n source.SetCenter(xyz[0])\n source.SetRadius(0)\n source.SetNumberOfPoints(1)\n source.Update()\n return source", "def _create_data_point(\n self,\n run_name: str,\n event: tf.compat.v1.Event,\n value: tf.compat.v1.Summary.Value,\n metadata: tf.compat.v1.SummaryMetadata,\n ) -> tensorboard_data.TimeSeriesDataPoint:\n pass", "def get_metadata(self, variable):\n return self.dataset[variable]", "def getPoint(self, x, y):\n return self._c[x*self.__height + y]", "def _point(self):\n return list(map(repr,self.point))", "def getP1(self):\n return self.points[0]", "def tValueForPoint(self, point):\n if self.segmentType == \"curve\":\n on1 = self.previousOnCurve\n off1 = self.points[0].coordinates\n off2 = self.points[1].coordinates\n on2 = self.points[2].coordinates\n return _tValueForPointOnCubicCurve(point, (on1, off1, off2, on2))\n elif self.segmentType == \"line\":\n return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))\n elif self.segmentType == \"qcurve\":\n raise NotImplementedError\n else:\n raise NotImplementedError", "def get_value(self, 
point: Point) -> FieldState:\n return self.arr[point.y][point.x]", "def __getitem__(self, index):\n\n if self._data_indices is not None:\n index = self._data_indices[index]\n data = self._dataset[index]\n return data", "def coordinates(self):\n if hasattr(self, '_coordinates'):\n return self._coordinates\n else:\n return self._points", "def getFeatureData(self, feature):\n return self.data[:,self._getFIdx(feature)]", "def data_setup_datapoints():\n datapoints_list = []\n datapoints_list.append(helper_datapoints(\"datapoints1\", [(10, 0), (5, 1), (15, 2), (5, 3)]).get_id())\n datapoints_list.append(helper_datapoints(\"datapoints2\", [(1, 0), (2, 0.5), (3, 1.0), (4, 1.5)]).get_id())\n return datapoints_list", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def __getitem__(self, i):\n return self.__points[i]", "def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:\n return self.data[item]" ]
[ "0.66095746", "0.6598914", "0.65769345", "0.6538956", "0.65237606", "0.6339937", "0.62947094", "0.62947094", "0.61600286", "0.61509", "0.6141212", "0.6137378", "0.6118088", "0.6068833", "0.605036", "0.60272974", "0.59920406", "0.5991923", "0.59914654", "0.59783316", "0.59660155", "0.5946634", "0.5937396", "0.59302616", "0.5918874", "0.5914381", "0.58878475", "0.5880487", "0.5861513", "0.58490175", "0.5836979", "0.5815245", "0.5775202", "0.5762196", "0.57542616", "0.57334495", "0.5712834", "0.57019645", "0.5700838", "0.56934017", "0.5676873", "0.5674455", "0.56729734", "0.56666887", "0.56472087", "0.5644312", "0.5627764", "0.5622767", "0.56183714", "0.56183344", "0.5614866", "0.56141835", "0.56051517", "0.5604452", "0.5581556", "0.555865", "0.555783", "0.55507785", "0.55454725", "0.55454725", "0.5544659", "0.55438304", "0.5526698", "0.55167353", "0.5515631", "0.5515631", "0.55029607", "0.54963905", "0.5491004", "0.5486112", "0.54833496", "0.5475698", "0.546093", "0.54538894", "0.5445363", "0.54394263", "0.54392457", "0.5432977", "0.54300696", "0.54248816", "0.5423356", "0.5412996", "0.54125124", "0.54098713", "0.54011893", "0.53952885", "0.53946024", "0.5393585", "0.53930205", "0.5389761", "0.5388913", "0.53847134", "0.53825665", "0.53734946", "0.537188", "0.53682494", "0.53617024", "0.535967", "0.53402865", "0.5339358", "0.53288525" ]
0.0
-1
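The `objective` metadata in the row above flags it for triplet-style training, with `query` as the anchor, `document` as the positive, and `negatives` as candidate negatives. Below is a minimal sketch of building (anchor, positive, negative) triplets from one row, assuming the field layout in the schema at the top; ranking negatives by score to pick hard negatives is an illustrative choice, not something the dataset prescribes.

```python
# Sketch only: turn one row into (anchor, positive, negative) training triplets.
def row_to_triplets(row, top_k=5):
    anchor = row["query"]       # natural-language docstring / query
    positive = row["document"]  # the paired code snippet
    # Rank negatives by retrieval score; higher-scoring negatives are "harder".
    ranked = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: float(pair[1]),
        reverse=True,
    )
    return [(anchor, positive, negative) for negative, _ in ranked[:top_k]]
```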
Return the total number of images in the dataset.

    def __len__(self):
        return len(self.A_paths)

{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_of_images(self):", "def getNumberOfImages(self):\n\t\treturn self.numberOfImages", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def num_of_images(self):\n return len(self.data['image_infos'])", "def get_image_count(self):\n return self._num_images", "def count(self):\n \n return len(self.img_lst)", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def count_images(self):\n\t\treturn self.session.query(Image.id).count()", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def __len__(self):\n return self.num_images", "def numberOfImages(self):\n return len(self.imageList)", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def size(self):\n\t\t\treturn len(self.image_ids)", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def get_n_images(self) -> int:\n try:\n return self.header[\"NumberOfImagesInMosaic\"]\n except KeyError:\n raise KeyError(messages.MISSING_NUMBER_OF_IMAGES)", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def _get_num_objects_per_step(self, worker_id=0):\n data_layer = self.get_data_layer(worker_id)\n num_images = tf.shape(data_layer.input_tensors['source_tensors'][0])[0]\n return num_images", "def count(self):\r\n return self.data_array.size", "def count(self):\n return self.data_container.count", "def __len__(self):\n return len(self.db.list_nodes('/images'))", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def __len__(self):\n\n return len(self.images)", "def __len__(self):\n return len(self.images)", "def __len__(self):\n return len(self.images)", "def __len__(self):\n return self.images.size(0)", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def get_num_eval_images(hparams):\n num_images_map = {\n 'imagenet': 50000,\n 'cifar10': 10000,\n }\n if hparams.input_data.input_fn not in num_images_map:\n raise ValueError(\n f'Unknown dataset size for input_fn {hparams.input_data.input_fn}')\n\n return num_images_map[hparams.input_data.input_fn]", "async def totalImages(self, tags):\n with async_timeout.timeout(10):\n url = self.urlGen(tags=tags, PID=0)\n async with self.session.get(url=url) as XMLData:\n XMLData = await XMLData.read()\n XMLData = ET.XML(XMLData)\n XML = self.ParseXML(XMLData)\n return int(XML['posts']['@count'])\n return None", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def data_count(self):\n return(len(self.data))", "def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count", "def dataset_size(self):\n return self.dataset.size", "def size(self):\r\n return 
len(self._train_datas)", "def GetNumberOfMemoryImages(self):\r\n number = INT()\r\n r = CALL('GetNumberOfMemoryImages',self,INT(self.seq),byref(number))\r\n return self.CheckForSuccessError(r)", "def get_num_train_images(hparams):\n num_images_map = {\n 'imagenet': 1281167,\n 'cifar10': 50000,\n }\n if hparams.input_data.input_fn not in num_images_map:\n raise ValueError(\n f'Unknown dataset size for input_fn {hparams.input_data.input_fn}')\n\n num_images = num_images_map[hparams.input_data.input_fn]\n\n if hparams.input_data.max_samples > 0:\n return min(num_images, hparams.input_data.max_samples)\n return num_images", "def numPixels(self):\n self._logger.debug(\"numPixels\")\n return self.count", "def get_label_counts(dataset_path: str):\n if not dataset_path:\n return None\n td = ImageFolder(root=dataset_path)\n # get label distribution\n label_counts = [0] * len(td.classes)\n for p, l in td.samples:\n label_counts[l] += 1\n return label_counts", "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n self._hparams.source_dataset.files)\n return self._dataset_size", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def nbytes(self) -> int:\n\n return self.data.nbytes + self.shape.nbytes", "def __len__(self):\n return len(self.rimgdataset)", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def gen_img_counts(img_path, model):\n\n img = transform(Image.open(img_path).convert('RGB'))\n print(type(img))\n output = model(img.unsqueeze(0))\n pred_count = int(output.detach().cpu().sum().numpy())\n return pred_count", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def __len__(self):\n return len(self.imgs_path)", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def get_number_files(dataset):\n HOME = os.environ['HOME']\n # cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json',\n # '--key=%s/.globus/userkey.pem' % HOME, '--cert=%s/.globus/usercert.pem' % HOME]\n cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json']\n output = subprocess.check_output(cmds, stderr=subprocess.STDOUT)\n summary_dict = json.loads(output)\n return int(summary_dict['data'][0]['summary'][0]['nfiles'])", "def getNumberPhoto(guide):\n return 
len(guide.photos.all())", "def nb_im(self, code):\n return len(os.listdir(self._im_dir[code]))", "def data_size(self) -> int:\n return len(self.__labels)", "def __len__(self):\n gen_len = len(self.image_ids) // self.batch_size\n if len(self.image_ids) % self.batch_size != 0:\n gen_len += 1\n return gen_len", "def size(img):\n\treturn img.size", "def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def nbytes(self):\n dtype = self.config[\"dtype\"]\n if dtype is None:\n return None\n\n size = reduce(mul, self.shape, 1)\n nbytes = size * dtype.itemsize\n\n if getattr(self, \"masked\", True):\n nbytes += size\n\n return nbytes", "def get_num_datasets(self, data):\n dsets = set()\n for items in data:\n dsetid = items[3]\n dsets.add(dsetid)\n return len(dsets)", "def size(self, index):\n return self.base_dataset.size(index)", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def number_of_photos(self):\n return Submission.objects.filter(theme__contest=self).count()", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def _get_dataset_size(loader):\n if isinstance(loader, (tuple, list)):\n return len(loader[0].dataset)\n else:\n return len(loader.dataset)", "def __len__(self):\n return len(self.image_file_names)", "def getNbRows(self):\n return self.data.shape[1]", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def length(self, data: Sequence[Sequence[torch.Tensor]]) -> int:\n return self.n_batch", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def count_images_with_tags(self, tags):\n\t\tif not tags: \n\t\t\treturn self.session.query(Image.id).count()\n\t\t\n\t\treturn self.build_select_images_with_tags_query(tags).count()", "def get_data_ninstances(self):\n return self.data_ninstances", "def count(self):\n return len(self.read_ints())", "def count(self):\n return self.size()", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetNumberOfObjects(self)", "def dimension_count(self):\n return self._dimensionCount", "def width(self) -> int:\n return self._image_data.width", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetNumberOfObjects(self)", "def getSampleCount(self):\r\n return len(self._data)", "def get_nbytes(dset):\n if 'nbytes' in dset.attrs:\n # look if the dataset has an attribute nbytes\n return dset.attrs['nbytes']\n elif hasattr(dset, 'value'):\n # else extract nbytes from the underlying array\n return dset.size * numpy.zeros(1, dset.dtype).nbytes", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_GetNumberOfObjects(self)", "def GetNumberOfObjects(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUS3_GetNumberOfObjects(self)", "def n_points(self):\n\n if self.data_reduced:\n return 
len(self.data_reduced[0])\n else:\n return 0", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def __len__(self):\n return len(self.image_paths)", "def getNeuronCount(self):\n\t\treturn self.loader.getNeuronCount()", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size", "def size(self):\n\t\treturn self._count", "def n_total_files(self):\n return len(self.fileinfo)", "def __len__(self):\n return self.flat_image.size", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def __len__(self):\n return len(self.img_paths)", "def __len__(self):\n return len(self.image_names)", "def getNumDimensions(self):\n return len(self.di.keys())", "def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))" ]
[ "0.82737345", "0.81175524", "0.7999686", "0.79701054", "0.79361874", "0.7762907", "0.766983", "0.7524982", "0.7517643", "0.7517643", "0.7517643", "0.7517643", "0.7517643", "0.7477773", "0.7451018", "0.73769426", "0.73769027", "0.7314028", "0.71550864", "0.7140171", "0.71151656", "0.7070289", "0.70633394", "0.7057324", "0.7053774", "0.70323235", "0.700711", "0.700711", "0.6985165", "0.6979305", "0.69729316", "0.69230765", "0.686021", "0.6849251", "0.68485385", "0.6837464", "0.68371856", "0.68235505", "0.6819503", "0.6804754", "0.68046474", "0.68008184", "0.6787169", "0.67555326", "0.6737976", "0.6737976", "0.67371786", "0.6733668", "0.67325896", "0.6730809", "0.6709414", "0.6707386", "0.6705409", "0.6689267", "0.66579473", "0.6616515", "0.66126525", "0.6608104", "0.66043234", "0.66026974", "0.65950793", "0.6589355", "0.65838367", "0.6582899", "0.65745544", "0.65577555", "0.6554303", "0.6553004", "0.65431756", "0.6539042", "0.6523472", "0.65222603", "0.65159506", "0.65143925", "0.65111476", "0.6507529", "0.65072227", "0.6505137", "0.6496552", "0.6489943", "0.6489233", "0.64868313", "0.64844555", "0.648375", "0.6481952", "0.6461903", "0.6460552", "0.6458799", "0.6458192", "0.6458192", "0.6458192", "0.6457187", "0.6437831", "0.64375246", "0.6437025", "0.64340854", "0.6433578", "0.64331496", "0.64302456", "0.6412508", "0.64045554" ]
0.0
-1
Set up the SHC switch platform.

    async def async_setup_entry(
        hass: HomeAssistant,
        config_entry: ConfigEntry,
        async_add_entities: AddEntitiesCallback,
    ) -> None:
        entities: list[NumberEntity] = []
        session: SHCSession = hass.data[DOMAIN][config_entry.entry_id][DATA_SESSION]
        for number in (
            session.device_helper.thermostats + session.device_helper.roomthermostats
        ):
            entities.append(
                SHCNumber(
                    device=number,
                    parent_id=session.information.unique_id,
                    entry_id=config_entry.entry_id,
                    attr_name="Offset",
                )
            )
        if entities:
            async_add_entities(entities)

{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n add_devices_callback([\n HE853Switch('OviSwitch', STATE_ON),\n HE853Switch('AC', STATE_OFF)\n ])", "def setup_platform(hass, config, add_devices, discovery_info=None) -> None:\n friendly_name = config.get(CONF_FRIENDLY_NAME)\n mac_addr = config.get(CONF_MAC)\n add_devices([Switchmate(mac_addr, friendly_name)], True)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n pin = config.get(CONF_PIN)\n\n add_devices([ProgtimeSwitch(mac, pin, name)])", "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get('token')\n\n add_devices_callback([MiroboSwitch(name, host, token)])", "def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n ip_addr = config.get(CONF_HOST)\n mac_addr = binascii.unhexlify(config.get(CONF_MAC).encode().replace(b':', b''))\n target_temp_default = config.get(CONF_TARGET_TEMP)\n target_temp_step = config.get(CONF_TARGET_TEMP_STEP)\n operation_list = DEFAULT_OPERATION_LIST\n \n import broadlink\n \n broadlink_device = broadlink.hysen((ip_addr, 80), mac_addr, None)\n broadlink_device.timeout = config.get(CONF_TIMEOUT)\n\n try:\n broadlink_device.auth()\n add_devices([\n BroadlinkHysenClimate(hass, name, broadlink_device, target_temp_default, target_temp_step, operation_list)\n ])\n except socket.timeout:\n _LOGGER.error(\"Failed to connect to Broadlink Hysen Device IP:%s\",ip_addr)", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()", "def setup_platform(hass, config, add_devices, discovery_info=None):\n devices = config.get(CONF_SWITCHES, {})\n cmdrgbwlight = []\n\n for object_id, device_config in devices.items():\n value_template = device_config.get(CONF_STATE_VALUE_TEMPLATE)\n\n if value_template is not None:\n value_template.hass = hass\n\n cmdrgbwlight.append(\n CommandSwitch(\n hass,\n object_id,\n device_config.get(CONF_NAME),\n device_config.get(CONF_COMMAND_ON),\n device_config.get(CONF_COMMAND_OFF),\n device_config.get(CONF_COMMAND_STATE),\n device.config.get(CONF_BRIGHTNESS_STATE),\n device.config.get(CONF_BRIGHTNESS_COMMAND),\n device.config.get(CONF_BRIGHTNESS_VALUE_TEMPLATE),\n device.config.get(CONF_RGB_STATE),\n device.config.get(CONF_RGB_COMMAND),\n device.config.get(CONF_RGB_VALUE_TEMPLATE),\n device.config.get(CONF_FRIENDLY_NAME, object_id),\n device.config.get(CONF_BRIGHTNESS_SCALE),\n value_template\n )\n )\n\n if not cmdrgbwlight:\n _LOGGER.error(\"No switches added\")\n return False\n\n add_devices(cmdrgbwlight)", "def setup(hass: HomeAssistant, base_config: ConfigType) -> bool: # noqa: C901\n\n hass.data[DOMAIN] = {}\n\n # Parse configuration into a dict of device 
name to physical address\n # represented as a list of four elements.\n device_aliases = {}\n devices = base_config[DOMAIN].get(CONF_DEVICES, {})\n _LOGGER.debug(\"Parsing config %s\", devices)\n device_aliases.update(parse_mapping(devices))\n _LOGGER.debug(\"Parsed devices: %s\", device_aliases)\n\n platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)\n\n loop = (\n # Create own thread if more than 1 CPU\n hass.loop\n if multiprocessing.cpu_count() < 2\n else None\n )\n host = base_config[DOMAIN].get(CONF_HOST)\n display_name = base_config[DOMAIN].get(CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)\n if host:\n adapter = TcpAdapter(host, name=display_name, activate_source=False)\n else:\n adapter = CecAdapter(name=display_name[:12], activate_source=False)\n hdmi_network = HDMINetwork(adapter, loop=loop)\n\n def _adapter_watchdog(now=None):\n _LOGGER.debug(\"Reached _adapter_watchdog\")\n event.call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n if not adapter.initialized:\n _LOGGER.info(\"Adapter not initialized; Trying to restart\")\n hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)\n adapter.init()\n\n _adapter_watchdog_job = HassJob(_adapter_watchdog, cancel_on_shutdown=True)\n\n @callback\n def _async_initialized_callback(*_: Any):\n \"\"\"Add watchdog on initialization.\"\"\"\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n\n hdmi_network.set_initialized_callback(_async_initialized_callback)\n\n def _volume(call: ServiceCall) -> None:\n \"\"\"Increase/decrease volume and mute/unmute system.\"\"\"\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n _process_volume(KEY_VOLUME_UP, att)\n elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown command %s\", cmd)\n\n def _process_volume(cmd, att):\n if isinstance(att, (str,)):\n att = att.strip()\n if att == CMD_PRESS:\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n elif att == CMD_RELEASE:\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n else:\n att = 1 if att == \"\" else int(att)\n for _ in range(0, att):\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n\n def _tx(call: ServiceCall) -> None:\n \"\"\"Send CEC command.\"\"\"\n data = call.data\n if ATTR_RAW in data:\n command = CecCommand(data[ATTR_RAW])\n else:\n src = data.get(ATTR_SRC, ADDR_UNREGISTERED)\n dst = data.get(ATTR_DST, ADDR_BROADCAST)\n if ATTR_CMD in data:\n cmd = data[ATTR_CMD]\n else:\n _LOGGER.error(\"Attribute 'cmd' is missing\")\n return\n if ATTR_ATT in data:\n if isinstance(data[ATTR_ATT], (list,)):\n att = data[ATTR_ATT]\n else:\n att = reduce(lambda x, y: f\"{x}:{y:x}\", data[ATTR_ATT])\n else:\n att = \"\"\n command = CecCommand(cmd, dst, src, att)\n hdmi_network.send_command(command)\n\n def _standby(call: ServiceCall) -> None:\n hdmi_network.standby()\n\n def _power_on(call: ServiceCall) -> None:\n hdmi_network.power_on()\n\n def _select_device(call: ServiceCall) -> None:\n \"\"\"Select the active device.\"\"\"\n if not (addr := call.data[ATTR_DEVICE]):\n _LOGGER.error(\"Device not found: %s\", 
call.data[ATTR_DEVICE])\n return\n if addr in device_aliases:\n addr = device_aliases[addr]\n else:\n entity = hass.states.get(addr)\n _LOGGER.debug(\"Selecting entity %s\", entity)\n if entity is not None:\n addr = entity.attributes[\"physical_address\"]\n _LOGGER.debug(\"Address acquired: %s\", addr)\n if addr is None:\n _LOGGER.error(\n \"Device %s has not physical address\", call.data[ATTR_DEVICE]\n )\n return\n if not isinstance(addr, (PhysicalAddress,)):\n addr = PhysicalAddress(addr)\n hdmi_network.active_source(addr)\n _LOGGER.info(\"Selected %s (%s)\", call.data[ATTR_DEVICE], addr)\n\n def _update(call: ServiceCall) -> None:\n \"\"\"Update if device update is needed.\n\n Called by service, requests CEC network to update data.\n \"\"\"\n hdmi_network.scan()\n\n def _new_device(device):\n \"\"\"Handle new devices which are detected by HDMI network.\"\"\"\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )\n\n def _shutdown(call):\n hdmi_network.stop()\n\n def _start_cec(callback_event):\n \"\"\"Register services and start HDMI network to watch for devices.\"\"\"\n hass.services.register(\n DOMAIN, SERVICE_SEND_COMMAND, _tx, SERVICE_SEND_COMMAND_SCHEMA\n )\n hass.services.register(\n DOMAIN, SERVICE_VOLUME, _volume, schema=SERVICE_VOLUME_SCHEMA\n )\n hass.services.register(\n DOMAIN,\n SERVICE_UPDATE_DEVICES,\n _update,\n schema=SERVICE_UPDATE_DEVICES_SCHEMA,\n )\n hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)\n hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)\n hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, _select_device)\n\n hdmi_network.set_new_device_callback(_new_device)\n hdmi_network.start()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)\n return True", "def setup_platform(hass, config, add_devices, discovery_info=None):\n cl = hass.data.get(DATA_CIRCADIAN_LIGHTING)\n if cl:\n cs = CircadianSwitch(\n hass,\n cl,\n name=config.get(CONF_NAME),\n lights_ct=config.get(CONF_LIGHTS_CT, []),\n lights_rgb=config.get(CONF_LIGHTS_RGB, []),\n lights_xy=config.get(CONF_LIGHTS_XY, []),\n lights_brightness=config.get(CONF_LIGHTS_BRIGHT, []),\n disable_brightness_adjust=config.get(CONF_DISABLE_BRIGHTNESS_ADJUST),\n min_brightness=config.get(CONF_MIN_BRIGHT),\n max_brightness=config.get(CONF_MAX_BRIGHT),\n sleep_entity=config.get(CONF_SLEEP_ENTITY),\n sleep_state=config.get(CONF_SLEEP_STATE),\n sleep_colortemp=config.get(CONF_SLEEP_CT),\n sleep_brightness=config.get(CONF_SLEEP_BRIGHT),\n disable_entity=config.get(CONF_DISABLE_ENTITY),\n disable_state=config.get(CONF_DISABLE_STATE),\n initial_transition=config.get(CONF_INITIAL_TRANSITION),\n )\n add_devices([cs])\n\n def update(call=None):\n \"\"\"Update lights.\"\"\"\n cs.update_switch()\n\n return True\n else:\n return False", "def _init_hardware(self):\n return", "def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n switches = []\n for coil in config.get(\"coils\"):\n switches.append(ModbusCoilSwitch(\n coil.get(CONF_NAME),\n coil.get(CONF_SLAVE),\n coil.get(CONF_COIL)))\n add_devices(switches)", "def platform_start(self):\n self.platform.start()", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # 
Only act if loaded via mysensors by discovery event.\n # Otherwise gateway is not setup.\n if discovery_info is None:\n return\n\n for gateway in mysensors.GATEWAYS.values():\n # Define the S_TYPES and V_TYPES that the platform should handle as\n # states. Map them in a dict of lists.\n pres = gateway.const.Presentation\n set_req = gateway.const.SetReq\n map_sv_types = {\n pres.S_TEMP: [set_req.V_TEMP],\n pres.S_HUM: [set_req.V_HUM],\n pres.S_BARO: [set_req.V_PRESSURE, set_req.V_FORECAST],\n pres.S_WIND: [set_req.V_WIND, set_req.V_GUST],\n pres.S_RAIN: [set_req.V_RAIN, set_req.V_RAINRATE],\n pres.S_UV: [set_req.V_UV],\n pres.S_WEIGHT: [set_req.V_WEIGHT, set_req.V_IMPEDANCE],\n pres.S_POWER: [set_req.V_WATT, set_req.V_KWH],\n pres.S_DISTANCE: [set_req.V_DISTANCE],\n pres.S_LIGHT_LEVEL: [set_req.V_LIGHT_LEVEL],\n pres.S_IR: [set_req.V_IR_RECEIVE],\n pres.S_WATER: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_CUSTOM: [set_req.V_VAR1,\n set_req.V_VAR2,\n set_req.V_VAR3,\n set_req.V_VAR4,\n set_req.V_VAR5],\n pres.S_SCENE_CONTROLLER: [set_req.V_SCENE_ON,\n set_req.V_SCENE_OFF],\n }\n if float(gateway.protocol_version) < 1.5:\n map_sv_types.update({\n pres.S_AIR_QUALITY: [set_req.V_DUST_LEVEL],\n pres.S_DUST: [set_req.V_DUST_LEVEL],\n })\n if float(gateway.protocol_version) >= 1.5:\n map_sv_types.update({\n pres.S_COLOR_SENSOR: [set_req.V_RGB],\n pres.S_MULTIMETER: [set_req.V_VOLTAGE,\n set_req.V_CURRENT,\n set_req.V_IMPEDANCE],\n pres.S_SOUND: [set_req.V_LEVEL],\n pres.S_VIBRATION: [set_req.V_LEVEL],\n pres.S_MOISTURE: [set_req.V_LEVEL],\n pres.S_AIR_QUALITY: [set_req.V_LEVEL],\n pres.S_DUST: [set_req.V_LEVEL],\n })\n map_sv_types[pres.S_LIGHT_LEVEL].append(set_req.V_LEVEL)\n\n if float(gateway.protocol_version) >= 2.0:\n map_sv_types.update({\n pres.S_INFO: [set_req.V_TEXT],\n pres.S_GAS: [set_req.V_FLOW, set_req.V_VOLUME],\n pres.S_GPS: [set_req.V_POSITION],\n pres.S_WATER_QUALITY: [set_req.V_TEMP, set_req.V_PH,\n set_req.V_ORP, set_req.V_EC]\n })\n map_sv_types[pres.S_CUSTOM].append(set_req.V_CUSTOM)\n map_sv_types[pres.S_POWER].extend(\n [set_req.V_VAR, set_req.V_VA, set_req.V_POWER_FACTOR])\n\n devices = {}\n gateway.platform_callbacks.append(mysensors.pf_callback_factory(\n map_sv_types, devices, add_devices, MySensorsSensor))", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # Add devices\n add_devices([SemsSensor(\"SEMS Portal\", config)], True)", "async def async_setup(self):\n self._unsub_stop = self.hass.bus.async_listen(\n EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop\n )\n dev_reg = await device_registry.async_get_registry(self.hass)\n model_type = self.device.settings[\"device\"][\"type\"]\n dev_reg.async_get_or_create(\n config_entry_id=self.entry.entry_id,\n name=self.name,\n connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},\n # This is duplicate but otherwise via_device can't work\n identifiers={(DOMAIN, self.mac)},\n manufacturer=\"Shelly\",\n model=aioshelly.MODEL_NAMES.get(model_type, model_type),\n sw_version=self.device.settings[\"fw\"],\n )", "def setup(hass, config):\n global HASS_PLATFORM\n socketserver.TCPServer.allow_reuse_address = True\n HASS_PLATFORM = hass\n\n HASS_PLATFORM.data[DOMAIN] = {}\n\n port = int(config[DOMAIN][CONF_PORT])\n\n for hub_config in config[DOMAIN][CONF_HUBS]:\n hass.data[DOMAIN][hub_config[CONF_ACCOUNT]] = Hub(hass, hub_config)\n\n for component in [\"binary_sensor\", \"alarm_control_panel\", \"sensor\"]:\n discovery.load_platform(hass, component, DOMAIN, {}, config)\n\n for hub in 
HASS_PLATFORM.data[DOMAIN].values():\n for sensor in hub._states.values():\n sensor.async_schedule_update_ha_state()\n\n server = socketserver.TCPServer((\"\", port), AlarmTCPHandler)\n\n server_thread = threading.Thread(target=server.serve_forever)\n server_thread.start()\n\n return True", "def _swift_generic_setup(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings')):\n self._pull_configs('generic')\n self._swift_install()\n self._set_onhold('generic')\n self._final_install_touches()", "def setup(hass, config) -> bool:\n\n sh_conf = config.get(DOMAIN)\n\n # Assign configuration variables\n username = sh_conf[CONF_EMAIL]\n password = sh_conf[CONF_PASSWORD]\n\n # Setup connection with SmartHab API\n hub = pysmarthab.SmartHab()\n\n try:\n hub.login(username, password)\n except pysmarthab.RequestFailedException as ex:\n _LOGGER.error(\"Error while trying to reach SmartHab API.\")\n _LOGGER.debug(ex, exc_info=True)\n return False\n\n # Verify that passed in configuration works\n if not hub.is_logged_in():\n _LOGGER.error(\"Could not authenticate with SmartHab API\")\n return False\n\n # Pass hub object to child platforms\n hass.data[DOMAIN] = {DATA_HUB: hub}\n\n load_platform(hass, \"light\", DOMAIN, None, config)\n load_platform(hass, \"cover\", DOMAIN, None, config)\n\n return True", "def initFromEnv(self):\n #self.command = 'scram' # SB I think this line is not needed\n self[\"SCRAM_ARCH\"] = None\n\n if 'SCRAM_ARCH' in os.environ:\n self[\"SCRAM_ARCH\"] = os.environ[\"SCRAM_ARCH\"]\n else:\n stdout, _, _ = execute_command(command='scram arch')\n self[\"SCRAM_ARCH\"] = stdout\n\n try:\n self[\"CMSSW_BASE\"] = os.environ[\"CMSSW_BASE\"]\n self[\"CMSSW_VERSION\"] = os.environ[\"CMSSW_VERSION\"]\n# Commenting these two out. I don't think they are really needed\n# self.cmsswReleaseBase = os.environ[\"CMSSW_RELEASE_BASE\"]\n# self.localRT = os.environ[\"LOCALRT\"]\n except KeyError as ke:\n self[\"CMSSW_BASE\"] = None\n self[\"CMSSW_VERSION\"] = None\n# self.cmsswReleaseBase = None\n# self.localRT = None\n msg = \"Please make sure you have setup the CMS enviroment (cmsenv). 
Cannot find %s in your env\" % str(ke)\n msg += \"\\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial#CMS_environment for how to setup the CMS enviroment.\"\n raise EnvironmentException(msg)", "def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list", "def setup_platform(hass, config, add_devices, discovery_info=None):\n from pybotvac import Account\n\n try:\n auth = Account(config[CONF_USERNAME], config[CONF_PASSWORD])\n except HTTPError:\n _LOGGER.error(\"Unable to connect to Neato API\")\n return False\n\n dev = []\n for robot in auth.robots:\n for type_name in SWITCH_TYPES:\n dev.append(NeatoConnectedSwitch(robot, type_name))\n add_devices(dev)", "def setup_platform(hass, config, add_entities_callback, discovery_info=None):\n # Add switch from config file\n switches = get_devices_from_config(config, RfxtrxSwitch)\n add_entities_callback(switches)\n\n def switch_update(event):\n \"\"\"Handle sensor updates from the RFXtrx gateway.\"\"\"\n if (\n not isinstance(event.device, rfxtrxmod.LightingDevice)\n or event.device.known_to_be_dimmable\n or event.device.known_to_be_rollershutter\n ):\n return\n\n new_device = get_new_device(event, config, RfxtrxSwitch)\n if new_device:\n new_device.apply_event(event)\n add_entities_callback([new_device])\n\n # Subscribe to main RFXtrx events\n hass.helpers.dispatcher.dispatcher_connect(SIGNAL_EVENT, switch_update)", "def configure(self):\n\n self.platform.configure()", "def setup(self):\n self.machine = Machine(['a', 'b', 'c', '_'])", "def setup_platform(hass, config, add_devices, discovery_info=None):\n if discovery_info is None:\n return\n\n myzigate = hass.data[ZIGATE_DOMAIN]\n\n def sync_attributes():\n devs = []\n for device in myzigate.devices:\n ieee = device.ieee or device.addr # compatibility\n actions = device.available_actions()\n if not any(actions.values()):\n continue\n for endpoint, action_type in actions.items():\n if [zigate.ACTIONS_ONOFF] == action_type:\n key = '{}-{}-{}'.format(ieee,\n 'switch',\n endpoint\n )\n if key in hass.data[DATA_ZIGATE_ATTRS]:\n continue\n _LOGGER.debug(('Creating switch '\n 'for device '\n '{} {}').format(device,\n endpoint))\n entity = ZiGateSwitch(hass, device, endpoint)\n devs.append(entity)\n hass.data[DATA_ZIGATE_ATTRS][key] = entity\n\n add_devices(devs)\n sync_attributes()\n zigate.dispatcher.connect(sync_attributes,\n zigate.ZIGATE_ATTRIBUTE_ADDED, weak=False)", "def __init__(self,\n device_name,\n create_device_func,\n props,\n hub_name_prop,\n primary_port_prop,\n secondary_port_prop,\n ethernet_switch_prop,\n ethernet_port_prop,\n get_switchboard_if_initialized,\n power_and_data_share_cable=False,\n pre_off_func=None):\n super().__init__(device_name=device_name)\n\n self._create_device_func = create_device_func\n self._hub_name_prop = hub_name_prop\n self._primary_port_prop = primary_port_prop\n self._secondary_port_prop = secondary_port_prop\n self._props = props\n self._ethernet_switch = None\n\n # Set the properties\n self._get_switchboard_if_initialized = get_switchboard_if_initialized\n self._power_and_data_share_cable = power_and_data_share_cable\n self._pre_off_func = pre_off_func\n self._ethernet_switch_prop = ethernet_switch_prop\n self._ethernet_port_prop = 
ethernet_port_prop", "async def async_setup_platform(hass, config, async_add_entities,\n discovery_info=None):\n\n name = config.get(CONF_NAME)\n\n devices = []\n unique_id = None\n\n device = XiaomiPlugGenericSwitch(name, unique_id, hass, config)\n devices.append(device)\n\n async_add_entities(devices, update_before_add=True)", "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew /etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")", "def setup_platform(\n hass: HomeAssistant,\n config: Dict,\n add_devices: Callable,\n discovery_info: Optional[Dict] = None,\n) -> None:\n havdalah = config[HAVDALAH_MINUTES]\n candle_light = config[CANDLE_LIGHT_MINUTES]\n cities = config[GEONAMES]\n cities_list = cities.split(\",\")\n\n add_devices(\n [\n ShabbatTimes(\n hass,\n city,\n \"Shabbat Times {}\".format(city.replace(\"-\", \"_\")),\n havdalah,\n candle_light,\n )\n for city in cities_list\n ]\n )", "async def async_setup(self):\n dev_reg = await device_registry.async_get_registry(self.hass)\n model_type = self.device.settings[\"device\"][\"type\"]\n 
dev_reg.async_get_or_create(\n config_entry_id=self.entry.entry_id,\n name=self.name,\n connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},\n # This is duplicate but otherwise via_device can't work\n identifiers={(DOMAIN, self.mac)},\n manufacturer=\"Shelly\",\n model=aioshelly.MODEL_NAMES.get(model_type, model_type),\n sw_version=self.device.settings[\"fw\"],\n )", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # pylint: disable=import-error\n from fritzconnection import FritzCallforwarding\n from fritzconnection.fritzconnection import FritzConnectionException\n\n host = config.get(CONF_HOST)\n port = config.get(CONF_PORT)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n fritz_box = None\n\n try:\n fritz_box = FritzCallforwarding(address=host,\n port=port,\n user=username,\n password=password)\n except (ValueError, TypeError, FritzConnectionException):\n fritz_box = None\n\n if fritz_box is None:\n _LOGGER.error('Failed to establish connection to FRITZ!Box '\n 'with IP: %s', host)\n raise ConnectionError('Failed to establish connection to FRITZ!Box '\n 'with IP: %s', host)\n else:\n _LOGGER.debug('Successfully connected to FRITZ!Box')\n\n devices = []\n for call_forwarding in fritz_box.get_call_forwardings():\n _LOGGER.debug(call_forwarding)\n devices.append(FritzCallForwardingSwitch(fritz_box, call_forwarding))\n add_devices(devices)", "def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError", "def _default_setup(self):\n self._n_configs = 1\n self._sn_size = 100\n self._nt = 10000\n self._active_brdch = np.zeros(\n (), dtype=[(\"SIS 3302\", bool, (4, 8)), (\"SIS 3305\", bool, (2, 8))]\n )\n self._active_brdch[\"SIS 3302\"][0][0] = True\n self._active_brdch[\"SIS 3305\"][0][0] = True\n self._config_names = []\n self._active_config = (\"config01\",)\n self._sis3305_mode = 0", "def shell_init_instructions(cmd, equivalent):\n\n shell_specific = \"{sh_arg}\" in equivalent\n\n msg = [\n \"`%s` requires Spack's shell support.\" % cmd,\n \"\",\n \"To set up shell support, run the command below for your shell.\",\n \"\",\n color.colorize(\"@*c{For bash/zsh/sh:}\"),\n \" . %s/setup-env.sh\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For csh/tcsh:}\"),\n \" source %s/setup-env.csh\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For fish:}\"),\n \" source %s/setup-env.fish\" % spack.paths.share_path,\n \"\",\n color.colorize(\"@*c{For Windows batch:}\"),\n \" source %s/spack_cmd.bat\" % spack.paths.share_path,\n \"\",\n \"Or, if you do not want to use shell support, run \"\n + (\"one of these\" if shell_specific else \"this\")\n + \" instead:\",\n \"\",\n ]\n\n if shell_specific:\n msg += [\n equivalent.format(sh_arg=\"--sh \") + \" # bash/zsh/sh\",\n equivalent.format(sh_arg=\"--csh \") + \" # csh/tcsh\",\n equivalent.format(sh_arg=\"--fish\") + \" # fish\",\n equivalent.format(sh_arg=\"--bat \") + \" # batch\",\n ]\n else:\n msg += [\" \" + equivalent]\n\n msg += [\n \"\",\n \"If you have already set up Spack's shell support but still receive\",\n \"this message, please make sure to call Spack via the `spack` command\",\n \"without any path components (such as `bin/spack`).\",\n ]\n\n msg += [\"\"]\n tty.error(*msg)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n # import awesomelights\n\n # Assign configuration variables. 
The configuration check takes care they are\n # present.\n host = config.get(CONF_HOST)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n # Setup connection with devices/cloud\n # hub = awesomelights.Hub(host, username, password)\n _LOGGER.info(\"hub = awesomelights.Hub(host, username, password)\")\n\n # Verify that passed in configuration works\n # if not hub.is_valid_login():\n # _LOGGER.error(\"Could not connect to AwesomeLight hub\")\n # return\n\n # Add devices\n # add_devices(AwesomeLight(light) for light in hub.lights())\n add_devices([AwesomeLight(Light)])", "def setPlatform(self):\n\t\treturn None", "def setup_platform(hass, config, add_devices, discovery_info=None):\n cuby = get_component('cuby')\n # Default needed in case of discovery\n sensors = config.get(CONF_MONITORED_CONDITIONS, SENSOR_TYPES)\n\n for device in cuby.CUBY.devices.values():\n for variable in sensors:\n add_devices(\n [CubySensor(cuby.CUBY, device, variable)], True)", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n entry_config = hass.data[DOMAIN]\n lge_devices = entry_config.get(LGE_DEVICES)\n if not lge_devices:\n return\n\n _LOGGER.debug(\"Starting LGE ThinQ switch setup...\")\n lge_switch = []\n\n # add WM devices\n lge_switch.extend(\n [\n LGESwitch(lge_device, switch_desc)\n for switch_desc in WASH_DEV_SWITCH\n for lge_device in get_multiple_devices_types(lge_devices, WM_DEVICE_TYPES)\n if _switch_exist(lge_device, switch_desc)\n ]\n )\n\n # add refrigerators\n lge_switch.extend(\n [\n LGESwitch(lge_device, switch_desc)\n for switch_desc in REFRIGERATOR_SWITCH\n for lge_device in lge_devices.get(DeviceType.REFRIGERATOR, [])\n if _switch_exist(lge_device, switch_desc)\n ]\n )\n\n # add AC duct zone switch\n lge_switch.extend(\n [\n LGEDuctSwitch(lge_device, duct_zone)\n for lge_device in lge_devices.get(DeviceType.AC, [])\n for duct_zone in lge_device.device.duct_zones\n ]\n )\n\n async_add_entities(lge_switch)", "def cli():\n\n global platform_fanutil\n global platform_chassis\n\n if os.geteuid() != 0:\n click.echo(\"Root privileges are required for this operation\")\n sys.exit(1)\n\n # Load the helper class\n helper = UtilHelper()\n\n if not helper.check_pddf_mode():\n click.echo(\"PDDF mode should be supported and enabled for this platform for this operation\")\n sys.exit(1)\n\n # Load new platform api class\n try:\n import sonic_platform.platform\n platform_chassis = sonic_platform.platform.Platform().get_chassis()\n except Exception as e:\n click.echo(\"Failed to load chassis due to {}\".format(str(e)))\n\n\n # Load platform-specific fanutil class if new platform object class is not found\n if platform_chassis is None:\n try:\n platform_fanutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)\n except Exception as e:\n click.echo(\"Failed to load {}: {}\".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))\n sys.exit(2)", "async def test_switch(\n hass: HomeAssistant, 
mock_bridge_v2, v2_resources_test_data\n) -> None:\n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"switch\")\n # there shouldn't have been any requests at this point\n assert len(mock_bridge_v2.mock_requests) == 0\n # 2 entities should be created from test data\n assert len(hass.states.async_all()) == 2\n\n # test config switch to enable/disable motion sensor\n test_entity = hass.states.get(\"switch.hue_motion_sensor_motion\")\n assert test_entity is not None\n assert test_entity.name == \"Hue motion sensor Motion\"\n assert test_entity.state == \"on\"\n assert test_entity.attributes[\"device_class\"] == \"switch\"", "def setup_platform(hass, config, add_entities, discovery_info=None):\n pass", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n name = config.get(CONF_NAME)\n host = config.get(CONF_HOST)\n port = config.get(CONF_PORT)\n icon = config.get(CONF_ICON)\n if not icon:\n icon = 'mdi:television'\n server = 'http://192.168.0.12:8008/ssdp/device-desc.xml'\n client = pydial.DialClient(server)\n device = client.get_device_description()\n status = device.friendly_name\n add_entities([DialSensor(name,host,port,icon,status)])", "def setup_platform(hass, config, add_devices, discovery_info=None):\n dev_id = config.get(CONF_ID, None)\n devname = config.get(CONF_NAME, \"EnOcean binary sensor\")\n add_devices([EnOceanBinarySensor(dev_id, devname)])", "async def async_setup_platform(\n hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None\n) -> None:\n if discovery_info is None:\n return\n\n broker = hass.data[DOMAIN][\"broker\"]\n\n async_add_entities(\n [\n GeniusSwitch(broker, z)\n for z in broker.client.zone_objs\n if z.data[\"type\"] == GH_ON_OFF_ZONE\n ]\n )", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n\n data = hass.data[LUPUSEC_DOMAIN]\n\n device_types = [CONST.TYPE_OPENING]\n\n devices = []\n for device in data.lupusec.get_devices(generic_type=device_types):\n devices.append(LupusecBinarySensor(data, device))\n\n add_entities(devices)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host = config[CONF_HOST]\n monitored_variables = config[CONF_MONITORED_VARIABLES]\n\n charger = openevsewifi.Charger(host)\n\n entities = [\n OpenEVSESensor(charger, description)\n for description in SENSOR_TYPES\n if description.key in monitored_variables\n ]\n\n add_entities(entities, True)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None\n) -> None:\n # Assign configuration variables.\n # The configuration check takes care they are 
present.\n host = config[CONF_HOST]\n username = config[CONF_USERNAME]\n password = config.get(CONF_PASSWORD)\n\n # Setup connection with devices/cloud\n hub = awesomelights.Hub(host, username, password)\n\n # Verify that passed in configuration works\n if not hub.is_valid_login():\n _LOGGER.error(\"Could not connect to AwesomeLight hub\")\n return\n\n # Add devices\n add_entities(AwesomeLight(light) for light in hub.lights())", "def initialize_shi_mcc(mcc):\n pi = ProfileInstance.getInstance()\n hw = HardwareStatusInstance.getInstance()\n try:\n\n\n # I don't know what this is talking about\n # TODO: Log better events\n Logging.logEvent(\"Debug\", \"Status Update\",\n {\"message\": \"Power on the Shi Mcc\",\n \"level\": 3})\n\n # starting the helper thread to read the fifo file\n mcc.open_port()\n\n while hw.pc_104.digital_out.getVal('CryoP Pwr Relay 1') is None:\n time.sleep(.5)\n\n # Checks to see if the MCC is currently powered,\n currently_powered = hw.pc_104.digital_out.getVal('MCC2 Power')\n\n\n # If it isn't it turns on.\n # TODO: Turn this into loop\n print(\"about to wait for MCC to turn on\")\n hw.pc_104.digital_out.update({'MCC2 Power': True})\n if not currently_powered:\n time.sleep(2)\n\n # This is here to clear any old data that might be in the port, waiting for .2 seconds to allow for HW to reply\n mcc.flush_port(.2)\n\n # Now send some initialization commands:\n\n # 1: The maximum second stage temperature the cryopump may start to restart after a power failure, should be 65\n restart_temperature = mcc.get_regen_param_6()\n if restart_temperature['Data'] != 65:\n run_set_mcc_cmd(mcc.set_regen_param, [' ', '6', 65])\n\n # 2: Power failure recovery enabled only when T2 is less than the limit set point.\n power_failure_recovery_status = mcc.get_power_failure_recovery()\n if power_failure_recovery_status['Data'] != 2:\n run_set_mcc_cmd(mcc.set_regen_param, [' ', 2])\n\n except RuntimeError as e:\n item = \"Shi MCC\"\n error_details = \"ERROR: {}: There has been an error with the {} ({})\".format(item, item, e)\n log_hw_error(pi=pi, item=item, error_details=error_details)\n error = True\n except TimeoutError as e:\n HardwareStatusInstance.getInstance().shi_mcc_power = False\n item = \"Shi MCC\"\n error_details = \"ERROR: {}: There has been a Timeout error with the {} ({})\".format(item, item, e)\n log_hw_error(pi=pi, item=item, error_details=error_details)\n error = True\n else:\n HardwareStatusInstance.getInstance().shi_mcc_power = True\n error = False\n # This is next time the code should read mcc parameters...\n # It gets initialized to current time so they are read ASAP\n next_param_read_time = time.time()\n mcc_status_read_time = time.time()\n\n return error, next_param_read_time, mcc_status_read_time", "def prepare(self):\n super(Test200SmartSanityClear005, self).prepare()\n\n self.logger.info('Preconditions:')\n self.logger.info('1. Open Micro/WINr; ')\n self.logger.info('2. Set up connection with PLC;')\n self.logger.info('3. 
Download a project which has OB,DB,SDB;')\n self.MicroWIN.test_prepare('reset_factory_01.smart', False)\n # set cpu mode to run\n self.PLC['1'].set_plc_mode(1)\n self.memory_options = self.PLC['1'].find('memory_options')\n # force some value\n self.memory_options.force('v', 'byte', 0, value=self.force_value)\n time.sleep(5)\n self.PLC['1'].set_plc_mode(0)", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n\n bhyve = hass.data[DOMAIN][entry.entry_id][CONF_CLIENT]\n\n switches = []\n devices = filter_configured_devices(entry, await bhyve.devices)\n programs = await bhyve.timer_programs\n\n device_by_id = {}\n\n for device in devices:\n device_id = device.get(\"id\")\n device_by_id[device_id] = device\n if device.get(\"type\") == DEVICE_SPRINKLER:\n if not device.get(\"status\"):\n _LOGGER.warning(\n \"Unable to configure device %s: the 'status' attribute is missing. Has it been paired with the wifi hub?\",\n device.get(\"name\"),\n )\n continue\n\n # Filter out any programs which are not for this device\n device_programs = [\n program for program in programs if program.get(\"device_id\") == device_id\n ]\n\n switches.append(\n BHyveRainDelaySwitch(hass, bhyve, device, \"weather-pouring\")\n )\n\n all_zones = device.get(\"zones\")\n for zone in all_zones:\n zone_name = zone.get(\"name\")\n # if the zone doesn't have a name, set it to the device's name if there is only one (eg a hose timer)\n if zone_name is None:\n zone_name = (\n device.get(\"name\") if len(all_zones) == 1 else \"Unnamed Zone\"\n )\n switches.append(\n BHyveZoneSwitch(\n hass,\n bhyve,\n device,\n zone,\n zone_name,\n device_programs,\n \"water-pump\",\n )\n )\n\n for program in programs:\n program_device = device_by_id.get(program.get(\"device_id\"))\n program_id = program.get(\"program\")\n if program_device is not None and program_id is not None:\n _LOGGER.info(\"Creating switch: Program %s\", program.get(\"name\"))\n switches.append(\n BHyveProgramSwitch(\n hass, bhyve, program_device, program, \"bulletin-board\"\n )\n )\n\n async_add_entities(switches, True)\n\n async def async_service_handler(service):\n \"\"\"Map services to method of BHyve devices.\"\"\"\n _LOGGER.info(\"%s service called\", service.service)\n method = SERVICE_TO_METHOD.get(service.service)\n if not method:\n _LOGGER.warning(\"Unknown service method %s\", service.service)\n return\n\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n component = hass.data.get(SWITCH_DOMAIN)\n if entity_ids:\n target_switches = [component.get_entity(entity) for entity in entity_ids]\n else:\n return\n\n method_name = method[\"method\"]\n _LOGGER.debug(\"Service handler: %s %s\", method_name, params)\n\n for entity in target_switches:\n if not hasattr(entity, method_name):\n _LOGGER.error(\"Service not implemented: %s\", method_name)\n return\n await getattr(entity, method_name)(**params)\n\n for service, details in SERVICE_TO_METHOD.items():\n schema = details[\"schema\"]\n hass.services.async_register(\n DOMAIN, service, async_service_handler, schema=schema\n )", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_hardware_sound_systems'] = True", "def __init__(self, coresys: CoreSys):\n self.coresys: CoreSys = coresys\n 
self._devices: dict[str, Device] = {}\n self._udev = pyudev.Context()\n\n self._montior: HwMonitor = HwMonitor(coresys)\n self._helper: HwHelper = HwHelper(coresys)\n self._policy: HwPolicy = HwPolicy(coresys)\n self._disk: HwDisk = HwDisk(coresys)", "def setup_platform(hass, config, add_devices, discovery_info=None):\r\n pull_mode = config[CONF_PULL_MODE]\r\n invert_logic = config[CONF_INVERT_LOGIC]\r\n\r\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\r\n\r\n binary_sensors = []\r\n pins = config[CONF_PINS]\r\n\r\n for pin_num, pin_name in pins.items():\r\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))\r\n add_devices(binary_sensors, True)", "def t0_switch_config_helper(test_obj: 'T0TestBase'):\n configer = SwitchConfiger(test_obj)\n test_obj.dut.switch_id = configer.start_switch()", "def setup_CL():\n\n #Set up openCL platform\n NAME = 'NVIDIA CUDA'\n platforms = cl.get_platforms()\n\n dev = None\n for p in platforms:\n #Easy switching for local vs remote machine\n if p.name == 'Apple':\n NAME = 'Apple'\n if p.name == NAME:\n dev = p.get_devices()\n\n # Command queue, enable GPU profiling\n ctx = cl.Context(dev)\n queue = cl.CommandQueue(ctx,properties=cl.command_queue_properties.PROFILING_ENABLE)\n\n return [dev,ctx,queue]", "async def setup_platform(hass, platform: str, *,\n devices=None, scenes=None):\n hass.config.components.add(DOMAIN)\n config_entry = ConfigEntry(2, DOMAIN, \"Test\",\n {CONF_INSTALLED_APP_ID: str(uuid4())},\n SOURCE_USER, CONN_CLASS_CLOUD_PUSH)\n broker = DeviceBroker(hass, config_entry, Mock(), Mock(),\n devices or [], scenes or [])\n\n hass.data[DOMAIN] = {\n DATA_BROKERS: {\n config_entry.entry_id: broker\n }\n }\n await hass.config_entries.async_forward_entry_setup(\n config_entry, platform)\n await hass.async_block_till_done()\n return config_entry", "async def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n\n def get_entities():\n \"\"\"Get a list of entities.\"\"\"\n entities = []\n hc_api = hass.data[DOMAIN][config_entry.entry_id]\n for device_dict in hc_api.devices:\n entity_dicts = device_dict.get(CONF_ENTITIES, {}).get(\"switch\", [])\n entity_list = [HomeConnectProgramSwitch(**d) for d in entity_dicts]\n entity_list += [HomeConnectPowerSwitch(device_dict[CONF_DEVICE])]\n entities += entity_list\n return entities\n\n async_add_entities(await hass.async_add_executor_job(get_entities), True)", "def find_hardware(self, device_info=None):\n if os.name is not 'nt': # If not on a Windows system, just set up soundcard\n self.setup_soundcard()\n self.hardware.append('Soundcard')\n self.out_samplefreq = 44100\n else:\n if 'NIDAQ' in self.required_hardware and self.setup_nidaq(device_info):\n self.hardware.append('NIDAQ')\n if 'RP21' in self.required_hardware and self.setup_RP21('c:\\pystartle\\startle.rco'):\n self.hardware.append('RP21')\n if 'PA5' in self.required_hardware and self.setup_PA5():\n self.hardware.append('PA5')\n if 'RZ5D' in self.required_hardware and self.setup_RZ5D():\n self.hardware.append('RZ5D')", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. 
Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n binary_sensors = []\n for name in hass.data[DOMAIN]:\n if name in BINARY_SENSORS:\n binary_sensors.append(NextcloudBinarySensor(name))\n add_entities(binary_sensors, True)", "def main():\n print(\"Configuring system\")\n ain_config, settling_conf, resolution_config = None, None, None\n print(\"\\tSetting globals\")\n Globals.init()\n print(\"\\tSetting signals\")\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGABRT, signal_handler)\n signal.signal(signal.SIGQUIT, signal_handler)\n signal.signal(signal.SIGTSTP, signal_handler)\n signal.signal(signal.SIGHUP, signal_handler)\n print(\"\\tConnecting to devices\")\n handles, information = ld_connect(T7_DEVICE, CT[0])\n if len(handles) != 0:\n print(\"\\tFound \" + str(len(handles)) + \" device(s)\")\n ain_addr = [AIN_ADDRS[0], AIN_ADDRS[2], AIN_ADDRS[1], AIN_ADDRS[3]]\n settling_addr = [SETTLING_ADDR[0], SETTLING_ADDR[2], SETTLING_ADDR[1], SETTLING_ADDR[3]]\n resolution_addr = [RES_ADDR[0], RES_ADDR[2], RES_ADDR[1], RES_ADDR[3]]\n ain_range = [1.0, 10.0, 1.0, 10.0]\n gnd_ref_range = [NEGATIVE_REF_ADDR[0], NEGATIVE_REF_ADDR[2]]\n \"\"\" 1 is for AIN1, 3 for AIN3 and 5 for AIN5 \"\"\"\n gnd_io_range = [1, 3]\n ain_config = ld_ain_config(handles, ain_addr, aio_dir=1, ain_range=ain_range)\n settling_conf = ld_settling_config(handles, settling_addr, SETTLING_LIST[6])\n resolution_config = ld_resolution_config(handles, resolution_addr, RES_LIST[12])\n gnd_config = ld_gnd_ref_conf(handles, gnd_ref_range, gnd_io_range)\n Globals.add_global_handles(handles)\n Globals.add_global_information(information)\n if ain_config == 0 and settling_conf == 0 and resolution_config == 0 and gnd_config == 0:\n \"\"\"\n sync = Sync.Devices(handles, 10, 1)\n\n sync.sync()\n \"\"\"\n print(\"\\tScanning device(s)\")\n Monitor = Devices(handles, 500, [\"AIN0\", \"AIN2\"], 10000, 1)\n \"\"\"\n Sync = sync.Devices(handles, 500, [\"AIN0\", \"AIN2\", \"AIN4\"], 3000, 1)\n \"\"\"\n Sync.sync()\n \"\"\"\n monitor_dio_ain(handles, information)\n print(\"Closing connection to devices\")\n \"\"\"\n else:\n if ain_config == 0:\n print(\"Analog configuration: Success.\")\n else:\n print(\"Analog configuration: Failure.\")\n if settling_conf == 0:\n print(\"Settling time configuration: Success.\")\n else:\n print(\"Settling time configuration: Failure.\")\n if resolution_config == 0:\n print(\"Resolution configuration: Success.\")\n else:\n print(\"Resolution configuration: Failure.\")\n if gnd_config == 0:\n print(\"Gnd references configuration: Success.\")\n else:\n print(\"Gnd references configuration: Failure.\")\n print(\"Configuration unsuccessful. 
Closing connection\")\n for handle in Globals.handles:\n ljm.close(handle)\n print(\"Connections closed\")\n\n else:\n print(\"\\tUnable to detect any devices\")\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n signal.signal(signal.SIGQUIT, signal.SIG_DFL)\n signal.signal(signal.SIGTSTP, signal.SIG_DFL)\n signal.signal(signal.SIGHUP, signal.SIG_DFL)\n signal.signal(signal.SIGABRT, signal.SIG_DFL)\n print(\"Exiting\")\n return", "def setup_board(self, board, args=None, **kwargs):\n self.cros_sdk('setup board',\n ['./setup_board', '--board', board],\n args, **kwargs)", "def configure(ctx):\n mxmlc = ctx.options.mxmlc\n\n if not mxmlc:\n mxmlc = ctx.find_program('mxmlc')\n\n ctx.env.MXMLC = os.path.abspath(os.path.expanduser(mxmlc))\n\n ctx.env.JAVA = ctx.find_program('java')\n\n if not ctx.env.SIKULI_HOME:\n ctx.env.SIKULI_HOME = get_sikuli_home(ctx)\n ctx.msg('Setting SIKULI_HOME', ctx.env.SIKULI_HOME)\n\n if not os.path.exists(ctx.env.SIKULI_HOME):\n ctx.fatal('Unable to find Sikuli at %r' % (ctx.env.SIKULI_HOME,))\n\n ctx.env.FLASH_PLAYER = ctx.options.flash_player\n\n if not ctx.env.FLASH_PLAYER:\n ctx.fatal('Standalone Flash player required, supply --flash_player')\n\n ctx.msg('Using Flash Standalone Player', ctx.env.FLASH_PLAYER)", "async def async_setup_platform(hass, config, async_add_entities,\n discovery_info=None):\n from pyhs3 import HASS_SENSORS, DEVICE_ZWAVE_BATTERY\n\n sensor_devices = []\n homeseer = hass.data[DOMAIN]\n\n for device in homeseer.devices:\n if device.device_type_string in HASS_SENSORS:\n if device.device_type_string == DEVICE_ZWAVE_BATTERY:\n dev = HSBattery(device, homeseer)\n else:\n dev = HSSensor(device, homeseer)\n sensor_devices.append(dev)\n _LOGGER.info('Added HomeSeer sensor-type device: {}'.format(dev.name))\n\n async_add_entities(sensor_devices)", "async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n _LOGGER.debug(\"Wattio Switch component running ...\")\n security_enabled = hass.data[DOMAIN][\"security_enabled\"]\n if discovery_info is None:\n _LOGGER.error(\"No Sensor(s) discovered\")\n return\n devices = []\n # Create Updater Object\n for device in hass.data[DOMAIN][\"devices\"]:\n if device[\"type\"] in SWITCHES:\n devices.append(\n WattioSwitch(\n device[\"name\"], device[\"type\"], ICON[device[\"type\"]], device[\"ieee\"]\n )\n )\n _LOGGER.debug(\"Adding device: %s\", device[\"name\"])\n\n if device[\"type\"] in SECURITY and security_enabled is True:\n devices.append(\n WattioSecurity(\n device[\"name\"], device[\"type\"], ICON[\"security\"], device[\"ieee\"]\n )\n )\n _LOGGER.debug(\"Adding device: %s\", device[\"name\"])\n async_add_entities(devices)", "async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n platform = entity_platform.async_get_current_platform()\n\n for service_name, schema, method in (\n (\"start_program\", {}, \"async_start_program\"),\n (\n \"start_zone\",\n {\n vol.Optional(\n CONF_DEFAULT_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN\n ): cv.positive_int\n },\n \"async_start_zone\",\n ),\n (\"stop_program\", {}, \"async_stop_program\"),\n (\"stop_zone\", {}, \"async_stop_zone\"),\n ):\n platform.async_register_entity_service(service_name, schema, method)\n\n data: RainMachineData = hass.data[DOMAIN][entry.entry_id]\n entities: list[RainMachineBaseSwitch] = []\n\n for kind, api_category, switch_class, switch_enabled_class in (\n (\"program\", DATA_PROGRAMS, 
RainMachineProgram, RainMachineProgramEnabled),\n (\"zone\", DATA_ZONES, RainMachineZone, RainMachineZoneEnabled),\n ):\n coordinator = data.coordinators[api_category]\n for uid, activity in coordinator.data.items():\n name = activity[\"name\"].capitalize()\n\n # Add a switch to start/stop the program or zone:\n entities.append(\n switch_class(\n entry,\n data,\n RainMachineActivitySwitchDescription(\n key=f\"{kind}_{uid}\",\n name=name,\n api_category=api_category,\n uid=uid,\n ),\n )\n )\n\n # Add a switch to enabled/disable the program or zone:\n entities.append(\n switch_enabled_class(\n entry,\n data,\n RainMachineActivitySwitchDescription(\n key=f\"{kind}_{uid}_enabled\",\n name=f\"{name} enabled\",\n api_category=api_category,\n uid=uid,\n ),\n )\n )\n\n # Add switches to control restrictions:\n for description in RESTRICTIONS_SWITCH_DESCRIPTIONS:\n coordinator = data.coordinators[description.api_category]\n if not key_exists(coordinator.data, description.data_key):\n continue\n entities.append(RainMachineRestrictionSwitch(entry, data, description))\n\n async_add_entities(entities)", "async def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n\n if discovery_info is None:\n _LOGGER.warning(\n \"To use this you need to configure the 'docker_monitor' component\")\n return\n\n host_name = discovery_info[CONF_NAME]\n api = hass.data[DOMAIN][host_name]\n\n switches = [ContainerSwitch(host_name, api, name)\n for name in discovery_info[CONF_CONTAINERS].keys()\n if discovery_info[CONF_CONTAINERS][name][CONF_CONTAINER_SWITCH]]\n\n if switches:\n async_add_entities(switches)\n else:\n _LOGGER.info(\"No containers setup\")", "def state(config: dict):\n\n async def state_callback(device):\n if device.basic_info is not None:\n if device.available:\n print_device_details(device)\n\n device.shutdown_event_loop()\n\n logger.info(\"Initialising SonoffSwitch with host %s\" % config[\"host\"])\n SonoffSwitch(\n host=config[\"host\"],\n callback_after_update=state_callback,\n logger=logger,\n device_id=config[\"device_id\"],\n api_key=config[\"api_key\"],\n )", "def run():\r\n plugin_ins = EmPluginCgwshDeviceControl()\r\n plugin_ins.merge_cgwsh_device()", "def wsSetupCMSOSGEnvironment_(self):\n txt = '\\n#Written by cms_cmssw::wsSetupCMSOSGEnvironment_\\n'\n txt += ' echo \">>> setup CMS OSG environment:\"\\n'\n txt += ' echo \"set SCRAM ARCH to ' + self.executable_arch + '\"\\n'\n txt += ' export SCRAM_ARCH='+self.executable_arch+'\\n'\n txt += ' echo \"SCRAM_ARCH = $SCRAM_ARCH\"\\n'\n txt += ' echo \"OSG_APP is $OSG_APP\"\\n'\n txt += ' if [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\\n'\n txt += ' cmsSetupFile=$OSG_APP/cmssoft/cms/cmsset_default.sh\\n'\n txt += ' elif [ -f $CVMFS/cms.cern.ch/cmsset_default.sh ] ; then \\n'\n txt += ' cmsSetupFile=$CVMFS/cms.cern.ch/cmsset_default.sh\\n'\n txt += ' elif [ -f /cvmfs/cms.cern.ch/cmsset_default.sh ] ; then \\n'\n txt += ' cmsSetupFile=/cvmfs/cms.cern.ch/cmsset_default.sh\\n'\n txt += ' else\\n'\n txt += ' echo \"CVMSF = $CVMFS\"\\n'\n txt += ' echo \"/cvmfs/ is\"\\n'\n txt += ' echo \"ls /\"\\n'\n txt += ' ls /\\n'\n txt += ' echo \"ls /cvmfs\"\\n'\n txt += ' ls /cvmfs\\n'\n txt += ' echo \"ls /cvmfs/cms.cern.ch\"\\n'\n txt += ' ls /cvmfs/cms.cern.ch\\n'\n txt += ' ls /cvmfs/cms.cern.ch/cmsset*\\n'\n txt += ' ls /cvmfs/cms.cern.ch/cmsset_default.sh\\n'\n txt += ' echo \"ERROR ==> cmsset_default.sh file not found\"\\n'\n txt += ' job_exit_code=10020\\n'\n txt += ' func_exit\\n'\n txt += ' fi\\n'\n txt += '\\n'\n txt 
+= ' echo \"sourcing $cmsSetupFile ...\"\\n'\n txt += ' source $cmsSetupFile\\n'\n txt += ' result=$?\\n'\n txt += ' if [ $result -ne 0 ]; then\\n'\n txt += ' echo \"ERROR ==> problem sourcing $cmsSetupFile\"\\n'\n txt += ' job_exit_code=10032\\n'\n txt += ' func_exit\\n'\n txt += ' else\\n'\n txt += ' echo \"==> setup cms environment ok\"\\n'\n txt += ' echo \"SCRAM_ARCH = $SCRAM_ARCH\"\\n'\n txt += ' fi\\n'\n\n return txt", "def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [CHURCH, CHURCH + 1, HALL, HALL + 1]:\n GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n import jsonpath\n jsonpath = jsonpath.jsonpath\n global HEAT_PUMPS\n hub.update_overview()\n if int(hub.config.get(CONF_CLIMATE, 1)):\n HEAT_PUMPS = hub.get('$.heatPumps')\n if HEAT_PUMPS:\n for heat_pump in HEAT_PUMPS[0]:\n device_label = jsonpath(heat_pump, '$.deviceLabel')[0]\n add_entities([\n VerisureHeatPump(device_label)\n ])", "def one_time_setup(node, rhbuild, branch: str) -> None:\n node.exec_command(\n cmd=f\"sudo rm -rf ceph && git clone --branch {branch} --single-branch --depth 1 {TEST_REPO}\"\n )\n os_ver = rhbuild.split(\"-\")[-1]\n ceph_ver = rhbuild.split(\"-\")[0]\n\n if os_ver == \"7\":\n node.exec_command(\n cmd=\"sed -i '49 a rbd feature disable testimg1 object-map fast-diff deep-flatten' \"\n \"ceph/qa/workunits/rbd/kernel.sh\"\n )\n\n if \"4.\" in ceph_ver:\n node.exec_command(\n cmd=\"sed -i 's/blocklist/blacklist/g' \"\n \"ceph/qa/workunits/rbd/krbd_exclusive_option.sh\"\n )\n\n try:\n node.exec_command(cmd=\"rpm -qa | grep xmlstarlet\")\n return\n except BaseException: # noqa\n pass\n\n EPEL_RPM = (\n f\"https://dl.fedoraproject.org/pub/epel/epel-release-latest-{os_ver}.noarch.rpm\"\n )\n\n commands = [\n {\"cmd\": f\"yum install -y {EPEL_RPM} --nogpgcheck\", \"sudo\": True},\n {\n \"cmd\": \"yum install -y xmlstarlet rbd-nbd qemu-img cryptsetup --nogpgcheck\",\n \"sudo\": True,\n },\n ]\n for command in commands:\n node.exec_command(**command)\n\n # Blind sleep to ensure the Mon service has restarted.\n # TODO: Identify a way to check the service is running\n sleep(5)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n thread1 = QQ(config[QQ_NUMBER])\n thread1.start()\n object_qq = Qqsensor(hass, QQ_NUMBER, thread1)\n add_devices([object_qq])", "def init():\n\n global registry, fsk_router, ook_router\n\n radio.init()\n OpenThings.init(Devices.CRYPT_PID)\n\n fsk_router = Registry.Router(\"fsk\")\n\n #OOK receive not yet written\n #It will be used to be able to learn codes from Energenie legacy hand remotes\n ##ook_router = Registry.Router(\"ook\")\n\n registry = Registry.DeviceRegistry()\n registry.set_fsk_router(fsk_router)\n ##registry.set_ook_router(ook_router\n\n path = os.path.join(sys.path[0], registry.DEFAULT_FILENAME)\n if os.path.isfile(path):\n registry.load_from(path)\n print(\"loaded registry from file\")\n registry.list()\n fsk_router.list()\n\n # Default discovery mode, unless changed by app\n ##discovery_none()\n ##discovery_auto()\n ##discovery_ask(ask)\n discovery_autojoin()\n ##discovery_askjoin(ask)", "def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n 
cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, params)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n import sharp_aquos_rc\n\n name = config.get(CONF_NAME)\n port = config.get(CONF_PORT)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n if discovery_info:\n _LOGGER.debug('%s', discovery_info)\n vals = discovery_info.split(':')\n if len(vals) > 1:\n port = vals[1]\n\n host = vals[0]\n remote = sharp_aquos_rc.TV(host,\n port,\n username,\n password)\n add_devices([SharpAquosTVDevice(name, remote)])\n return True\n\n host = config.get(CONF_HOST)\n remote = sharp_aquos_rc.TV(host,\n port,\n username,\n password)\n\n add_devices([SharpAquosTVDevice(name, remote)])\n return True", "def initialize_home_hub(argv):\n parse_cmd_line_opts(argv)\n init_logging()\n init_error_reporting()\n \n # Verify we have a valid home id\n if HOME_ID is None:\n print('Home ID is invalid or missing. 
Please provide an integer following the -i flag')\n exit()\n\n # Begin Home Hub Specific Setup\n logger.info('Starting the Home Hub main program for Home: %s', HOME_ID)\n\n # Get the email and password for this HH's user from the env vars\n powernet_user_email = os.getenv('POWERNET_USER_EMAIL', None)\n powernet_user_password = os.getenv('POWERNET_USER_PASSWORD', None)\n \n if powernet_user_email is None:\n logger.info('Missing the required login email address')\n logger.info('Please set the POWERNET_USER_EMAIL environment variable and try again')\n exit()\n \n if powernet_user_password is None:\n logger.info('Missing the required login password')\n logger.info('Please set the POWERNET_USER_PASSWORD environment variable and try again')\n exit()\n \n # attempt to authenticate against our API\n form_payload = {'email': powernet_user_email, 'password': powernet_user_password}\n response = requests.post('https://pwrnet-158117.appspot.com/api/v1/powernet_user/auth/', data=form_payload)\n auth_token = response.json()['token']\n\n # Initializing variables for queue and threads\n rpi = HardwareInterface(house_id=HOME_ID, gpio_map=None, auth_token=auth_token)\n buffer_size = 8\n q_ai = Queue(buffer_size)\n\n # Initialize threads\n producer_ai_thread = Thread(name='Producer', target=rpi.producer_ai, args=(q_ai,))\n producer_ai_thread.start()\n\n consumer_ai_thread = Thread(name='Consumer', target=rpi.consumer_ai, args=(q_ai,))\n consumer_ai_thread.start()\n\n devices_thread = Thread(name='Device', target=rpi.devices_th)\n devices_thread.start()\n\n load_control_thread = Thread(name=\"LoadControl\", target=rpi.local_controller_th)\n load_control_thread.start()", "def __init__(self, device: SHCDevice, parent_id: str, entry_id: str) -> None:\n super().__init__(device, parent_id, entry_id)\n switcher = {\n \"ENTRANCE_DOOR\": BinarySensorDeviceClass.DOOR,\n \"REGULAR_WINDOW\": BinarySensorDeviceClass.WINDOW,\n \"FRENCH_WINDOW\": BinarySensorDeviceClass.DOOR,\n \"GENERIC\": BinarySensorDeviceClass.WINDOW,\n }\n self._attr_device_class = switcher.get(\n self._device.device_class, BinarySensorDeviceClass.WINDOW\n )", "def _initialize_hardware(self):\n # Import\n try:\n from gpiozero import MCP3008\n except Exception as ex:\n logging.error('\\n *** ERROR importing gpiozero: {}'.format(ex))\n\n # Things failed, must be running locally, not on a widget, so don't\n # bother initializing the MCP3008\n return\n\n # Initialize the MCP3008\n try:\n self._sensor = MCP3008(channel=0)\n except Exception as ex:\n logging.error('\\n *** ERROR initializing MCP3008: {}'.format(ex))\n return\n\n # Start force loop thread\n threading.Thread(target=self._force_loop, daemon=True).start()", "def setup(params):\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n os.environ['TF_SYNC_ON_FINISH'] = str(int(params.sync_on_finish))\n argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\n # Sets GPU thread settings\n params = params._replace(gpu_thread_mode=params.gpu_thread_mode.lower())\n os.environ['TF_GPU_THREAD_MODE'] = params.gpu_thread_mode\n\n # Default to two threads. 
One for the device compute and the other for\n # memory copies.\n per_gpu_thread_count = params.per_gpu_thread_count or 2\n total_gpu_thread_count = per_gpu_thread_count * params.num_gpus\n\n os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)\n\n if not params.num_inter_threads and params.gpu_thread_mode in [\n 'gpu_private', 'gpu_shared'\n ]:\n cpu_count = multiprocessing.cpu_count()\n main_thread_count = max(cpu_count - total_gpu_thread_count, 1)\n params = params._replace(num_inter_threads=main_thread_count)\n\n platforms_util.initialize(params, create_config_proto(params))\n\n return params", "def setup_sshd(self):\n # Update apt repository\n command = 'apt update -y > /dev/null 2>&1'\n if self.debug is True:\n print('Executing apt update -y ')\n try:\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n except:\n print(\"An error occured during 'apt update -u'\")\n\n # Install ssh package\n command = 'apt install ssh -y > /dev/null 2>&1'\n if self.debug is True:\n print('Executing apt install ssh -y')\n try:\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n except:\n print(\"An error occured during 'apt install ssh -y' while installing ssh\")\n\n # Configure sshd using the config\n self.config_sshd()\n\n # Reload sshd config\n try:\n command = \"service ssh restart > /dev/null 2>&1\"\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n print('SSHD_installed and configured successfully, SSHD listening on port {}'.format(self.ssh_port))\n except:\n print('An error occured during ssh \"sudo service ssh reload\" while installing ssh')", "async def async_setup_platform(hass: HomeAssistantType,\n config: ConfigType,\n async_add_entities,\n discovery_info=None) -> None:\n _LOGGER.info(\"Startup Youjia platform configuration.\")\n\n if (discovery_info is not None and config is None) or len(config) == 0:\n config = discovery_info\n\n if config is None:\n return\n\n if discovery_info is None:\n return\n\n if config['names'] is None:\n return\n\n for index, name in config['names'].items():\n entry_id = \"{0}{1:0>2}\".format(config['entity_id'], index)\n _LOGGER.info(\"Adding brightness light {} of {} into HA.\".format(name, entry_id))\n async_add_entities([YoujiaX160(name,\n entry_id,\n config['entity_id'],\n index,\n config['total_solts'],\n config['host_name']\n )], True)\n if config['auto'] is True:\n thread = threading.Thread(target=auto_checking_switch_state,\n args=(get_host(config['host_name']), config['entity_id']))\n thread.daemon = True\n SWITCH_STATUS_CHECKING_THREAD[config['name']] = thread\n thread.start()", "def setup_tf_environment() -> None:\n _setup_cpu_environment()\n _setup_gpu_environment()", "def wsSetupEnvironment(self, nj=0):\n psetName = 'pset.py'\n\n # Prepare JobType-independent part\n txt = '\\n#Written by cms_cmssw::wsSetupEnvironment\\n'\n txt += 'echo \">>> setup environment\"\\n'\n txt += 'echo \"set SCRAM ARCH to ' + self.executable_arch + '\"\\n'\n txt += 'export SCRAM_ARCH=' + self.executable_arch + '\\n'\n txt += 'echo \"SCRAM_ARCH = $SCRAM_ARCH\"\\n'\n txt += 'if [ $middleware == LCG ] || [ $middleware == CAF ] || [ $middleware == LSF ]; then \\n'\n txt += self.wsSetupCMSLCGEnvironment_()\n txt += 'elif [ $middleware == OSG ]; then\\n'\n txt += ' WORKING_DIR=`/bin/mktemp -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\\n'\n txt += ' if [ ! $? 
== 0 ] ;then\\n'\n txt += ' echo \"ERROR ==> OSG $WORKING_DIR could not be created on WN `hostname`\"\\n'\n txt += ' job_exit_code=10016\\n'\n txt += ' func_exit\\n'\n txt += ' fi\\n'\n txt += ' echo \">>> Created working directory: $WORKING_DIR\"\\n'\n txt += '\\n'\n txt += ' echo \"Change to working directory: $WORKING_DIR\"\\n'\n txt += ' cd $WORKING_DIR\\n'\n txt += ' echo \">>> current directory (WORKING_DIR): $WORKING_DIR\"\\n'\n txt += self.wsSetupCMSOSGEnvironment_()\n #Setup SGE Environment\n txt += 'elif [ $middleware == SGE ]; then\\n'\n txt += self.wsSetupCMSLCGEnvironment_()\n\n txt += 'elif [ $middleware == ARC ]; then\\n'\n txt += self.wsSetupCMSLCGEnvironment_()\n\n #Setup PBS Environment\n txt += 'elif [ $middleware == PBS ] || [ $middleware == PBSV2 ] || [ $middleware == SLURM ]; then\\n'\n txt += self.wsSetupCMSLCGEnvironment_()\n\n txt += 'fi\\n'\n\n # Prepare JobType-specific part\n scram = self.scram.commandName()\n txt += '\\n\\n'\n txt += 'echo \">>> specific cmssw setup environment:\"\\n'\n txt += 'echo \"CMSSW_VERSION = '+self.version+'\"\\n'\n txt += scram+' project CMSSW '+self.version+'\\n'\n txt += 'status=$?\\n'\n txt += 'if [ $status != 0 ] ; then\\n'\n txt += ' echo \"ERROR ==> CMSSW '+self.version+' not found on `hostname`\" \\n'\n txt += ' job_exit_code=10034\\n'\n txt += ' func_exit\\n'\n txt += 'fi \\n'\n txt += 'cd '+self.version+'\\n'\n txt += 'SOFTWARE_DIR=`pwd`; export SOFTWARE_DIR\\n'\n txt += 'echo \">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR\" \\n'\n txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\\n'\n txt += 'if [ $? != 0 ] ; then\\n'\n txt += ' echo \"ERROR ==> Problem with the command: \"\\n'\n txt += ' echo \"eval \\`'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME \\` at `hostname`\"\\n'\n txt += ' job_exit_code=10034\\n'\n txt += ' func_exit\\n'\n txt += 'fi \\n'\n # Handle the arguments:\n txt += \"\\n\"\n txt += \"## number of arguments (first argument always jobnumber, the second is the resubmission number)\\n\"\n txt += \"\\n\"\n txt += \"if [ $nargs -lt \"+str(self.argsList)+\" ]\\n\"\n txt += \"then\\n\"\n txt += \" echo 'ERROR ==> Too few arguments' +$nargs+ \\n\"\n txt += ' job_exit_code=50113\\n'\n txt += \" func_exit\\n\"\n txt += \"fi\\n\"\n txt += \"\\n\"\n\n # Prepare job-specific part\n job = common.job_list[nj]\n if (self.datasetPath):\n txt += '\\n'\n txt += 'DatasetPath='+self.datasetPath+'\\n'\n\n txt += 'PrimaryDataset='+self.primaryDataset +'\\n'\n txt += 'DataTier='+self.dataTier+'\\n'\n txt += 'ApplicationFamily=cmsRun\\n'\n\n else:\n txt += 'DatasetPath=MCDataTier\\n'\n txt += 'PrimaryDataset=null\\n'\n txt += 'DataTier=null\\n'\n txt += 'ApplicationFamily=MCDataTier\\n'\n if self.pset != None:\n pset = os.path.basename(job.configFilename())\n pkl = os.path.basename(job.configFilename()) + '.pkl'\n txt += '\\n'\n txt += 'cp $RUNTIME_AREA/'+pset+' .\\n'\n txt += 'cp $RUNTIME_AREA/'+pkl+' .\\n'\n\n txt += 'PreserveSeeds=' + ','.join(self.preserveSeeds) + '; export PreserveSeeds\\n'\n txt += 'IncrementSeeds=' + ','.join(self.incrementSeeds) + '; export IncrementSeeds\\n'\n txt += 'echo \"PreserveSeeds: <$PreserveSeeds>\"\\n'\n txt += 'echo \"IncrementSeeds:<$IncrementSeeds>\"\\n'\n\n txt += 'mv -f ' + pset + ' ' + psetName + '\\n'\n #if self.var_filter:\n # #print \"self.var_filter = \",self.var_filter\n # txt += \"export var_filter=\"+\"'\"+self.var_filter+\"'\\n\"\n # txt += 'echo $var_filter'\n else:\n txt += '\\n'\n if self.AdditionalArgs: txt += 'export 
AdditionalArgs=\\\"%s\\\"\\n'%(self.AdditionalArgs)\n if int(self.NumEvents) != 0: txt += 'export MaxEvents=%s\\n'%str(self.NumEvents)\n return txt", "def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)", "def usb_setup():\n print(\"Warning: using deprecated usb_setup routine!\")\n largest = largest_partition()\n medium = medium_partition()\n smallest = smallest_partition()\n\n print(\"Starting USB installation\")\n print(\"Using {} as archive storage\".format(largest))\n print(\"Using {} as volatile storage\".format(medium))\n print(\"Using {} as important storage\".format(smallest))\n\n lncm_usb = \"/usr/local/sbin/lncm-usb\"\n\n cli_invocation = [\n lncm_usb,\n largest,\n medium,\n smallest,\n get_uuid(largest),\n get_uuid(medium),\n get_uuid(smallest),\n str(largest_part_size()),\n ]\n\n call(cli_invocation)", "def setup(hass, base_config):\n from pyhusmow import API as HUSMOW_API\n\n config = base_config.get(DOMAIN)\n\n if hass.data.get(DOMAIN) is None:\n hass.data[DOMAIN] = { 'devices': [] }\n\n api = HUSMOW_API()\n api.login(config.get(CONF_USERNAME), config.get(CONF_PASSWORD))\n\n robots = api.list_robots()\n\n if not robots:\n return False\n\n for robot in robots:\n hass.data[DOMAIN]['devices'].append(AutomowerDevice(robot, api))\n\n for component in AUTOMOWER_COMPONENTS:\n discovery.load_platform(hass, component, DOMAIN, {}, base_config)\n\n return True", "def setup_platform(hass, config, add_devices, discovery_info=None):\n station_id = config.get(CONF_STATION_ID)\n name = config.get(CONF_NAME)\n\n logger = logging.getLogger(__name__)\n probe = ZamgData(station_id=station_id, logger=logger)\n\n sensors = [ZamgSensor(probe, variable, name)\n for variable in config[CONF_MONITORED_CONDITIONS]]\n\n add_devices(sensors, True)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n token = config.get(CONF_ACCESS_TOKEN)\n\n client = ClementineRemote(host, port, token, reconnect=True)\n\n add_entities([ClementineDevice(client, config[CONF_NAME])])", "def setup_platform(hass, config, add_devices, discovery_info=None):\n token = load_token(hass)\n \n if not token:\n request_app_setup(hass, config, add_devices, discovery_info)\n else:\n continue_setup_platform(hass, config, token, add_devices, discovery_info)", "def perform():\n others = not flag_do_only_gcc_build\n locate_gcc_subdir()\n setup_cross()\n if others:\n setup_kernel_headers()\n setup_binutils()\n setup_prereqs()\n setup_sysroot()\n setup_gcc()\n if others:\n setup_kernel_headers()\n setup_glibc()", "def localhost():\n env.run = local\n env.cd = lcd\n env.deployment = 'local'", "def configure_switch(self, config):\n raise NotImplementedError", "def setUp(self):\n self.os = \"debian\"" ]
[ "0.61745834", "0.6164841", "0.6163982", "0.60993034", "0.6015168", "0.5961966", "0.5939102", "0.59182364", "0.59003484", "0.58913684", "0.58688366", "0.58645135", "0.5812807", "0.5794662", "0.5756036", "0.57241225", "0.57174337", "0.5713566", "0.570899", "0.57045996", "0.56270975", "0.56062096", "0.5587231", "0.55821085", "0.55755335", "0.55533946", "0.5538434", "0.5532444", "0.55197966", "0.54985976", "0.54390925", "0.5426387", "0.54171574", "0.5416576", "0.5396849", "0.53881544", "0.53850216", "0.53613824", "0.53437763", "0.5342868", "0.5339579", "0.5339292", "0.5317407", "0.53162247", "0.53146017", "0.5313041", "0.53015846", "0.52868325", "0.52708405", "0.52666974", "0.5266145", "0.52610344", "0.5255005", "0.52507746", "0.52382153", "0.5237488", "0.52263194", "0.521789", "0.5214051", "0.5213063", "0.52089524", "0.52058107", "0.52042854", "0.51989836", "0.51989836", "0.51976323", "0.51972675", "0.5196895", "0.51946867", "0.5193587", "0.5186255", "0.51758605", "0.51641613", "0.5163859", "0.5161068", "0.5159982", "0.5155964", "0.51541215", "0.5154039", "0.51483446", "0.51476866", "0.5145782", "0.51433104", "0.51398116", "0.5136196", "0.51307017", "0.5126248", "0.5123439", "0.51187056", "0.51156104", "0.51083696", "0.5106", "0.5101316", "0.50925297", "0.5085235", "0.5084993", "0.5084093", "0.5081993", "0.50674045", "0.5065888", "0.5064889" ]
0.0
-1
Initialize a SHC number.
def __init__( self, device: SHCDevice, parent_id: str, entry_id: str, attr_name: str | None = None, ) -> None: super().__init__(device, parent_id, entry_id) self._attr_name = ( f"{device.name}" if attr_name is None else f"{device.name} {attr_name}" ) self._attr_unique_id = ( f"{device.root_device_id}_{device.id}" if attr_name is None else f"{device.root_device_id}_{device.id}_{attr_name.lower()}" ) self._device: SHCThermostat = device
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_hex_str(value):\n \n return SHex(value)", "def __init__(self, *args):\n _snap.TStrIntSH_swiginit(self, _snap.new_TStrIntSH(*args))", "def __init__(self, *args):\n this = _ida_hexrays.new_cnumber_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, signed_wss_magnitude):\n self.signed_wss_magnitude = signed_wss_magnitude", "def __init__(self, *args):\n _snap.TIntIntHHI_swiginit(self, _snap.new_TIntIntHHI(*args))", "def __init__(self, *args):\n _snap.TIntHSI_swiginit(self, _snap.new_TIntHSI(*args))", "def __init__(self, swc=0.1, sor=0.05, kro0=0.9, no=2.0, krw0=0.4, nw=2.0):\n self.kro0 = kro0\n self.krw0 = krw0\n self.no = no\n self.nw = nw\n self.swc = swc\n self.sor = sor", "def __init__(self,\n h=0.01):\n super().__init__(1, 100000000,\n 1, 1,\n 5, 10,\n 0, -1, h)", "def genSCID():\n scid_hex = getRandomBytes(8)\n scid_hex = getSHA256Hex(scid_hex)\n scid_hex = scid_hex[0:8]\n return scid_hex", "def __init__(self, *args):\n _snap.TStrIntPrIntHI_swiginit(self, _snap.new_TStrIntPrIntHI(*args))", "def sha_init(self):\n pass", "def initial_shear_modulus(self):\n pass", "def __init__(self, hsp):\n raise NotImplementedError", "def __init__(self, w0=6):\n self.w0 = w0\n if w0 == 6:\n # value of C_d from TC98\n self.C_d = 0.776", "def initial_shear_modulus(self):\n return self.c1 * self.c2", "def __init__(self, *args):\n _snap.TIntHI_swiginit(self, _snap.new_TIntHI(*args))", "def __init__(self, *args):\n _snap.TUIntH_swiginit(self, _snap.new_TUIntH(*args))", "def __init__(self, *args):\n _snap.TIntIntHI_swiginit(self, _snap.new_TIntIntHI(*args))", "def fromHSL(h, s, l):\n rgb = colorsys.hls_to_rgb(h,l,s)\n return IColor(rgb[0], rgb[1], rgb[2])", "def __init__(self, *args):\n _snap.TIntIntPrHI_swiginit(self, _snap.new_TIntIntPrHI(*args))", "def __init__(self, *args):\n _snap.TIntPrIntHI_swiginit(self, _snap.new_TIntPrIntHI(*args))", "def __init__(self):\n segment_number = 2\n list_digits = 3\n super().__init__(6, segment_number, list_digits, \"000000\")", "def __init__(self, *args):\n _snap.TUIntHI_swiginit(self, _snap.new_TUIntHI(*args))", "def init(self, scl: Pin, sda: Pin, *, freq: int = 400000) -> None:\n ...", "def __init__(self):\n segment_number = 2\n list_digits = 2\n super().__init__(6, segment_number, list_digits, \"000000\")", "def __init__(self, seed):\n self.m = 2**32\n self.a = np.uint32(1664525)\n self.c = np.uint32(1013904223)\n\n self.state = np.uint32(seed)\n \n self._norm = np.float32(np.uint32(-1))\n return", "def __init__(self, *args):\n _snap.TIntStrPrIntHI_swiginit(self, _snap.new_TIntStrPrIntHI(*args))", "def __init__(self):\n self.N_Chls = 0\n self.N_Chl_a = 0\n self.N_Chl_b = 0\n self.type = \"none\"", "def __init__(self, *args):\n _snap.TStrIntPrHI_swiginit(self, _snap.new_TStrIntPrHI(*args))", "def __init__(self, bits): \n self.n = self.generarN(bits)\n length = self.bitLen(self.n)\n seed = random.getrandbits(length)\n self.semilla(seed)", "def __init__(self, guidstr=None):\n v_prim.__init__(self)\n self._vs_length = 16\n self._vs_value = '\\x00' * 16\n self._vs_fmt = '16s'\n self._guid_fields = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n if guidstr is not None:\n self._parseGuidStr(guidstr)", "def __init__(s,i,j):\n # Posição do centro\n s.cx, s.cy = convert(i,j)\n # Cor (pode ser passada para o construtor no futuro)\n s.cor = (200,200,200)\n\n # Vértices do hexágono\n s.pontos = (\n (s.cx, s.cy-L),\n (s.cx+l, s.cy-L/2),\n (s.cx+l, s.cy+L/2),\n (s.cx, s.cy+L),\n (s.cx-l, s.cy+L/2),\n (s.cx-l, s.cy-L/2),\n )", "def __init__(\n 
self,\n poly_modulus_degree=8192,\n coeff_mod_bit_sizes=[60, 40, 40],\n scale_bits=40,\n scheme=\"CKKS\",\n ):\n self._context = None\n self.scheme_type_mapping = {\n \"CKKS\": ts.SCHEME_TYPE.CKKS,\n \"BFV\": ts.SCHEME_TYPE.BFV,\n }\n self.poly_modulus_degree = poly_modulus_degree\n self.coeff_mod_bit_sizes = coeff_mod_bit_sizes\n self.scale_bits = scale_bits\n _scheme = scheme\n # Setup TenSEAL context\n self.scheme_type = self.scheme_type_mapping[_scheme]\n self.serialized = None", "def init_cg(self):\n self.add_pc(2)\n self.pb[self.pc - 2] = \"ASSIGN\", _m(CodeGenerator.REGISTER_SIZE + CodeGenerator.INIT_MEMORY_VALUE, \"#\"), _m(\n self.top_sp)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(self.top_sp), _m(self.top_sp, \"@\")\n self.init_global_func()\n self.make_output()", "def __init__(self, c, w, t, s):\n self.color = c\n self.weight = w\n self.type = t\n self.size = s", "def __init__(self, *args):\n _snap.TIntIntVHI_swiginit(self, _snap.new_TIntIntVHI(*args))", "def __init__(self, num):\n if num == 1 or num == 2:\n segment_number = 0\n else:\n segment_number = 1\n if num == 1 or num == 3:\n list_digits = 0\n else:\n list_digits = 1\n super().__init__(7, segment_number, list_digits)\n self.num = num\n self._th_blink = None \n self._end_th_blink = False", "def __init__(self, *args):\n _snap.TStrIntHI_swiginit(self, _snap.new_TStrIntHI(*args))", "def __init__(self, *args):\n _snap.TIntIntPrVHI_swiginit(self, _snap.new_TIntIntPrVHI(*args))", "def __init__(self, *args):\n _snap.TIntPrIntVHI_swiginit(self, _snap.new_TIntPrIntVHI(*args))", "def __init__(self, *args):\n _snap.TStrPrIntHI_swiginit(self, _snap.new_TStrPrIntHI(*args))", "def __init__(self, *args):\n _snap.TIntTrIntHI_swiginit(self, _snap.new_TIntTrIntHI(*args))", "def __init__(self, *args):\n _snap.TIntS_swiginit(self, _snap.new_TIntS(*args))", "def __init__(self):\n # Create an 8-byte initialization vector", "def initial_shear_modulus(self):\n return 2.0 * (self.c1 + self.c2)", "def initialize_base(self, es):\n ## meta_parameters.cs_exponent == 1.0\n b = 1.0\n ## meta_parameters.cs_multiplier == 1.0\n self.cs = 1.0 * (es.sp.weights.mueff + 2)**b / (es.N**b + (es.sp.weights.mueff + 3)**b)\n self.ps = np.zeros(es.N)\n self.is_initialized_base = True\n return self", "def __init__(self, number_of_cheeses, number_of_stools):\n self.number_of_cheeses = number_of_cheeses\n self.number_of_stools = number_of_stools", "def __init__(self, *args):\n _snap.TIntIntHH_swiginit(self, _snap.new_TIntIntHH(*args))", "def __init__(self, *args):\n _snap.TIntVIntHI_swiginit(self, _snap.new_TIntVIntHI(*args))", "def __init__(self, *args):\n _snap.TIntPrIntPrVHI_swiginit(self, _snap.new_TIntPrIntPrVHI(*args))", "def __init__(self):\n self.ram = [0] * 256\n self.reg = [0] * 8\n self.pc = 0\n self.SP = 7\n self.reg[self.SP] = 0xf4\n self.E = 0\n self.L = 0\n self.G = 0", "def __init__(self, *args):\n _snap.TStrTrIntHI_swiginit(self, _snap.new_TStrTrIntHI(*args))", "def write_init(self):\n\n writer = CodeWriteSubroutines(self.filename)\n\n bootstrap_code = \" @256\\n\"\n bootstrap_code += \" D=A\\n\"\n bootstrap_code += \" @SP\\n\"\n bootstrap_code += \" M=D\\n\"\n\n bootstrap_code += writer.write_call(['call', 'Sys.init', '0'])\n\n return bootstrap_code", "def cosh(self):\t\t\n\t\tval = np.cosh(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.sinh(self.val)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = 
None\n\t\treturn Var(val, der)", "def InitStats(ss):\n\n ss.SumSSE = 0\n ss.SumAvgSSE = 0\n ss.SumCosDiff = 0\n ss.SumErr = 0\n ss.FirstZero = -1\n ss.NZero = 0\n\n ss.TrlErr = 0\n ss.TrlSSE = 0\n ss.TrlAvgSSE = 0\n ss.EpcSSE = 0\n ss.EpcAvgSSE = 0\n ss.EpcPctErr = 0\n ss.EpcCosDiff = 0", "def __init__(self):\n \n self.ram = [0] * 256\n self.reg = [0] * 8\n self.SP = 8 # ? last item in our regsiters\n self.pc = 0 # PROGRAM COUNTER", "def __init__(self):\n segment_number = 2\n list_digits = 4\n super().__init__(4, segment_number, list_digits, default_val=\"0 \")\n self.set_credit(self.get_credit())", "def __init__(self, *args):\n _snap.TStrIntVHI_swiginit(self, _snap.new_TStrIntVHI(*args))", "def __init__(self, *args):\n _snap.TIntPrStrHI_swiginit(self, _snap.new_TIntPrStrHI(*args))", "def __init__(self, *args):\n _snap.TIntH_swiginit(self, _snap.new_TIntH(*args))", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def __init__(self):\n super().__init__(\"ccx\", 3, [])", "def cosh(data):\n return _make.cosh(data)", "def h0(data_1=None, data_2=None):\n\n hsh = SHA512.new()\n hsh.update(b\"3\")\n hsh.update(data_1)\n hsh.update(data_2)\n return hsh", "def __init__(self, sh_order, bval, bvec, smooth=0, sampling_points=None,\n sampling_edges=None):\n bvec = bvec[:, bval > 0]\n m, n = sph_harm_ind_list(sh_order)\n x, y, z = bvec\n r, pol, azi = cart2sphere(x, y, z)\n B = real_sph_harm(m, n, azi[:, None], pol[:, None])\n L = -n*(n+1)\n legendre0 = lpn(sh_order, 0)[0]\n F = legendre0[n]\n self.B = B\n self._m = m\n self._n = n\n self._set_fit_matrix(B, L, F, smooth)\n if sampling_points is not None:\n self.set_sampling_points(sampling_points, sampling_edges)", "def __init__(self, level):\n CongruenceSubgroup.__init__(self, level)\n\n # We *don't* call the GammaH init script, as this requires calculating\n # generators for the units modulo N which is time-consuming; this will\n # be done if needed by the _generators_for_H and _list_of_elements_in_H\n # methods.\n #\n #GammaH_class.__init__(self, level, [int(x) for x in IntegerModRing(level).unit_gens()])", "def loadsdss(hdu):\n farr=hdu[0].data[0]\n xarr=np.arange(len(farr))\n warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))\n return create_spectrum(warr, farr)", "def loadsdss(hdu):\n farr=hdu[0].data[0]\n xarr=np.arange(len(farr))\n warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))\n return create_spectrum(warr, farr)", "def __init__(self):\n self.modulus = 769\n self.hs = [[]] * self.modulus", "def __init__(self, *args):\n _snap.TStrIntPrIntH_swiginit(self, _snap.new_TStrIntPrIntH(*args))", "def __init__(self, lzw_min_code_sz, col_table_sz):\n self.code_table = dict()\n clear_code = 1<<lzw_min_code_sz\n eoi_code = clear_code + 1\n self.code_table[clear_code] = [CLEARCODEVAL]\n self.code_table[eoi_code] = [EOICODEVAL]\n for color in range(col_table_sz):\n self.code_table[color] = [color]", "def __init__(self, *args):\n _snap.TStrIntFltPrHI_swiginit(self, _snap.new_TStrIntFltPrHI(*args))", "def 
__init__(self, *args):\n _snap.TStrIntPrVHI_swiginit(self, _snap.new_TStrIntPrVHI(*args))", "def initialise(self):\n # Can take quite a lot of time due to the homing\n print(\"Initialising spectrograph.\")\n err = self._dll.ShamrockInitialize()\n self.status(\"Initialisation\", err)", "def __init__(self, seed):\n # MT 19937 constants\n self.state_size = 624\n self.mt_const0 = 1812433253\n self.mt_const1 = 2636928640\n self.mt_const2 = 4022730752\n self.mt_const3 = 0x80000000\n self.mt_const4 = 0x7fffffff\n\n # use this to maintain state for getting a single byte every time\n self.num = None\n self.count = 0\n \n self.index = 0\n self.mt = [0] * self.state_size\n self.mt[0] = seed\n for i in range(1, self.state_size):\n self.mt[i] = self.get_lsb(self.mt_const0 * (self.mt[i - 1] ^ self.mt[i - 1] >> 30) + i, 32)", "def __init__(self, lib: ctypes.CDLL, seed: int) -> None:\n self.stanlib = lib\n\n construct = self.stanlib.bs_rng_construct\n construct.restype = ctypes.c_void_p\n construct.argtypes = [ctypes.c_uint, star_star_char]\n self.ptr = construct(seed, None)\n\n if not self.ptr:\n raise RuntimeError(\"Failed to construct RNG.\")\n\n self._destruct = self.stanlib.bs_rng_destruct\n self._destruct.restype = None\n self._destruct.argtypes = [ctypes.c_void_p]", "def __init__(self, *args):\n _snap.TIntFltHI_swiginit(self, _snap.new_TIntFltHI(*args))", "def __init__(self, hash_str, salt):\n self.hash = hash_str\n self.salt = salt", "def __init__(self, *args):\n _snap.TStrUInt64VHI_swiginit(self, _snap.new_TStrUInt64VHI(*args))", "def __init__(self, _SIn, _EofCh=0):\n _snap.TChRet_swiginit(self, _snap.new_TChRet(_SIn, _EofCh))", "def make_hsbk(specifier):\n if isinstance(specifier, str):\n h, s, b, k = ColourParser.hsbk(specifier)\n if b is None:\n b = 1\n\n elif isinstance(specifier, (list, tuple)):\n h, s, b, k = 0, 0, 1, 3500\n if len(specifier) > 0:\n h = specifier[0]\n if len(specifier) > 1:\n s = specifier[1]\n if len(specifier) > 2:\n b = specifier[2]\n if len(specifier) > 3:\n k = specifier[3]\n\n elif isinstance(specifier, dict):\n h = specifier.get(\"hue\", 0)\n s = specifier.get(\"saturation\", 0)\n b = specifier.get(\"brightness\", 1)\n k = specifier.get(\"kelvin\", 3500)\n\n return {\n \"hue\": h or 0,\n \"saturation\": s or 0,\n \"brightness\": b if b is not None else 1,\n \"kelvin\": int(k) if k is not None else 3500,\n }", "def __init__(self, S, f, num_t_sens, num_t_insens):\n # Number of states \n self.S = S \n self.f = f\n self.t_sens = num_t_sens\n self.t_insens = num_t_insens\n \n self.name = 'CRF'", "def __init__(self, *args):\n _snap.TIntStrHI_swiginit(self, _snap.new_TIntStrHI(*args))", "def __init__(self, *args):\n _snap.TStrUInt64HI_swiginit(self, _snap.new_TStrUInt64HI(*args))", "def fromhls(self, h, l, s):\n\n r, g, b = hls_to_rgb(h, l, s)\n self.r = round_int(r * 255.0) & 0xFF\n self.g = round_int(g * 255.0) & 0xFF\n self.b = round_int(b * 255.0) & 0xFF", "def __init__(self):\r\n super(Clashtest, self).__init__(\"clashtest\")\r\n # load clashtest shader\r\n self.shader = Shader(\"clashtest\")\r\n\r\n self.img = (ctypes.c_char * (self.ix * 3))()\r\n self.step = 3 * int(self.ix / 50)\r\n self.img_sz = len(self.img)-3\r\n self.s_flg = False\r\n self.y0 = int(self.iy / 2)", "def initialise_source(self, c, key):\n if key == 'p':\n return 50e5\n elif key == 'h':\n flow = c.to_flow()\n if c.source_id == 'out1':\n T = 200 + 273.15\n return h_mix_pT(flow, T)\n else:\n T = 250 + 273.15\n return h_mix_pT(flow, T)", "def __init__(self, mol: Chem.Mol, shell_count: int, include_Hs : 
bool = False):\n\n self.shellCount : int = shell_count\n self.includeHs = include_Hs\n self.mol: Chem.Mol = self._getMolWithEHTcharges(mol)\n self.atomDescriptors: dict = dict()\n super().__init__()", "def __init__(self, *args):\n _snap.TUInt64HI_swiginit(self, _snap.new_TUInt64HI(*args))", "def __init__(self, x_0, y_0, initX, initY,h=5):\n self.x_0=x_0\n self.y_0=y_0\n self.x_init=initX\n self.y_init=initY\n self.step=h", "def make_init(self,L,noise=0.005):\n self.L = L\n self.x0 = self.hexagonal_lattice(int(np.ceil(self.L/0.5)),int(np.ceil(self.L/np.sqrt(3))),noise=noise)\n # self.x0 = self.hexagonal_lattice(self.n_c,self.n_c,noise=noise)\n # self.x0 = self.x0[self.x0.max(axis=1) < L*0.95]\n self.x0 += 1e-3\n self.x0 = self.x0[self.x0.max(axis=1) < L*0.97]\n self.x = self.x0\n self.n_c = self.x0.shape[0]\n self.n_C = self.n_c", "def __init__(self, *args):\n _snap.TIntUInt64HI_swiginit(self, _snap.new_TIntUInt64HI(*args))", "def __init__(self, number: str, suit: str) -> None:\n self.number = number\n self.suit = suit", "def __init__ (self, scHandle):\n Greenlet.__init__(self)\n\n self.scHandle = scHandle", "def __init__(self, *args):\n _snap.TIntIntH_swiginit(self, _snap.new_TIntIntH(*args))", "def __init__(self, *args):\n _snap.TIntIntPrH_swiginit(self, _snap.new_TIntIntPrH(*args))", "def __init__(self, st):\n\t\tself.mac = st[:6]\n\t\tself.hw_rev, self.sw_rev, self.buffer_capacity, \\\n\t\tself.max_point_rate = struct.unpack(\"<HHHI\", st[6:16])\n\t\tself.status = Status(st[16:36])", "def Cls(self):\n self.Bus.Write_uInt8(self.Address,0x10,0x00)", "def __init__(self):\n self.start_time = '00000000'\n self.__number = '000000000000000000000000'", "def __init__(self, *args):\n _snap.TIntPrIntH_swiginit(self, _snap.new_TIntPrIntH(*args))", "def initialise_source(self, c, key):\n if key == 'p':\n return 1e5\n elif key == 'h':\n if self.Q.val < 0 and self.Q.is_set:\n return 1e5\n elif self.Q.val > 0 and self.Q.is_set:\n return 5e5\n else:\n return 3e5" ]
[ "0.5653801", "0.55701524", "0.55095994", "0.5451962", "0.5409322", "0.5390917", "0.5353926", "0.53247625", "0.5211058", "0.5170437", "0.51681376", "0.5165097", "0.5136337", "0.51200897", "0.5113493", "0.51005596", "0.5099268", "0.5097797", "0.50901145", "0.50761837", "0.50750244", "0.50721306", "0.5067176", "0.5067048", "0.50537664", "0.5050364", "0.50298816", "0.5023319", "0.5012072", "0.5008709", "0.50078607", "0.5001568", "0.49998277", "0.4993314", "0.4989034", "0.49853128", "0.4985246", "0.49751282", "0.49713144", "0.4962592", "0.4955418", "0.49498218", "0.4935705", "0.49353337", "0.49345824", "0.4934251", "0.4930964", "0.49262393", "0.4925742", "0.49235803", "0.4920121", "0.49187195", "0.49176806", "0.4913395", "0.49033448", "0.4898881", "0.48988682", "0.48988318", "0.489723", "0.489278", "0.48897368", "0.48874027", "0.48872182", "0.4886757", "0.4879331", "0.48758745", "0.4872408", "0.4872408", "0.48702332", "0.48677424", "0.48645988", "0.48625958", "0.4851471", "0.48482844", "0.48476034", "0.48269254", "0.48247674", "0.482302", "0.48196936", "0.48181453", "0.48088115", "0.48081303", "0.48048252", "0.48041505", "0.48033753", "0.48015705", "0.4801275", "0.48007646", "0.47973442", "0.47972992", "0.47967815", "0.47916472", "0.47897884", "0.47839633", "0.4777831", "0.47743315", "0.47597948", "0.47519737", "0.47482708", "0.47471714", "0.47405267" ]
0.0
-1
Update the current value.
def set_native_value(self, value: float) -> None: self._device.offset = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateValue(self):\n self.value = self.var.get()", "def update(self, val):\n self.current_val = val\n self.redraw()", "async def updated(self, value):\n pass", "def update(self):\n self.value = self.sensor.update()", "def value(self, value):\n self._update_value(value)", "def update(self):\n try:\n value = self.get_value()\n except:\n self.send_connection_state(False)\n return\n self.send_connection_state(True)\n self.send_new_value(value)", "def update(self, value):\n # If the value has not already been set, set it.\n if self.value is None:\n self.value = value\n else:\n # Calculate the new value.\n self.value = ((1-self.weight) * self.value + self.weight * value)\n return self.value", "def update(self, x):\n pass", "def update( self, dval ):\n self.val[:] += dval[:]\n return", "def update(self):\n self._data.update()\n\n self._state = self._data.get_value(self._type)", "def update():", "def update():", "def updateItem(self, value):\n self.value = value\n self.age = 0\n self.freq += 1", "def update_val(self, val):\n self.in_val = val", "def set_curr_value(self, val):\n # only goal that is in progress can have it's current value changed\n if self._status != EGoalStatus.IN_PROGRESS:\n raise NotImplementedError('Cannot set value to finished or not started goal')\n # try cast to int - mainly for QuantifiedGoal representation\n val = self.fw.types.try_float_cast(val)\n # update both in the stages object and in raw data\n self._values[EStage.CURRENT] = self._data_process(val)\n self._skeleton.curr_value = val\n # use progressor to update the database\n self._progressor.dump_to_database(self)", "def update(self):\n\n pass", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def update(self, val, feats):\n raise NotImplementedError", "def _update(self):\n pass", "def updateValue(self):\n if len(self.__XValue) > 0:\n # TODO: Should be calling the base __append method\n self.values.append((self.__XValue[-1] + self.__offset) % 360)\n self.lastUpdate = time.time()", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self) -> None:\n ...", "def update_object ( self, event ):\n self.value = self.control.GetValue()", "def update(self):\r\n pass", "def value(self, value):\n\t\toldvalue = self._value\n\t\tself._value = value\n\t\tif oldvalue != value:\n\t\t\tself.changed()", "def update( ):\r\n pass", "def _set_value(self, value):\n if value is undefined:\n self._status = 3 if (self._count == 0) else 0\n return # new tick, but no update of value\n self._last_value = self._value\n self._value = value\n self._count += 1\n self._last_timestamp = self._timestamp\n self._timestamp = time.time()\n self._status = 0\n if self._ob is not None:\n ob = self._ob()\n if hasattr(ob, '_signal_changed'):\n ob._signal_changed(self)", "def change_value(self,val):\n self.val = val", "def update(self):\n raise NotImplementedError", "def Update(self, value):\n self.SetValue(self.GetValue() + tf.cast(value, self.dtype))", "def Update(self, value):\n self.SetValue(self.GetValue() + tf.cast(value, self.dtype))", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def update(self):\n self.ticker.update()\n 
self._values = self.ticker.values", "def _refresh(self):\n self._value = int(self._service.get_value(self._entry_point, self._path))", "def update(self, value):\n log_gui.debug(\"update value of field %s with : %s\", repr(self._name), value)\n wid = self._store_widget\n wid.setProperty(\"python-object\", value)\n wid.emit(self._sig)", "def update(self):\n self._state = 23", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def update(self)->None:\n pass", "def update_value(self, reward):\n\t\tval = self.value\n\t\tval = val + ((reward - val)/self.visited)\n\t\tself.value = val", "def update(self) -> None:\n self.data.update()\n self._state = round(self.data.rate[\"rates\"][self._target], 3)", "def update(self):\n raise NotImplementedError()", "def _update_value(self, value):\n old_hash = get_hash(self._value)\n new_hash = get_hash(value)\n self._value = value\n if old_hash is None or new_hash is None or (old_hash != new_hash):\n self.is_dirty = True", "def update_proxy(self, instance, value):\n self.value = value", "def update(self, v, r):\n pass", "def update(self)->None:\n database.cursor.execute(\"UPDATE votes SET value = %s WHERE id = %s\", (\n self.value,\n self.id\n ))\n database.connection.commit()", "def _update_value(self):\n args = [] # todo: pyscript support for list comprehension\n for s in self._upstream:\n args.append(s())\n value = self._call_func(*args)\n self._set_value(value)", "def update(self):", "def update(self):", "def update(self):", "def update(self):\n self._state = self._state", "def update(self, value):\n if value < self.min:\n self.min = value\n if value > self.max:\n self.max = value\n self.total += value\n self.instances += 1\n self.values.append(value)", "def _refresh(self):\n self._value = self._service.get_value(self._entry_point, self._path)", "def update(self, *args, **kw):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, v_input):\n\n self.v = v_input", "def update(self):\n self.sensor.update()", "def update(self, value=1):\n self.step += float(value)\n self._draw()", "def update(self):\n # default implementation is to do nothing.", "def value(self, value):\n self._value = value\n self.is_dirty = True", "def update(self):\n self.m.update()", "def _update(self, count=True, forced=False):", "def update(self) -> None:\n pass", "def updateValue(self,i,x):\n assert 0 <= i < len(self)\n self.__update_aux(0,0,len(self),i,x)", "def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value", "def increment(cls, value):\r\n value.value += 1", "def update_current(self):\n velocity, horizontal_angle, vertical_angle = self.current_function()\n self.set_current_velocity(velocity, horizontal_angle, vertical_angle)", "def updateValue(self, state):\n return self.getQValue(state, self.policy[state[0], state[1]])", "def update(self):\n _LOGGER.debug(\"Requesting update from sensor...\")\n self._cuby.refresh_devices()\n\n state = \\\n float(self._cuby.devices[self._device_id][self._sensor_name])\n\n if self._sensor_name in FORMAT_NUMBERS:\n self._state = '{0:.1f}'.format(state)\n else:\n self._state = state", "def value(self, value):\n\n\t\tself.__value = value", "def setCurrent(self, value):\n\n\t\tself._current = self._setpoint - value", "def update(self, values):\n pass", "def update(self, value):\n self.bar.setValue(value)\n 
self.text_label.setText('{}: {}/{}'.format(self.label, value, self.num_regions))", "def changeCurrentValue(self):\n if(self.dropDown.currentIndex() >= 0):\n self.__currentValue = self.dropDown.currentIndex()\n\n if(self.__functionToInvoke != None):\n self.__functionToInvoke()", "def result(self, value):\n self.set_local(0, value)", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def auto_update(self, value):\n self._auto_update = value\n self._last_auto_update_state = self._auto_update", "def force_an_update_when_value_changed(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"force_an_update_when_value_changed\")", "def update_variable(value):\n return value", "def set_current_value(self, value):\n self._oldvalue = self.get_current_value()\n self._currentvalue = value\n if self._oldvalue != value:\n # self.notify_observers(\"new_ip_detected\", {\"ip\": value})\n LOG.debug(\"%s.set_current_value(%s)\", self.__class__.__name__, value)\n return value", "def force_update(self):\n self.update(self.poll())", "def update(self):\n self._xfinity_data.update()", "def update(self, value: Opt[bytes], wal: bool = True):\n self.value = value\n self.wal = wal", "def _update(self, handle, value):\n _LOGGER.debug(\n \"%s: %15s temperature = %-2d.%-2d, humidity = %3d\",\n handle,\n self.name,\n value[0],\n value[2],\n value[1],\n )\n self.data[\"temp\"] = float(\"%d.%d\" % (value[0], value[2]))\n self.data[\"humid\"] = value[1]" ]
[ "0.80516917", "0.8023897", "0.7922197", "0.7634436", "0.7568084", "0.7530883", "0.7463531", "0.73235893", "0.73007935", "0.72947305", "0.72197104", "0.72197104", "0.7211766", "0.7185519", "0.711843", "0.7090071", "0.70864594", "0.70864594", "0.7069182", "0.70634", "0.7053123", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7045377", "0.7042354", "0.70288765", "0.7026397", "0.70224977", "0.7003386", "0.700193", "0.6997197", "0.69921356", "0.6987756", "0.6987756", "0.6957357", "0.6954695", "0.69522494", "0.6937077", "0.6930477", "0.69143915", "0.6911296", "0.690063", "0.6882572", "0.6875018", "0.6858212", "0.68508357", "0.6836399", "0.68270314", "0.6784149", "0.67697316", "0.67697316", "0.67697316", "0.67647356", "0.6752631", "0.67176455", "0.6708325", "0.6706006", "0.6706006", "0.6706006", "0.6706006", "0.66823524", "0.6674121", "0.6669401", "0.6656878", "0.6655264", "0.665217", "0.66357636", "0.6613043", "0.6604762", "0.6596947", "0.6593737", "0.6593597", "0.65750915", "0.65693086", "0.65635604", "0.65619445", "0.6558048", "0.6550996", "0.6533087", "0.6528577", "0.6517", "0.6513396", "0.6505216", "0.6491882", "0.64863664", "0.6478089", "0.6477662", "0.6474951", "0.64618266" ]
0.0
-1
Return the value of the number.
def native_value(self) -> float: return self._device.offset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumber():", "def number_value(self) -> typing.Optional[jsii.Number]:\n return self._values.get('number_value')", "def getNumber(self):\n return self.number", "def get_number(self):\n return self.__number", "def number(self):\n return self._num", "def getNumber(self):\n return self.__number", "def visit_Numeric(self, node):\n return node.value", "def number(self):\n return self._number", "def number(self):\n return self._number", "def value(self, *args):\n return _ida_hexrays.cnumber_t_value(self, *args)", "def __float__(self):\n return float(self.number)", "def number(self) -> float:\n return self._number", "def value(x):\n\tif isNumber(x): return x\n\telse: return x.value()", "def getValue(self) -> int:\n ...", "def num(self):\n return self.num", "def get_number(self):\n\n return self._number", "def visit_Num(self, node):\n token = node.token\n if token.type in (INTEGER, FLOAT):\n return token.value", "def number(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"number\")", "def value(self) -> int:\n return self._value", "def value(x):\n if isNumber(x):\n return x\n else:\n return x.value()", "def get_val(self):\n return self.value", "def value(self) -> Union[int, float]:\n return self._value", "def number(self):", "def get_value(self):\n return self._val", "def getval(self):\r\n return self.value", "def __int__(self):\n\n return self.value", "def get_value(self):\r\n return 0", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def _get_value(self):\n \n return self._value", "def value(self):\n return self._val", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def _value(self):\n return self.device.value(*self._id[1:])", "def num (self):\n return self.value[0]/self.value[1]", "def value(self) -> float:\n return pulumi.get(self, \"value\")", "def value(self) -> float:\n return pulumi.get(self, \"value\")", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def _value(token):\n result = re.match(r'\\d*', '0' + token)\n return int(result.group(0))", "def get_value(self):\n return self._value", "def value(self):\n return self.compute_value()", "def value(self):\n return self._value", "def value(self):\n return self._value", "def _get_value(self):\n return self.__value", "def getValue(self):\n return self.value", "def getValue(self):\n return self.value", "def getValue(self) -> Optional[int]:\n return self.__value", "def _get_value(self, value):\r\n try:\r\n return int(value)\r\n except ValueError:\r\n return self.registers[value]", "def value(self):\n return self.__n", "def as_number(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"as_number\")", "def value(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"value\")", "def _getvalue_expr_Num(self, expr: ast.Num) -> Any:\n return expr.n", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def get_value(self) -> float:\n return self._data_provider.get_value()", "def Get(self):\n value=0\n return value", "def Value(self) -> _n_0_t_14:", "def to_number(self):\n return self._to_number", "def value(self) -> float:", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def 
value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def num(value):\n value = Decimal(value)\n return value", "def value(self):\n return self._value_", "def val(self):\r\n if not self.value:\r\n self._calc()\r\n\r\n return self.value", "def val(self):\r\n if not self.value:\r\n self._calc()\r\n\r\n return self.value", "def process(self, value):\n return float(value)", "def value(self) -> float:\n return self._value", "def value(self) -> float:\n return self._value", "def value(self) -> float:\n return self._value" ]
[ "0.7854039", "0.750523", "0.73744917", "0.73732847", "0.73537433", "0.7302086", "0.7273926", "0.72530407", "0.72530407", "0.72048277", "0.71437126", "0.71321", "0.71275204", "0.7098608", "0.7093455", "0.70763844", "0.7041058", "0.6980676", "0.69784814", "0.6971804", "0.6959118", "0.69372", "0.6902923", "0.6850924", "0.68373185", "0.68273276", "0.67993546", "0.67924666", "0.67924666", "0.67924666", "0.67616963", "0.67606837", "0.675739", "0.675739", "0.67488265", "0.674799", "0.6747705", "0.6747705", "0.6732052", "0.6732052", "0.67182875", "0.67182875", "0.67182875", "0.6716691", "0.67129034", "0.67073673", "0.6680246", "0.6680246", "0.6677161", "0.66769856", "0.6661694", "0.66498303", "0.66479284", "0.66394734", "0.6637442", "0.66338664", "0.66253334", "0.6619801", "0.6619801", "0.6619801", "0.6600116", "0.6584742", "0.65825677", "0.65736854", "0.6573419", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.6561661", "0.65519166", "0.6547143", "0.653424", "0.653424", "0.6532321", "0.65214103", "0.65214103", "0.65214103" ]
0.0
-1
Return the step of the number.
def native_step(self) -> float: return self._device.step_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_step(self) -> int:\n return self.step", "def step(self) -> int:\n return self._step", "def step(self) -> float:\n step = DEFAULT_STEP\n value_range = abs(self.max_value - self.min_value)\n if value_range != 0:\n while value_range <= step:\n step /= 10.0\n return step", "def step(self):\n if self._step is None:\n return self._n_fft // 2\n else:\n return self._step", "def get_step(self):\n # decide which direction and how far\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n return self.step", "def get_step(self):\n return self.step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step ( self ) :\n return self.__step", "def _step(self) -> int:\n return self._config[CONF_STEP]", "def freq_step(self) -> int:\n f = self.frequency\n return int(f.step)", "def get_step(self):\n direction = choice([1,-1])\n direction = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def step(self):\n return self._step", "def GetAnimationStep(self):\r\n\r\n return self._animation_step", "def step_constant(step):\n return step", "def step(self):\n return _uhd_swig.range_t_step(self)", "def time_step(self) -> float:\n return self._timestep", "def get_steps_num():\n return 0", "def time_step(self):\n return self._time_step", "def unit_step(x):\n if x < 0:\n return 0\n\n return 1", "def cur_step(self):\n return self._cur_step", "def get_step():\n\n # Decide which direction to go and how far to go in that direction.\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4, 5, 6, 7, 8])\n step = direction * distance\n\n # Reject moves that go nowhere.\n if step == 0:\n get_step()\n else:\n return step", "def numberOfSteps(num):\n steps = 0\n \n while num != 0:\n if num % 2 == 0:\n num /= 2\n steps += 1\n else:\n num -= 1\n steps += 1\n return steps", "def get_time_step(self):\n return self._time_step", "def compute_step(X):\n return MOVING_STEP", "def state_step(self) -> float:\n raise NotImplementedError", "def step(self):\n return _uhd_swig.meta_range_t_step(self)", "def step_linear_double(step):\n return step * 2", "def _ith_point(self, i):\n if self.start is S.NegativeInfinity:\n initial = self.stop\n else:\n initial = self.start\n\n if self.start is S.NegativeInfinity:\n step = -1\n else:\n step = 1\n\n return initial + i*step", "def __next__(self):\n if self.step <= 0:\n raise StopIteration\n self.step -= 1\n return self.step", "def next(self):\n if self.step ==0:\n raise StopIteration\n self.step -= 1\n return self.step", "def step(indiv):\n\tsoma=0\n\ttamanho = len(indiv)\n\tfor i in range(len(indiv)):\n\t\tsoma += math.floor(indiv[i])\n\treturn 6*tamanho +soma", "def next(self):\n if self.step == 0:\n raise StopIteration\n self.step -= 1\n return self.step", "def getStep(self, *args):\n return _CompuCell.Simulator_getStep(self, *args)", "def mm2step(pos):\n return pos * KST101.STEPS_PER_MM", "def _estimate_step_number(n_points: int, batch_size: int) -> int:\n if min(n_points, batch_size) < 1:\n raise ValueError(\n f\"Both n_points={n_points} and batch_size={batch_size} must be at least 1.\"\n ) # pragma: no 
cover\n\n return n_points // batch_size", "def get_steps(steps):\n cexc.step_exceptions(steps)\n steps_int = int(steps)\n if steps_int > MAX_STEPS:\n steps_int = MAX_STEPS\n return steps_int", "def step(self, step=None):\n pass", "def _stepped_value(self, val):\n if isinstance(self.valstep, Number):\n val = (self.valmin\n + round((val - self.valmin) / self.valstep) * self.valstep)\n elif self.valstep is not None:\n valstep = np.asanyarray(self.valstep)\n if valstep.ndim != 1:\n raise ValueError(\n f\"valstep must have 1 dimension but has {valstep.ndim}\"\n )\n val = valstep[np.argmin(np.abs(valstep - val))]\n return val", "def step(self, d=1):\n raise NotImplementedError()", "def get_next(self) -> int:\n return self._current * self._step + self._offset", "def getCurrentStep():", "def timeStep(self):\n return self.params['h']", "def get_first_step(self):\n return self.get_step_by_index(0)", "def step_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"step_id\")", "def ntimestep(self):\n if self._ntimestep is None:\n self._ntimestep = self.get_data_ntimestep()\n\n return self._ntimestep", "def hailstone_steps(num):\n steps = 0\n\n while num > 1:\n steps = steps + 1\n if num % 2 == 1:\n num = int(num * 3 + 1)\n else:\n num = int(num / 2)\n\n return steps", "def getStep():\n # TODO: can there be non-Step logs?", "def total_steps(self) -> global___Expression:", "def steps_to_run(current_step, steps_per_epoch, steps_per_loop):\n if steps_per_loop <= 0:\n raise ValueError('steps_per_loop should be positive integer.')\n if steps_per_loop == 1:\n return steps_per_loop\n remainder_in_epoch = current_step % steps_per_epoch\n if remainder_in_epoch != 0:\n return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)\n else:\n return steps_per_loop", "def steps(self, length):\n steps = max(1, round(self.length / length, 0))\n return 1.0 / steps, int(steps)", "def calculate_slider_step(\n min_value: float, max_value: float, steps: int = 100\n) -> float:\n\n return 10 ** math.floor(math.log10((max_value - min_value) / steps))", "def calc_stepsize(self):\n # Calculate step size\n step = 1.0/((self.n+self.d)*np.max(np.sum(self.p, axis=0)))\n return step", "def step_id(self):\n return self._step_id", "def getSteps(fields, row):\n if 'steps' in fields:\n return int(row[fields.index('steps')])\n return None", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def num_steps(self) -> int:\n return self._num_steps", "def value(self):\n current_value = self.initial_value * self.schedule(self.step / self.nvalues)\n self.step += 1.\n return current_value", "def step(self):\n epoch = self.last_epoch + self.epoch_per_step\n if epoch > self.max_epochs:\n if self.deprecate_epoch:\n if self._is_epoch_level_update:\n self.main_scheduler.step()\n else:\n self.main_scheduler.step(epoch - self.max_epochs)\n self.last_epoch = epoch\n self._last_lr = self.main_scheduler.get_last_lr()\n else:\n return self._step(epoch)", "def step(self, state):\n a = super().step(state)\n return np.clip(a, -1, 1)", "def step(self, state):\n a = super().step(state)\n return np.clip(a, -1, 1)", "def get_max_step_number(self)->int:\n max_num: int = 0\n for step in self.Sequence:\n if isinstance(step, Step) and 'step' in step.Name.lower() and step.Name.lower().index('step') == 0:\n tail: str = step.Name.lower().replace('step', '')\n if tail.isdigit():\n num: int = int(tail)\n max_num = max(max_num, num)\n return max_num", "def RouteTargetAsNumberStep(self):\n return 
self._get_attribute('routeTargetAsNumberStep')", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def _getsteps(num_of_steps, limit):\n steps = []\n current = 0.0\n for i in range(0, num_of_steps):\n if i == num_of_steps - 1:\n steps.append(int(round(limit)))\n else:\n steps.append(int(round(current)))\n current += float(limit) / float(num_of_steps - 1)\n return steps", "def _ceil_div(value, block):\n return (value + block - 1) // block", "def _STEPS2TIME(step):\n return step/1000.", "def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step", "def step( self, name ):\n duration = self.summarize_step( start=self.step_start, step_name=name, level=self.level )\n now = time.time()\n self.step_start = now\n return duration", "def wavelenstep(self):\n return self._wavelenstep", "def step_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"step_id\")", "def num_steps(self):\n return self.torsoStepCount() + 1", "def get_steps(self):\n return self.steps", "def DegreeToSteps(self,Degree):\n return int(round(Degree*(self.Reduction/self.StepAngle/2)))", "def _e_step(self, x):\n return self.get_posterior(x)", "def get_steps(self, scheme):\n if scheme == 'monte_carlo':\n # calculate the number of steps for a monte-carlo scheme\n if self.parameters['monte_carlo_steps'] == 'auto':\n steps_min = self.parameters['monte_carlo_steps_min']\n steps_max = self.parameters['monte_carlo_steps_max']\n steps = np.clip(10 * 2**self.Nr, steps_min, steps_max) \n # Here, the factor 10 is an arbitrary scaling factor\n else:\n steps = self.parameters['monte_carlo_steps']\n \n elif scheme == 'metropolis':\n # calculate the number of steps for a metropolis scheme\n if self.parameters['metropolis_steps'] == 'auto':\n steps_min = self.parameters['metropolis_steps_min']\n steps_max = self.parameters['metropolis_steps_max']\n steps = np.clip(10 * 2**self.Nr, steps_min, steps_max) \n # Here, the factor 10 is an arbitrary scaling factor\n else:\n steps = self.parameters['metropolis_steps']\n \n else:\n raise ValueError('Unknown stepping scheme `%s`' % scheme)\n \n return int(steps)", "def multistep_number(self) -> int:\n return pulumi.get(self, \"multistep_number\")", "def multistep_number(self) -> int:\n return pulumi.get(self, \"multistep_number\")", "def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds", "def _epsilon(self, step):\n if step < 0:\n return self._start\n elif step > self._steps:\n return self._stop\n else:\n return self._step_size * step + self._start", "def current_step(self) -> FlowNode:\n return self._current_step", "def get_last_step(self):\n return self.get_step_by_index(-1)", "def _volume_to_step_position(self, volume: pint.Quantity) -> int:\n # noinspection PyArgumentEqualDefault\n steps = volume * self._steps_per_ml\n return round(steps.m_as(\"steps\")) + self._offset_steps", "def get_steps(self):\n return self.steps", "def value(self, step):\n raise NotImplementedError", "def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - self._start_step)", "def step(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.step", "def _num_factor(number, factor):\n assert factor != 0\n return number // factor", "def step2mm(step):\n return step / KST101.STEPS_PER_MM", "def best_step(self):\r\n return self._best_value_step", "def next_num(self):\n return self.page + 1", 
"def _step(self) -> None:", "def StepTolerance(self):\n\t\treturn self._get_attribute('stepTolerance')", "def get_page(self, num):\n return num + 10", "def step_index(self, step):\n return self.steps.index(step)" ]
[ "0.8003057", "0.77602637", "0.7494545", "0.7301309", "0.72224903", "0.7211852", "0.7211852", "0.7135624", "0.7135624", "0.70930195", "0.70930195", "0.70930195", "0.70930195", "0.70783025", "0.7060501", "0.7009056", "0.69800514", "0.6976799", "0.6710798", "0.6657582", "0.6655925", "0.66469085", "0.66316766", "0.65583897", "0.65025246", "0.6487104", "0.6468228", "0.6448543", "0.6398909", "0.6380575", "0.6367085", "0.6303652", "0.6257507", "0.6222615", "0.619082", "0.616334", "0.6151376", "0.61363417", "0.6115353", "0.60885805", "0.6081401", "0.60574126", "0.6015616", "0.59992874", "0.5996961", "0.59759694", "0.592874", "0.5917503", "0.58876264", "0.5879007", "0.5873258", "0.5857533", "0.5846045", "0.58333325", "0.5823241", "0.5816332", "0.58007705", "0.5792845", "0.5790246", "0.578215", "0.57402384", "0.5735753", "0.57147086", "0.57138866", "0.57127804", "0.57127804", "0.57019025", "0.56871307", "0.5681294", "0.56755704", "0.5674275", "0.5668347", "0.5666404", "0.5663646", "0.56589115", "0.565794", "0.56563544", "0.5646689", "0.5642275", "0.564213", "0.56235087", "0.561886", "0.561886", "0.56171834", "0.56167257", "0.56078714", "0.5591262", "0.55872107", "0.5585673", "0.55754924", "0.5572664", "0.5563061", "0.5560801", "0.5558699", "0.55585253", "0.55584043", "0.55582994", "0.5557477", "0.55530125", "0.5528807" ]
0.61539525
36
Return the min value of the number.
def native_min_value(self) -> float: return self._device.min_offset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimum(self):\n return min(self.numbers)", "def minimum_number(self):\n return self._minimum_number", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def minimum(self) -> Union[int, float]:\n return self.range[0]", "def find_min(self):\n return self.min", "def find_min(self):\n return self.min", "def min():\n return KeeperOfMinOrMax(int.__gt__)", "def _get_minimum(self):\n return self._minimum", "def min(self) -> Union[float, int, str]:\n return self._data.min()", "def getMinValue(self):\n return self.MIN_VALUE", "def min_value(self) -> float:\n return DEFAULT_MIN_VALUE", "def min(self):\n return self._min", "def min(self):\n return self._min", "def get_min(self, line_number):\n if self.isIndexError(line_number):\n return 'null'\n return min(self.numbers[line_number])", "def min(self):\n return self.__min", "def MinimumValue(self):\n datatype = self.NumPyDataType\n if issubclass(datatype, numpy.integer):\n return numpy.iinfo(datatype).min\n elif issubclass(datatype, numpy.floating):\n return -numpy.inf\n else:\n raise TypeError(\"Cannot handle DataType: {0}\".format(datatype))", "def get_min(self):\n\t\tif self.left:\n\t\t\treturn self.left.get_min()\n\t\treturn self.value", "def min(self) -> float:\n return stats.min(self)", "def get_min(self):\n return self.serie.min()", "def native_min_value(self) -> float:\n return TEMP_MINIMUM", "def _get_minimum_value(self):\n if hasattr(self, '_minimum_value'):\n return self._minimum_value\n return None", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def min_value(self) -> Union[int, float]:\n return self.left_boundary['value']", "def min(self):\n return min(self)", "def native_min_value(self) -> float:\n return -9", "def min(x):\n pass", "def peek_min(self):\n if self.root:\n return self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")", "def x_min(self):\n return self.get_min_value(self.X_INDEX)", "def Min(data):\n return data.min()", "def MinimumValue(self):\n return self._fitness[self._minIndex]", "def min(self, numeric_only=None):\n assert numeric_only == None\n return self._lift(\"min\")", "def find_min(self):\n return min(self.nodes, key=int)", "def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)", "def _minimum(self) -> float:\n return self._config[CONF_MIN]", "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", "def smallest_num():\n num = 1\n i = 1\n pass", "def min_value(dtype):\n return _api_internal._min_value(dtype)", "def getmin(self):\n\n return self.X", "def min_value(tree):\n min_utility = float(\"inf\")\n \n if (is_terminal(tree)):\n return tree\n else:\n #options = []\n for node in tree:\n 
#options.append(max_value(node))\n min_utility = min(min_utility, max_value(node))\n return min_utility", "def _get_min_positive_value(self, a, b):\n if a < 0 and b >= 0:\n return b\n if a >= 0 and b < 0:\n return a\n return min(a, b)", "def min(self):\n least = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] < least:\n least = self.data[i]\n return least", "def minimum_value(self):\n return self._fitness[self._minidx]", "def get_min(self):\n if not self:\n return None\n return self.left.get_min() if self.left else self.value #Ternarary Operator", "def _minimum(self) -> float:\n if self._type == \"power\":\n return 1.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_min\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]", "def x_min(self) -> ir.FloatingValue:\n return ops.GeoXMin(self).to_expr()", "def minimum(self):\n return self.properties.get('minimum')", "def min(self):\n return self.get_first()", "def min_number(self, rows: List[Row], column: NumberColumn) -> Number:\n cell_values = [row.values[column.name] for row in rows if row.values[column.name] is not None]\n if not cell_values:\n return 0.0 # type: ignore\n if not all([isinstance(value, Number) for value in cell_values]):\n raise ExecutionError(f\"Invalid values for number selection function: {cell_values}\")\n return min(cell_values) # type: ignore", "def min_temperature(self):\n mini, maxi = ct.c_int(), ct.c_int()\n self.lib.GetTemperatureRange(ct.pointer(mini), ct.pointer(maxi))\n return mini.value", "def potential_min(self):\n\n return self._args.min", "def get_min(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def get_min(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n minimum = df.min(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n minimum = round(float(minimum), 4)\r\n return minimum", "def min(self):\n mins = self.client.map(_call_min, self.vecDask, pure=False)\n min_val = np.inf\n for future, result in daskD.as_completed(mins, with_results=True):\n if result < min_val:\n min_val = result\n return min_val", "def cmin(self):\n return self['cmin']", "def cmin(self):\n return self[\"cmin\"]", "def getMinX(self):\n return self.minx", "def min():\n\n # check if collection passed to process() so far is empty\n assert len(inlist) > 0, \"process() has empty collection\"\n\n # assign tmp the first val inside collection \n tmp = inlist[0]\n # for loop to iterate through collection to find minimum \n for item in inlist:\n if item < tmp:\n tmp = item \n return tmp # return the minimum of all int", "def locked_temp_min(self) -> int:\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"C\":\r\n return self.locked_temp_min_c\r\n elif self.temperature_scale == \"F\":\r\n return self.locked_temp_min_f\r\n else:\r\n return self._locked_temp_min\r\n\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"locked_temp_min\"))", "def get_min(self):\n min_value= self.df[self.col_name].min()\n return min_value", "def userMinimum(self) -> float:\n return 
self._user_minimum", "def take_min(self):\n return self.get_first()", "def get_min_value(self, dim):\n return self._min_values[dim]", "def z_min(self):\n return self.get_min_value(self.Z_INDEX)", "def find_least_number(incoming_list):\n retval = min(incoming_list)\n return retval", "def calc_min(data: list) -> float:\n acc = data[0]\n for n in data:\n if n < acc:\n acc = n\n return float(acc)", "def minimum(self, start, end):\n return self.foldl1(start, end, min)", "def min_digit(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n s = 10\n while(x>0):\n if(s>(x%10)):\n s = x%10\n x = x//10\n return s", "def min(self, key=lambda _: _):\n return min(self, key=key)", "def min(self):\n return numpy.ma.min(self.data)", "def min(self):\r\n\t\treturn min(self.sample)", "def min(self):\n return self._min(self.root)", "def get_minimum_value_from_list(self, list_):\r\n return min(list_)", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def min(self, fn=lambda x: x):\n return _(min(*self._, key=fn))", "def lowest_number(list_int):\n if len(list_int) == 1:\n return list_int[0]\n number = lowest_number(list_int[1:])\n if list_int[0] < number:\n return list_int[0]\n else:\n return number", "def find_least_number(incoming_list):\n \n return_value = min(incoming_list)\n return return_value", "def task_4_min_value_integers(data: List[int]) -> int:\n return min(data, default=None)", "def find_smallest(numbers):\n small = numbers[0]\n for item in numbers:\n if item < small:\n small = item\n return small", "def getMin(self) -> int:\n return self.minStack[-1]", "def smallest_int(numbers):\n if numbers == []:\n return \n smallest_int = numbers[0]\n for number in numbers:\n if number < smallest_int:\n smallest_int = number\n \n return smallest_int", "def min_val(board):\n v = math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = min(v,max_val(result(board,action)))\n return v", "def min_sn(self) -> Union[float, int]:\n return self._min_sn", "def min_sn(self) -> Union[float, int]:\n return self._min_sn", "def _findSmallestMoney(self):\n\t\tsmallest = 99999999 # Just some high number\n\t\tfor x in self.playerList:\n\t\t\tif x != None:\n\t\t\t\tif x.money < smallest:\n\t\t\t\t\tsmallest = x.money\n\t\t\t\t\t\n\t\treturn smallest", "def find_least_number(incoming_list):\n\n least_number = min(incoming_list)\n return least_number", "def min(self):\n\n return time_stat(self, stat=\"min\")", "def min_value(gameState):\n if terminal_test(gameState): return 1\n return min( max_value(gameState.forecast_move(move)) for move in gameState.get_legal_moves() )" ]
[ "0.8187885", "0.8120689", "0.7916179", "0.7916179", "0.78326535", "0.7700931", "0.7700931", "0.7643324", "0.76173663", "0.76173663", "0.76063895", "0.75760686", "0.7549816", "0.7485435", "0.7461791", "0.7427969", "0.7427969", "0.74131995", "0.73779243", "0.7353689", "0.7327784", "0.73276645", "0.7316093", "0.73118854", "0.7311186", "0.72589135", "0.72589135", "0.7244063", "0.72338545", "0.72062206", "0.71974456", "0.71385854", "0.70689464", "0.7049584", "0.70456445", "0.70399815", "0.7014965", "0.7003275", "0.6997439", "0.6958712", "0.6945745", "0.69424534", "0.6935701", "0.69336706", "0.693263", "0.69303495", "0.69233036", "0.69224346", "0.69214034", "0.6897487", "0.6891154", "0.68793535", "0.68029654", "0.6794953", "0.67740816", "0.6754709", "0.67488927", "0.67425376", "0.6737571", "0.6734912", "0.673302", "0.6724225", "0.6721833", "0.6709186", "0.6707004", "0.669246", "0.6688868", "0.66783834", "0.6665573", "0.6656574", "0.66486365", "0.6639328", "0.66375846", "0.66262805", "0.6624992", "0.6605988", "0.6591072", "0.6580249", "0.6580249", "0.6580249", "0.6580249", "0.6580249", "0.6580249", "0.6580249", "0.6580249", "0.6580249", "0.65801346", "0.6569819", "0.65516096", "0.65053505", "0.6502649", "0.64999664", "0.64958286", "0.6494994", "0.6493583", "0.6493583", "0.6485728", "0.6484302", "0.64825606", "0.6480081" ]
0.68672127
52
Return the max value of the number.
def native_max_value(self) -> float: return self._device.max_offset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maximum_number(self):\n return self._maximum_number", "def maximum(self):\n return max(self.numbers)", "def max_value(self) -> int:\n return self.__max_value", "def max():\n return KeeperOfMinOrMax(int.__lt__)", "def get_max(self):\n return self._max", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def max(self):\n return self._reduce_for_stat_function(F.max, only_numeric=False)", "def get_max_value(self):\n max_value = max(self.values)\n return max_value", "def _get_maximum(self):\n return self._maximum", "def max_value(self) -> Optional[int]:\n return self._max_value", "def max(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max\")", "def max(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max\")", "def get_max(self):\n return self.max[-1]", "def max_num(num_list):\n\n return max(num_list)", "def maximum(self) -> Union[int, float]:\n return self.range[1]", "def get_max(self):\n\t\tif self.right:\n\t\t\treturn self.right.get_max()\n\t\treturn self.value", "def max(self):\n return self._max", "def max(self):\n return self._max", "def max(self):\n return self.__max", "def get_max_number():\n max_number = float(input(\"What is the max number you want?\"))\n return max_number", "def max_value(self) -> float:\n return DEFAULT_MAX_VALUE", "def max(self) -> float:\n return stats.max(self)", "def max(self, num_list):\n try:\n max = int(num_list[0])\n\n for number in num_list:\n try:\n if number > max:\n max = number\n except Exception as e:\n print(\"Error\", e)\n\n except Exception as e:\n print(\"Error:\", e)\n\n return max", "def max(self):\n max = 0\n a = self.array_form\n for i in xrange(len(a)):\n if a[i] != i and a[i] > max:\n max = a[i]\n return max", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n iinfo._max_vals[self.key] = val\n return val", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n iinfo._max_vals[self.key] = val\n return val", "def native_max_value(self) -> float:\n return 9", "def _max_in_bounds(self, max):\n if max >= self.valmax:\n if not self.closedmax:\n return self.val[1]\n max = self.valmax\n\n if max <= self.val[0]:\n max = self.val[0]\n return self._stepped_value(max)", "def _get_maximum_value(self):\n if hasattr(self, '_maximum_value'):\n return self._maximum_value\n return None", "def max(self) -> Union[float, int, str]:\n return self._data.max()", "def getmaxnumvar(self): # 3\n res,resargs = self.__obj.getmaxnumvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumvar_return_value = resargs\n return _maxnumvar_return_value", "def max_value(self) -> Union[int, float]:\n return self.right_boundary['value']", "def max(self):\n assert self.__stack\n return self.__max_values[-1]", "def get_max(self):\n return self.serie.max()", "def native_max_value(self) -> float:\n return TEMP_MAXIMUM", "def getmaxnumvar(self):\n maxnumvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getmaxnumvar(self.__nativep,ctypes.byref(maxnumvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n maxnumvar_ = maxnumvar_.value\n _maxnumvar_return_value = maxnumvar_\n return (_maxnumvar_return_value)", "def 
maxNumber(x):\n maxVal = x[0]\n for num in x:\n if maxVal <num:\n maxVal=num\n return maxVal", "def getMaxNumber():\n maxNumber = int(input(\"what is the maximum number that you want:\"))\n return maxNumber", "def Max(data):\n return data.max()", "def getMaxValue(self):\n # TODO: make this more consistent accross versions\n # This was a \"fix\" when we started supported PS5000a\n return self.MAX_VALUE", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def largestPrimeFactor(number):\n factorlist = primeFactors(number)\n maximumfactor = max(factorlist)\n return maximumfactor", "def GetMax(val, maximum):\n\tval = float(val)\n\tmaximum = float(maximum)\n\treturn max([val, maximum])", "def MaximumValue(self):\n datatype = self.NumPyDataType\n if issubclass(datatype, numpy.integer):\n return numpy.iinfo(datatype).max\n elif issubclass(datatype, numpy.floating):\n return numpy.inf\n else:\n raise TypeError(\"Cannot handle DataType: {0}\".format(datatype))", "def get_max(self, line_number):\n if self.isIndexError(line_number):\n return 'null'\n return max(self.numbers[line_number])", "def max(self):\n return max(self)", "def return_the_maximum(self):\n\n return self.__max_stack[-1]", "def max(self):\n maxs = self.client.map(_call_max, self.vecDask, pure=False)\n max_val = - np.inf\n for future, result in daskD.as_completed(maxs, with_results=True):\n if result > max_val:\n max_val = result\n return max_val", "def get_maximum ( self, object ):\n return self.maximum", "def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number", "def getmaxnumanz(self): # 3\n res,resargs = self.__obj.getmaxnumanz64()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumanz_return_value = resargs\n return _maxnumanz_return_value", "def potential_max(self):\n\n return self._args.max", "def get_max(self):\n # 0(1)\n return self.max_stack.peek()\n\n # Don't need find_max we returned max_stack.peek()", "def peek_max(self):\n if self.root:\n return self.root.max().value\n raise ValueError(\"cannot perform peek_max on an empty tree\")", "def maximum(self):\n return self.properties.get('maximum')", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def max(self):\n\n maximum = -float('inf')\n\n for i in range(self.sum.GetNbinsX()):\n bin_max = self.sum.GetBinContent(i+1) + self.sum.GetBinError(i+1)\n if bin_max > maximum:\n maximum = bin_max\n\n return maximum", "def get_max(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return max(data)", "def maxim(self) -> (int, float('inf')):\n\t\treturn 2", "def max_num_in_list(a_list):\n max_number = max(a_list)\n return max_number", "def max(self):\n return self.get_first()", "def max(self):\n return numpy.ma.max(self.data)", "def max(x):\n pass", "def get_max(self):\n if not self:\n return None\n return self.right.get_max() if self.right else self.value #Ternarary Operator", "def max_value(dtype):\n return _api_internal._max_value(dtype)", "def get_max_value(self, dim):\n return self._max_values[dim]", "def _maximum(self) -> float:\n return self._config[CONF_MAX]", "def get_max(self):\n current = self\n while 
current.hasRight(): # This is the belief that the max has to be to the right. If you can't go right either in the begining or any more\n # if current has a right this line will be set and will keep going from line 129 to 130 until there are no more rights.\n current = current.right\n # this line returns as soon there is no more rights. breaking out of the loop.\n return current.value", "def get_max(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def max_value(gameState):\n if terminal_test(gameState): return -1", "def get_max(self):\n\n max_value = self.head.value\n current_node = self.head\n # while current_node.next is not None: # when current_node = current.tail, this will not iterate\n while current_node.next is not None: # when current_node = current.tail, this will not iterate\n current_node = current_node.next\n # checks if the value is larger than our max value so far\n if max_value < current_node.value:\n max_value = current_node.value\n return max_value", "def maximum(x):\n return np.maximum(x, 0)", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def question_24(list_num: int) -> int:\n return max(list_num, key=list_num.count)", "def getmaxnumanz(self):\n maxnumanz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getmaxnumanz64(self.__nativep,ctypes.byref(maxnumanz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n maxnumanz_ = maxnumanz_.value\n _maxnumanz_return_value = maxnumanz_\n return (_maxnumanz_return_value)", "def max_val(board):\n v = -math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = max(v,min_val(result(board,action)))\n return v", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def find_max(self):\n return max(self.nodes, key=int)", "def cmax(self):\n return self[\"cmax\"]", "def list_max(numbers):\n maxnum = 0\n \n for num in numbers[0:]:\n if num > maxnum:\n maxnum = num\n return maxnum", "def _max_factor(number, factors):\n return max(n for n in factors if n <= number)", "def get_max(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n maximum = df.max(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n maximum = round(float(maximum), 4)\r\n return maximum", "def find_max(self):\n\n if self.right:\n return self.right.find_max()\n\n return self.data", "def find_max(self):\n if self.right:\n return self.right.find_max()\n return self.data", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)", "def cmax(self):\n return self['cmax']", "def max(self):\n return self._max(self.root)", "def getMaxValue(self):\n if self.right is None:\n return self.data\n return self.right.getMaxValue()", "def get_max(self):\n max_value= self.df[self.col_name].max()\n return max_value", "def max(self):\n no = self.root\n if no:\n no = self.__search_node_max_esq(no)\n if no:\n return no.valor\n return None", "def 
userMaximum(self) -> float:\n return self._user_maximum", "def getmaxnumbarvar(self): # 3\n res,resargs = self.__obj.getmaxnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumbarvar_return_value = resargs\n return _maxnumbarvar_return_value", "def maximum_value(self):\n if not self.root:\n return \"the tree is empty!\"\n\n max_val = self.root.value\n\n def _max_value(node):\n nonlocal max_val\n if not node:\n return\n if node.value > max_val:\n max_val = node.value\n\n _max_value(node.left)\n _max_value(node.right)\n _max_value(self.root)\n return max_val", "def max_val(t): \n maxVal = False\n \n def helper(obj):\n nonlocal maxVal\n for el in obj:\n if isinstance(el, int):\n if maxVal == False or maxVal < el:\n maxVal = el\n else:\n helper(el)\n \n helper(t)\n return maxVal", "def get_rmax(self):\n return self.rmax", "def find_max_numb(x,y):\n if x > y:\n print(x, \" - is max number.\")\n return x \n else:\n print(y, \" - is max number.\")\n return y", "def z_max(self):\n return self.get_max_value(self.Z_INDEX)", "def max_value(tree):\n max_utility = float(\"-inf\")\n \n if (is_terminal(tree)):\n return tree\n else:\n #options = []\n for node in tree:\n #options.append(max_value(node))\n max_utility = max(max_utility, min_value(node))\n return max_utility", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth" ]
[ "0.82954454", "0.81974316", "0.7996817", "0.797404", "0.78951424", "0.7878407", "0.7878407", "0.7877468", "0.78691196", "0.7838583", "0.7678307", "0.7661671", "0.7661671", "0.765726", "0.764877", "0.76332706", "0.7630812", "0.76256377", "0.76256377", "0.76157844", "0.7575633", "0.75590837", "0.7544836", "0.7544298", "0.7483342", "0.7460809", "0.7460809", "0.74484676", "0.7426704", "0.7420807", "0.73896796", "0.7372525", "0.7351213", "0.7346912", "0.7341476", "0.7336958", "0.73262036", "0.73181343", "0.729235", "0.72870797", "0.72766197", "0.7264547", "0.72604674", "0.7207308", "0.72017705", "0.71697235", "0.7169563", "0.7159391", "0.71371514", "0.71357656", "0.71096843", "0.7089758", "0.7088411", "0.70571405", "0.7051744", "0.70488805", "0.7046176", "0.7036617", "0.70296013", "0.7029336", "0.70173824", "0.7006262", "0.70060754", "0.7004123", "0.7001126", "0.699711", "0.6994534", "0.6980338", "0.6975786", "0.6968204", "0.69646883", "0.69537866", "0.69417757", "0.6925851", "0.6925734", "0.6924952", "0.6909317", "0.6895586", "0.6893783", "0.6888988", "0.6883349", "0.68688506", "0.6866635", "0.6860763", "0.6856318", "0.68505377", "0.6848186", "0.68373543", "0.68332034", "0.6826448", "0.6822887", "0.681751", "0.6815306", "0.68036866", "0.68033344", "0.6793921", "0.67908186", "0.67868954", "0.67855155", "0.67798615", "0.67798615" ]
0.0
-1
Return Poisson standard deviations for a series of counts => sqrt(count).
def poisson_sd(data: DataSeries) -> DataSeries: return data.pow(0.5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simpson(counts):\n return 1 - dominance(counts)", "def standard_deviation( values, sample=False ):\n return ma.sqrt( variance( values, sample ) )", "def sampleStandardDeviation(numlist):\n\tv = sampleVariance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def calc_standard_deviation(data: list) -> float:\n mean = calc_mean(data)\n acc = 0.0\n for n in data:\n acc += (n - mean) ** 2\n acc /= len(data) - 1\n return math.sqrt(acc)", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def psd(self, frequency_grid: FrequencySamples, norm='standard'):\n return frequency_grid.lomb_scargle(self.times, self.data, norm=norm)", "def pooled_standard_deviation(input_variances):\r\n # compute and return pooled standard deviation\r\n return sqrt(mean(square([float(i) for i in input_variances])))", "def StandardDeviation(numlist):\n\tv = Variance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def std(mean, vals):\n return sqrt(sum([(i-mean)**2 for i in vals])/len(vals))", "def std_dev(L, is_sample=0):\n\treturn math.sqrt(variance(L, is_sample))", "def sd(vals):", "def stddev(r):\n avg = average(r)\n sdsq = sum([(i - avg) ** 2 for i in r])\n return (sdsq / (len(r) - 1 or 1)) ** 0.5", "def _std(listvalue,ddof=1):\n\tmean=_mean(listvalue)\n\ttemp=[math.pow(i-mean,2) for i in listvalue]\n\tres=math.sqrt(sum(temp)/(len(listvalue)-ddof))\n\treturn res", "def lsamplestdev (inlist):\r\n return math.sqrt(samplevar(inlist))", "def stdDev(data):\r\n sum = 0\r\n ave = average(data)\r\n for i in data:\r\n sum += (i-ave)**2\r\n return math.sqrt(sum/len(data))", "def sd(x):\n x_mean = mean(x)\n return (\n sum((x_i - x_mean) ** 2 for x_i in x) / (len(x) - 1)\n ) ** 0.5", "def stdev(items):\n return Series.std(Series(items))", "def standard_deviation(list):\n num_items = len(list)\n mean = sum(list) / num_items\n differences = [x - mean for x in list]\n sq_differences = [d ** 2 for d in differences]\n ssd = sum(sq_differences)\n\n\n variance = ssd / num_items\n\n sd = sqrt(variance)\n\n return sd", "def std(values, ave):\n return math.sqrt(float(sum((value-ave)**2 for value in values))/len(values))", "def poisson_significance(s,b):\n return np.sqrt(2*((s+b)*np.log(1+s/b)-s))", "def std(x):\n return sqrt(TinyStatistician.var(x))", "def std(l: List[float]) -> float:\n n = len(l)\n if n == 0:\n return 0\n avg = average(l)\n return sqrt(sum([(avg - i) * (avg - i) for i in l]))", "def GetStandardDeviation(vals_l, mean):\n\n\n sum_deviations_squared = 0\n\n for x in vals_l:\n sum_deviations_squared += (x - mean)**2\n\n return math.sqrt(float(sum_deviations_squared)/float(len(vals_l)))", "def stddev(data, ddof=0):\n n = len(data)\n if n < 2:\n return 0\n ss = _ss(data)\n pvar = ss/(n-ddof)\n return pvar**0.5", "def mcintosh_d(counts):\n u = sqrt((counts*counts).sum())\n n = counts.sum()\n return (n-u)/(n-sqrt(n))", "def evenness(df):\n obs = shannon(df)\n count = df.shape[0]\n max_freq = 1.0 / count\n max_vector = np.repeat(max_freq,count)\n pre = -(sum(max_vector * np.log(max_vector)))\n return obs / pre", "def std (nums,n_mean=None):\r\n if not n_mean:\r\n n_mean = mean(nums)\r\n n = len(nums)\r\n if n == 1:\r\n return 0.0\r\n variance = 0.0\r\n for i in xrange(n):\r\n tmp = (nums[i]-n_mean)\r\n variance += (tmp*tmp)\r\n \r\n variance /= n-1\r\n return sqrt(variance)", "def std_dev(list_num):\n\n # Calculate the mean of the numbers\n mean = sum(list_num)/len(list_num)\n\n # Initialise a variable to 
hold the sum of the squared distance to the mean\n sum_sqrd_dist = 0\n \n # Iterate over the numbers\n for num in list_num:\n # Subtract the mean from the number and square the result\n sqrd_dist = (num - mean)**2\n # Add the number to the sum of the squared distances \n sum_sqrd_dist = sum_sqrd_dist + sqrd_dist\n\n # return the square root of the sum of squared distances divided by the length of the list\n return (sum_sqrd_dist/len(list_num))**(1/2)", "def calc_std(sig):\n return np.std(sig)", "def rmse2 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = 0.0\n for x, y in z :\n v += sqre_diff(x, y)\n return math.sqrt(v / s)", "def empirical_std_deviation(x):\n import numpy as np\n x = np.array(x)\n M = np.size(x)\n xm = np.mean(x)\n\n #return np.sqrt(1./(M-1.)*np.sum((x-xm)**2))\n return np.sqrt( M/(M-1.) * ( (1./M*np.sum(x**2)) - xm**2 ) )", "def dominance(counts):\n freqs = counts/float(counts.sum())\n return (freqs*freqs).sum()", "def deviation(xs):\n\ta = avg(xs)\n\treturn sqrt(sum([(x - a) ** 2 for x in xs]) / (len(xs) - 1))", "def std_dev(l):\n return variance(l)**.5", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def _std(self, data):\n var = stats.var(data)\n if var>0.0:\n sd = math.sqrt(var)\n else:\n sd = 0.0\n return sd", "def std(x, ddof=0):\n with mp.extraprec(16):\n return mp.sqrt(var(x, ddof))", "def F_std(d, N):\n # memoize specht() and weyl() results (but only for current call)\n specht_mem, weyl_mem = memoize(specht), memoize(weyl)\n\n return sum(\n d ** (-N - 2)\n * sum(sqrt(specht_mem(mu) * weyl_mem(d, mu)) for mu in box_added(alpha, d)) ** 2\n for alpha in Partitions(n=N - 1, max_length=d)\n )", "def psd(self, frequency):\n #if frequency < 1 / self.obstime: return np.nan\n #if frequency > 1 / self.cadence: return np.nan\n outs = np.ones(len(frequency))\n outs[frequency < 1/self.obstime] = np.nan\n outs[frequency > 1/self.cadence] = np.nan\n return (2 * 1./self.cadence * self.rms**2)*outs", "def rmse4 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = sum(map(lambda (x, y) : sqre_diff(x, y), z), 0.0)\n return math.sqrt(v / s)", "def std_norm_pdf(x):\r\n return 1./np.sqrt(2.*np.pi)*np.exp(-.5*x**2)", "def norm_dist(numbers, x):\r\n m = np.mean(np.array(numbers))\r\n s = np.std(np.array(numbers))\r\n if s < 1e-5:\r\n s = 1e-5\r\n return np.array(norm.pdf(x, loc=m, scale=s))", "def stddev(self, num_list):\n try:\n mean = self.average(num_list)\n\n minus_mean = []\n\n for number in num_list:\n try:\n minus_mean.append((number - mean) ** 2)\n except Exception as e:\n print(\"Error: \", e)\n\n meany_mean = self.average(minus_mean)\n\n meany_mean = meany_mean ** .5\n\n except Exception as e:\n print(\"Error: \", e)\n\n return meany_mean", "def lstdev (inlist):\r\n return math.sqrt(var(inlist))", "def norm_conv_poisson(k, l, s, nsigma=3, steps=50):\n cp = conv_poisson(k, l, s, nsigma=nsigma, steps=steps)\n n1 = np.exp(log_poisson(l, l))\n n2 = conv_poisson(l, l, s, nsigma=nsigma, steps=steps)\n return cp*n1/n2", "def rmse5 (a, p) :\n s = len(a)\n v = sum(map(sqre_diff, a, p), 0.0)\n return math.sqrt(v / s)", "def std_err(p_hat, n):\n\n return np.sqrt((p_hat)*(1-p_hat)/n)", "def std(self):\n\t\treturn np.sqrt(0.6) #obtained by integrating 1.5x^4 from -1 to 1", "def _sigma_est_kpss(resids, nobs, lags):\n s_hat = (resids**2).sum()\n for i in range(1, lags + 1):\n resids_prod = np.dot(resids[i:], resids[:nobs - i])\n s_hat += 2 * resids_prod * (1. 
- (i / (lags + 1.)))\n return s_hat / nobs", "def get_std_dev(data, n = -1):\n mean = get_mean(data, n =n)\n\n deviations = []\n\n for i in range(0,n):\n deviations.append( (data[i] - mean)**2 )\n\n std_dev = sqrt( sum(deviations)/n )\n\n return std_dev", "def _gen_normal(self, count, **kwargs):\n normal = scipy.stats.norm(loc=kwargs['mean'], scale=kwargs['stdev'])\n rvs = normal.rvs(count)\n return rvs", "def zstddev(list) -> float:\n\n var = zvariance.zvariance(list)\n std_dev = math.sqrt(var)\n return std_dev", "def rmse1 (a, p) :\n s = len(a)\n i = 0\n v = 0.0\n while i != s :\n v += sqre_diff(a[i], p[i])\n i += 1\n return math.sqrt(v / s)", "def std(self):\n return np.sqrt(self.var)", "def standard_deviation(lst):\n\tnum_items = len(lst)\n\tif num_items == 0:\n\t\treturn -1\n\tmean = sum(lst) / num_items\n\tdifferences = [x - mean for x in lst]\n\tsq_differences = [d ** 2 for d in differences]\n\tssd = sum(sq_differences)\n\treturn ssd", "def sigma_from_cov(params, cov):\n rands = np.random.multivariate_normal(params, cov, 10000)\n breakdowns = -1*rands[:, 1]/rands[:, 0]\n return np.std(breakdowns)", "def std(self, ddof=0, weight_by_area=True):\n return numpy.sqrt(self.var(ddof=ddof, weight_by_area=weight_by_area))", "def rfd_poisson(ps,n):\n lam = sum(ps)\n G = len(ps)\n sample_q = lambda:nprandom.poisson(lam) # chromosomal occupancy approximately poisson.\n sampler = make_sampler(ps)\n return [direct_sampling_ps(ps,sample_q(),sampler) for i in xrange(n)]", "def simpson_e(counts):\n return reciprocal_simpson(counts)/(counts!=0).sum()", "def scv(SP):\n scv = ((np.std(SP,axis=1)/np.mean(SP,axis=1)))\n return scv", "def std(self) -> \"Stream[float]\":\n return self.agg(lambda x: np.std(x, ddof=1)).astype(\"float\")", "def rmse3 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = reduce(lambda w, (x, y) : w + sqre_diff(x, y), z, 0.0)\n return math.sqrt(v / s)", "def standard_deviation(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n return math.sqrt(clean.variance())", "def pstdev(data):\n n = len(data)\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n pvar = ss/n # the population variance\n return round(pvar**0.5, 1)", "def stddev(std_numbers):\n mean = sum(std_numbers) / float(len(std_numbers))\n sum_std = 0.0\n\n for x in std_numbers:\n sum_std += (mean - x) * (mean - x)\n\n variance = sum_std / float(len(std_numbers))\n stddev = math.sqrt(variance)\n\n return stddev", "def ssd(counts, centers):\n\tn = np.sum(counts)\n\tmu = np.sum(centers * counts) / n\n\treturn np.sum(counts * ((centers - mu) ** 2))", "def add_poisson_noise(self, sinogram, max_count=1000):\n\n temp = np.copy(sinogram)\n temp = temp * max_count\n temp = np.random.poisson(temp).astype('float')\n return temp / max_count", "def stddev(data, ddof=0):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/(n-ddof)\n return pvar**0.5", "def std_dev(self) -> float:\n return math.sqrt(self.variance())", "def pdf_n(data, means, x, mu, sigma):\n nominator = euler(-(((data[x] - means[mu]) ** 2) / (2 * sigma)))\n denominator = sqrt(sigma) * sqrt(2 * PI)\n return nominator / denominator", "def norm_pdf(x, sigma):\n return np.exp(-np.dot(x.T, np.linalg.solve(sigma,x))/2.0) / \\\n np.sqrt(np.linalg.det(2*np.pi*sigma))", "def std(self):\n stds = [(x.m-self.mean)**2 + x.std**2 for x in self.xs]\n return np.sqrt(np.dot(self.a, np.array(stds)))", "def _ss(data):\n 
c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def small_poisson(n, S=1):\n\n # Parameters for the lower limit equation. These are for the 1, 2, and 3-sigma levels.\n beta = [0.0, 0.06, 0.222]\n gamma = [0.0, -2.19, -1.88]\n\n # Upper confidence level using equation 9 in Gehrels 1986.\n lambda_u = (n + 1.) * (1. - 1. / (9. * (n + 1.)) + S / (3. * np.sqrt(n + 1.)))**3\n\n # Lower confidence level using equation 14 in Gehrels 1986.\n lambda_l = n * (1. - 1. / (9. * n) - S / (3. * np.sqrt(n)) + beta[S - 1] * n**gamma[S - 1])**3\n\n # To clear the lower limit array of any possible NaNs from n = 0 incidences.\n np.nan_to_num(lambda_l, copy=False)\n\n # Calculate the upper and lower errors from the confidence values.\n upper_err = lambda_u - n\n lower_err = n - lambda_l\n\n return upper_err, lower_err", "def pdf(x, mu, std):\n if std == 0:\n return 0\n return (1.0 / (std * sqrt(2 * pi))) * np.exp(-(x - mu) ** 2 / (2 * std ** 2))", "def PoissonPDF(v):\n from scipy.special import gamma\n\n a = 3.24174\n b = 3.24269\n c = 1.26861\n g = gamma(a / c)\n k1 = c * b ** (a / c) / g\n pdf = k1 * np.power(v, (a - 1)) * np.exp(- b * np.power(v, c))\n return pdf", "def sd(self, dist=None):\n return np.sqrt(self.var(dist))", "def pdf(s, x):\r\n x = Basic.sympify(x)\r\n return 1/(s.sigma*sqrt(2*pi)) * exp(-(x-s.mu)**2 / (2*s.sigma**2))", "def _ss(data):\n c = sum(data)/len(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def calc_std_deviation(average):\r\n sqr_sum = 0\r\n count = len(records)\r\n for i in records:\r\n value = int(i[i.find(',')+1:])\r\n sqr_sum+=(value-average)**2 \r\n std_deviation = math.sqrt(sqr_sum/count)\r\n return std_deviation", "def std_cdf(x):\n return 0.5 + 0.5 * pt.erf(x / pt.sqrt(2.0))", "def dm_sigma_shape(sps):\n env = get_envelope(sps)\n i_xs, i_ys = interp_envelope(env)\n G_fit = fit_gauss(i_xs, i_ys)\n L_fit = fit_lorentz(i_xs, i_ys)\n e_xs, e_ys = env.dm, env.sigma\n g_ys, l_ys = G_fit(e_xs), L_fit(e_xs)\n return (chisquare(e_ys, g_ys, 2)[0], chisquare(e_ys, l_ys, 2)[0])", "def std( x, weights ):\n weights[weights!=weights] = 0\n std2 = np.average( x**2, weights = weights ) - np.average( x, weights = weights )**2\n return np.sqrt( np.abs(std2) )", "def lsterr(inlist):\r\n return stdev(inlist) / float(math.sqrt(len(inlist)))", "def std_cdf(x):\n return 0.5 + 0.5 * tt.erf(x / tt.sqrt(2.))", "def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)", "def calculate_rms(samples):\n chunk = pow(abs(samples), 2)\n return math.sqrt(chunk.mean())", "def counts_uncs(self):\n\n return unumpy.std_devs(self.counts)", "def std(x, axis=None):\r\n try:\r\n sample_variance = var(x, axis=axis)\r\n except IndexError as e: # just to avoid breaking the old test code\r\n raise IndexOrValueError(e)\r\n return sqrt(sample_variance)", "def stdProbabilityNorm(self,std=False):\n sv = str(scipy.__version__).split('.')\n if int(sv[0])==0 and int(sv[1])==15:\n self.raiseAWarning('SciPy 0.15 detected! 
In this version, the normalization factor for normal distributions was modified.')\n self.raiseAWarning('Using modified value...')\n return 1.0/np.sqrt(np.pi/2.)\n else:\n return 1.0/np.sqrt(2.*np.pi)", "def std_err_finite(std: float, neff_tot: float, neff: float) -> float:\n return std * np.sqrt(1 / neff_tot * (neff_tot - neff) / neff_tot)", "def rmse(sim, obs):\n sim = np.asarray(sim)\n obs = np.asarray(obs)\n obs, sim = remove_nans(obs, sim)\n\n return np.sqrt(np.mean((sim - obs)**2))", "def createDist(N):\n return np.random.normal(loc=1000.,scale=5.,size=np.random.poisson(lam=N))", "def get_exact_poisson_uncertainty(x:float, nsigmas:float=1) -> float:\n # see formula at:\n # https://en.wikipedia.org/wiki/Poisson_distribution#Confidence_interval\n pl = stats.norm.cdf(-1*nsigmas, loc=0, scale=1)\n pu = stats.norm.cdf(1*nsigmas, loc=0, scale=1)\n\n lb = stats.chi2.ppf(pl, 2*x)/2 if x!= 0 else 0\n ub = stats.chi2.ppf(pu, 2*(x+1))/2 \n\n # average err is almost equal to sqrt(x)+0.5\n err = (ub-lb)/2\n\n return err" ]
[ "0.67654955", "0.6592472", "0.6389939", "0.6386978", "0.6347877", "0.6347877", "0.6317405", "0.6290753", "0.62828505", "0.6280624", "0.6271539", "0.6244763", "0.61702657", "0.616199", "0.6077561", "0.6076158", "0.6060597", "0.6051322", "0.60506374", "0.6049991", "0.60432696", "0.60369915", "0.6033434", "0.59958255", "0.5993078", "0.59785736", "0.5969206", "0.59645766", "0.59510803", "0.5945937", "0.5942803", "0.59313875", "0.5924538", "0.590424", "0.5903695", "0.5890513", "0.5890513", "0.5889807", "0.58727664", "0.58721536", "0.5859563", "0.58288884", "0.5828114", "0.5801947", "0.5792381", "0.5791244", "0.5782057", "0.57794213", "0.57738227", "0.57571894", "0.5754937", "0.5751544", "0.5748031", "0.5742075", "0.5735145", "0.5734976", "0.5721508", "0.571957", "0.5715659", "0.5714965", "0.57131714", "0.56914103", "0.5690187", "0.56830114", "0.566905", "0.5666699", "0.5658117", "0.5656507", "0.56384784", "0.5611767", "0.5611714", "0.56077975", "0.56060153", "0.5600595", "0.5595004", "0.5595004", "0.5595004", "0.5595004", "0.5595004", "0.55846053", "0.55718243", "0.5567542", "0.5564106", "0.555157", "0.55429196", "0.5521059", "0.5512778", "0.5511705", "0.5511549", "0.550995", "0.55082947", "0.55076087", "0.5486494", "0.54842085", "0.5479451", "0.5476222", "0.547401", "0.5469083", "0.54688513", "0.54670155" ]
0.7089623
0
Aggregate standard deviations of two independent var => sqrt(sd1^2 + sd2^2).
def combine_sd_ratios(data1: DataSeries, data2: DataSeries) -> DataSeries: return (data1.pow(2) + data2.pow(2)).pow(0.5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def standard_deviation( values, sample=False ):\n return ma.sqrt( variance( values, sample ) )", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def pooled_standard_deviation(input_variances):\r\n # compute and return pooled standard deviation\r\n return sqrt(mean(square([float(i) for i in input_variances])))", "def standard_deviation(self):\r\n\t\treturn self.variance()**(1/2)", "def _std(self, data):\n var = stats.var(data)\n if var>0.0:\n sd = math.sqrt(var)\n else:\n sd = 0.0\n return sd", "def test_stddev(self):\n self.assertEqual(stddev(list1, sample=False), np.std(list1))\n self.assertEqual(stddev(list1), np.std(list1, ddof=1))", "def err_std(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.std(x1-x2, axis=axis)", "def std(self):\n variance, mean = self.variance()\n standard_deviation = variance**0.5\n print(f\"Standard Deviation is: {standard_deviation}\")\n return standard_deviation, mean", "def calculate_std_dev(X):\n\tstd_dev = np.sqrt(calculate_variance(X))\n\treturn std_dev", "def std_dev(l):\n return variance(l)**.5", "def sd(vals):", "def standard_dev(self):\n return self.variance()**0.5", "def std_deviation(array):\n if not array or len(array) == 1:\n return 0\n\n average = AGGREGATES['mean_arithmetic'](array)\n variance = map(lambda x: (x-average)**2,array)\n stdev = AGGREGATES['mean_arithmetic'](variance)\n return math.sqrt(stdev)", "def stddev(std_numbers):\n mean = sum(std_numbers) / float(len(std_numbers))\n sum_std = 0.0\n\n for x in std_numbers:\n sum_std += (mean - x) * (mean - x)\n\n variance = sum_std / float(len(std_numbers))\n stddev = math.sqrt(variance)\n\n return stddev", "def std(mean, vals):\n return sqrt(sum([(i-mean)**2 for i in vals])/len(vals))", "def std_dev(L, is_sample=0):\n\treturn math.sqrt(variance(L, is_sample))", "def get_stddev(self):\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i]))", "def stddev(r):\n avg = average(r)\n sdsq = sum([(i - avg) ** 2 for i in r])\n return (sdsq / (len(r) - 1 or 1)) ** 0.5", "def stdev(items):\n return Series.std(Series(items))", "def std_dev(self) -> float:\n return math.sqrt(self.variance())", "def std(values, ave):\n return math.sqrt(float(sum((value-ave)**2 for value in values))/len(values))", "def stdDev(data):\r\n sum = 0\r\n ave = average(data)\r\n for i in data:\r\n sum += (i-ave)**2\r\n return math.sqrt(sum/len(data))", "def standard_deviation(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n return math.sqrt(clean.variance())", "def calc_std(sig):\n return np.std(sig)", "def StandardDeviation(numlist):\n\tv = Variance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def standard_deviation(list):\n num_items = len(list)\n mean = sum(list) / num_items\n differences = [x - mean for x in list]\n sq_differences = [d ** 2 for d in differences]\n ssd = sum(sq_differences)\n\n\n variance = ssd / num_items\n\n sd = sqrt(variance)\n\n return sd", "def standard_deviation(scores):\n num_scores = len(scores)\n if num_scores == 0: return 0\n\n mean_score = mean(scores, False)\n sum_x2 = sum(score**2 for score in scores)\n std_dev_score = (sum_x2/num_scores - mean_score ** 2) ** 0.5\n return round(std_dev_score, 2)", "def std(x):\n return 
sqrt(TinyStatistician.var(x))", "def global_std_deviation(self):\n\n return np.std(self.average_scores_all_subjects(), axis=0)", "def stddev(data, ddof=0):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/(n-ddof)\n return pvar**0.5", "def test_std_2d(self):\r\n inp = array([[1, 2, 3], [4, 5, 6]])\r\n exps = ( # tuple(scipy_std(inp, ax) for ax in [None, 0, 1])\r\n 1.8708286933869707,\r\n array([2.12132034, 2.12132034, 2.12132034]),\r\n array([1., 1.]))\r\n results = tuple(std(inp, ax) for ax in [None, 0, 1])\r\n for obs, exp in zip(results, exps):\r\n testing.assert_almost_equal(obs, exp)", "def rmse_calc(arr1, arr2):\n assert arr1.shape==arr2.shape\n \n return np.sqrt(np.mean((arr2-arr1)**2))", "def std(self):\n stds = [(x.m-self.mean)**2 + x.std**2 for x in self.xs]\n return np.sqrt(np.dot(self.a, np.array(stds)))", "def _deviation_2(self, features):\n diffs = [self.c_fn_2(features[t], features[t+1]) \n for t in range(features.shape[0]-1)]\n return np.mean(diffs)", "def stdev(data, xbar=None):\n return math.sqrt(variance(data, xbar))", "def std(self):\n return np.sqrt(self.var)", "def sd(x):\n x_mean = mean(x)\n return (\n sum((x_i - x_mean) ** 2 for x_i in x) / (len(x) - 1)\n ) ** 0.5", "def dif_std_for_region(var1,var2,mask):\r\n\t\t\r\n\t\tdif = np.nansum(np.nansum(np.multiply(mask,np.nanmean(var1,axis=0) - np.nanmean(var2,axis=0)),axis=1),axis=0)\r\n\t\tvar1_domain_mean = np.nansum(np.nansum(np.multiply(mask,var1),axis=2),axis=1)\r\n\t\tvar2_domain_mean = np.nansum(np.nansum(np.multiply(mask,var2),axis=2),axis=1)\r\n\t\tstd = np.std(var1_domain_mean - var2_domain_mean);print std\r\n\t\tp25 = np.abs(dif - np.nanpercentile(var1_domain_mean - var2_domain_mean,25,axis=0))/1.25\r\n\t\tp75 = np.abs(np.nanpercentile(var1_domain_mean - var2_domain_mean,75,axis=0) - dif)/1.25\r\n\t\t# print dif, p25,p75\r\n\t\treturn dif,p25,p75", "def add_std(std):\n\n variance = [np.power(s, 2) for s in std]\n return np.sqrt(sum(variance)/len(variance))", "def std(self, dset):\n avg = self.mean(dset)\n variance = sum([math.pow(x - avg, 2) for x in dset])\n std = math.sqrt(variance)\n return std", "def calculate_std(self) -> float:\n\n if self.data:\n return np.std(self.data)\n else:\n return self.sigma", "def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)", "def zstddev(list) -> float:\n\n var = zvariance.zvariance(list)\n std_dev = math.sqrt(var)\n return std_dev", "def overall_standard_deviation(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n return np.std(test_data - truth_data)", "def relative_change_stdev(mean1, mean2, std1, std2):\n mean1, mean2 = float(mean1), float(mean2)\n quotient = mean2 / mean1\n first = std1 / mean1\n second = std2 / mean2\n std = quotient * math.sqrt(first * first + second * second)\n return (quotient - 1) * 100, std * 100", "def std(dfs):\n df_mean = mean(dfs)\n df_sq = sum([(df - df_mean)*(df - df_mean) for df in dfs])\n return df_sq / len(dfs)", "def sampleStandardDeviation(numlist):\n\tv = sampleVariance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def stdev(values):\n mean = avg(values)\n diffs = [(value - mean) ** 2 for value in values]\n return avg(diffs) ** 0.5", "def std(x, axis=None):\r\n try:\r\n sample_variance = var(x, axis=axis)\r\n except IndexError as e: 
# just to avoid breaking the old test code\r\n raise IndexOrValueError(e)\r\n return sqrt(sample_variance)", "def std(self, ddof=0, weight_by_area=True):\n return numpy.sqrt(self.var(ddof=ddof, weight_by_area=weight_by_area))", "def calc_stdev(a, b, c, d, e):\n mean_of_num = (a + b + c + d + e) / 5\n return (((a - mean_of_num)**2 + (b - mean_of_num)**2 + (c - mean_of_num)**2\n + (d - mean_of_num)**2 + (e - mean_of_num)**2) / 5) ** 0.5", "def calc_standard_deviation(data: list) -> float:\n mean = calc_mean(data)\n acc = 0.0\n for n in data:\n acc += (n - mean) ** 2\n acc /= len(data) - 1\n return math.sqrt(acc)", "def get_std_dev(self, data):\n mean = 0\n data_arr = []\n for i in data:\n data_arr.append(i[1])\n return statistics.stdev(data_arr)", "def get_stdev(self):\n var_x = numpy.var(self._x)\n var_y = numpy.var(self._y)\n return numpy.sqrt(var_x + var_y)", "def stdev(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmean_values=column_matrix.std(0)\n\tstd_values=mean_values.tolist()\n\treturn std_values", "def avg_std_dev(positions):\n # print(\"len pos = \", len(positions))\n # print(positions)\n if sum(positions) == 0:\n the_mean = 0\n standard_dev = 0\n return the_mean, standard_dev \n try:\n the_mean = sum(positions) / float(len(positions))\n standard_dev = numpy.std(positions)\n except ValueError:\n the_mean = 0\n standard_dev = 0\n return the_mean, standard_dev", "def std(self):\n\n return self._reduce_for_stat_function(F.stddev, only_numeric=True)", "def get_std_dev(data, n = -1):\n mean = get_mean(data, n =n)\n\n deviations = []\n\n for i in range(0,n):\n deviations.append( (data[i] - mean)**2 )\n\n std_dev = sqrt( sum(deviations)/n )\n\n return std_dev", "def get_stdev(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n from math import sqrt\n return sqrt(cls.get_var(data, is_population))", "def stddev(self, num_list):\n try:\n mean = self.average(num_list)\n\n minus_mean = []\n\n for number in num_list:\n try:\n minus_mean.append((number - mean) ** 2)\n except Exception as e:\n print(\"Error: \", e)\n\n meany_mean = self.average(minus_mean)\n\n meany_mean = meany_mean ** .5\n\n except Exception as e:\n print(\"Error: \", e)\n\n return meany_mean", "def calc_std_deviation(average):\r\n sqr_sum = 0\r\n count = len(records)\r\n for i in records:\r\n value = int(i[i.find(',')+1:])\r\n sqr_sum+=(value-average)**2 \r\n std_deviation = math.sqrt(sqr_sum/count)\r\n return std_deviation", "def GetStandardDeviation(vals_l, mean):\n\n\n sum_deviations_squared = 0\n\n for x in vals_l:\n sum_deviations_squared += (x - mean)**2\n\n return math.sqrt(float(sum_deviations_squared)/float(len(vals_l)))", "def std (nums,n_mean=None):\r\n if not n_mean:\r\n n_mean = mean(nums)\r\n n = len(nums)\r\n if n == 1:\r\n return 0.0\r\n variance = 0.0\r\n for i in xrange(n):\r\n tmp = (nums[i]-n_mean)\r\n variance += (tmp*tmp)\r\n \r\n variance /= n-1\r\n return sqrt(variance)", "def _get_standard_deviation(intermediate_normalization_dict):\n\n num_values = float(intermediate_normalization_dict[NUM_VALUES_KEY])\n multiplier = num_values / (num_values - 1)\n\n return numpy.sqrt(multiplier * (\n intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] -\n intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2\n ))", "def datasd(var):\n sd = 0\n getdata = \"sd = (data[i].\" + var + \" - dataavg(var)) ** 2\"\n for i in data:\n exec getdata\n return sqrt(sd)", "def variance(self):\n observations_raw = input(\"Observations: \").split()\n observations = [int(elem) for elem 
in observations_raw]\n observations_squared = sum([num**2 for num in observations])\n aggregate_squared = sum(observations)**2\n n = len(observations)\n mean = sum(observations)/n\n variance = (observations_squared - (aggregate_squared/n))/(n-1)\n print(f\"Variance is: {variance}\")\n return variance, mean", "def mean_var_sd(x):\n n = x.size\n assert 2 <= n\n mean = x.sum() / n\n diff = x - mean\n var = np.vdot(diff, diff) / (n - 1)\n sd = var ** 0.5\n return {\n 'mean': mean,\n 'var': var,\n 'sd': sd,\n }", "def std(self):\n return self._summarize(lambda c: c.std)", "def mean_rsd(numbers):\n mean = statistics.fmean(numbers)\n sd = statistics.stdev(numbers)\n rsd = 100 * sd / abs(mean)\n return mean, rsd", "def atstdev(a,limits=None,inclusive=(1,1)):\r\n return N.sqrt(tvar(a,limits,inclusive))", "def stddev(data, ddof=0):\n n = len(data)\n if n < 2:\n return 0\n ss = _ss(data)\n pvar = ss/(n-ddof)\n return pvar**0.5", "def SSD(x,y):\n return np.sum((x-y)**2)", "def _std(listvalue,ddof=1):\n\tmean=_mean(listvalue)\n\ttemp=[math.pow(i-mean,2) for i in listvalue]\n\tres=math.sqrt(sum(temp)/(len(listvalue)-ddof))\n\treturn res", "def calc_sq_std(df):\n\n sq_std = df.dropna()\n\n sq_std = (df['std'].divide(df['mean']))**2\n\n sq_std.name = 'sq_std'\n\n sq_std = pd.DataFrame(sq_std)\n\n sq_std = sq_std.dropna()\n\n return sq_std", "def standard_variation(df):\r\n\r\n\tdf_sdv_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_sdv_dict[col] = [df[col].std(), df[col].var()]\r\n\r\n\tdf_sdv = pd.DataFrame(df_sdv_dict, index=['Standard Deviation', 'Variance'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_sdv", "def rmse(a,b):\n \n ### Import modules\n import numpy as np\n \n ### Calculate RMSE\n rmse_stat = np.sqrt(np.mean((a - b)**2))\n \n return rmse_stat", "def stddev(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n stddev = variance**(1/2)\n if sample == False:\n variance = sum(distance_squared)/(self.size)\n stddev = variance**(1/2)\n return stddev", "def std( x, weights ):\n weights[weights!=weights] = 0\n std2 = np.average( x**2, weights = weights ) - np.average( x, weights = weights )**2\n return np.sqrt( np.abs(std2) )", "def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n ret = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)\n\n ret = np.sqrt(ret)\n return ret", "def standard_deviation(lst):\n\tnum_items = len(lst)\n\tif num_items == 0:\n\t\treturn -1\n\tmean = sum(lst) / num_items\n\tdifferences = [x - mean for x in lst]\n\tsq_differences = [d ** 2 for d in differences]\n\tssd = sum(sq_differences)\n\treturn ssd", "def empirical_std_deviation(x):\n import numpy as np\n x = np.array(x)\n M = np.size(x)\n xm = np.mean(x)\n\n #return np.sqrt(1./(M-1.)*np.sum((x-xm)**2))\n return np.sqrt( M/(M-1.) 
* ( (1./M*np.sum(x**2)) - xm**2 ) )", "def deviation(values, val):\n\tm = mean(values)\n\tdev = abs(val-m)\n\tsd = standard_deviation(values)\n\treturn float(dev)/sd if sd!=0 else 0.0", "def normal_deviation(a, b):\r\n deviation = math.sqrt((b-a)**2 / 12)\r\n print(\"The deviation of this normal distribution is : \", deviation)\r\n return deviation", "def get_mean_stddev(self):\n return self.get_mean(), self.get_std_dev()", "def std(self):\r\n return np.std(self.data_array)", "def rmse(x: np.ndarray, y: np.ndarray):\n x, y = np.copy(x), np.copy(y)\n if x.ndim > 1:\n return np.sqrt(np.nanmean((x-y)**2, axis=1))\n return np.sqrt(np.nanmean((x-y)**2))", "def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var", "def calculate_std_dev(temps, temp_average):\n\n variance_sum = 0\n for temp in temps:\n variance = (temp - temp_average) ** 2\n variance_sum += variance\n\n variance = variance_sum / len(temps)\n standard_deviation = variance ** 0.5\n\n return standard_deviation", "def test_stdev_from_mean(self):\r\n x = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = stdev_from_mean(x)\r\n self.assertFloatEqual(\r\n result,\r\n [-1.292463399014413,\r\n -0.60358696806764478,\r\n -0.045925095396451399,\r\n 0.77416589382589174,\r\n 1.1678095686526162])", "def standard_deviation_error(y_true, y_pred):\n ...", "def mean_stddev(self):\n if len(self.vs) == 0:\n raise StdDevFilterException\n\n mx = self.mean()\n # compute variance\n variance = sum([(x - mx)**2 for x in self.vs])/len(self.vs)\n # return mean value and standard deviation (square root of variance)\n return mx,math.sqrt(variance)", "def rmse(a, b):\n\n n = len(a)\n return np.linalg.norm(a - b) / np.sqrt(n)", "def std(x, ddof=0):\n with mp.extraprec(16):\n return mp.sqrt(var(x, ddof))", "def rmse(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.sqrt(mse(x1, x2, axis=axis))", "def variance(x):\r\n n = len(x)\r\n deviations = dev_mean(x)\r\n return sum_of_squares(deviations) / (n-1)", "def sd(self, v):\n return np.sqrt(np.dot(self.mat_var, v) + self.var_ext)", "def pstdev(data):\n n = len(data)\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n pvar = ss/n # the population variance\n return round(pvar**0.5, 1)", "def sd(self, dist=None):\n return np.sqrt(self.var(dist))" ]
[ "0.74803764", "0.7347811", "0.7347811", "0.7310347", "0.7310347", "0.72294706", "0.7142489", "0.71371955", "0.70903856", "0.705318", "0.69752705", "0.6966709", "0.6956561", "0.6909036", "0.68810153", "0.68303746", "0.6817877", "0.681063", "0.6780972", "0.6778679", "0.677482", "0.6774568", "0.6761884", "0.675145", "0.6748026", "0.6740117", "0.67348737", "0.6731002", "0.67252606", "0.67107034", "0.6685399", "0.66407436", "0.661485", "0.66083723", "0.659234", "0.6582401", "0.65823036", "0.6575236", "0.6566012", "0.6559905", "0.65403473", "0.65131795", "0.6508813", "0.6508543", "0.6503535", "0.6499337", "0.6496329", "0.6494829", "0.64908266", "0.6489795", "0.64859146", "0.64759445", "0.64550877", "0.64526284", "0.64368594", "0.6429861", "0.6423913", "0.64188683", "0.64147466", "0.6409606", "0.6407184", "0.64019185", "0.63952214", "0.6394444", "0.6362794", "0.63284576", "0.6322201", "0.6318764", "0.6317501", "0.6311257", "0.63067275", "0.6299914", "0.6298891", "0.6292996", "0.6290382", "0.6277512", "0.6275551", "0.6274921", "0.627094", "0.6259277", "0.62534606", "0.62504244", "0.6236487", "0.62309414", "0.6223453", "0.6214396", "0.62120366", "0.6209891", "0.6207091", "0.61991763", "0.6191538", "0.61909676", "0.61895525", "0.61847687", "0.6168833", "0.61677384", "0.6154598", "0.61533713", "0.6146229", "0.6145987", "0.6144711" ]
0.0
-1
Overridden to take care of the password hashing.
def create(self, validated_data): username = validated_data.get('username') email = validated_data.get('email') password = validated_data.get('password') first_name = validated_data.get('first_name', '') last_name = validated_data.get('last_name', '') return User.objects.create_user(username, email, password, first_name=first_name, last_name=last_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash_password(self):\n self.__password = self.str_to_hash(self.__password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def hash_password(self, password):\n self.password = pwd_context.encrypt(password)", "def hash_password(self, password):\n self.password_hash = generate_password_hash(password)", "def hash_password(self, original_password):\n self.password = generate_password_hash(original_password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n\n self.password_hash = generate_password_hash(password)", "def password(self, value):\n self.password_hashed = func.crypt(value, func.gen_salt('bf'))", "def set_password(self, password):\n self.password = self.hash_password(password)", "def _set_password(self, password):\n self._password = generate_password_hash(password)", "def set_password_hash(self, password):\n salt = bcrypt.gensalt()\n self.password_hash = bcrypt.hashpw(password.encode(), salt)", "async def _before_save(self) -> None:\n await super()._before_save()\n\n # Convert password to hash if is plain text (works for first insert and updates)\n if self.password is not None and 'argon2' not in self.password:\n self.password = pwd.create(self.password)", "def get_password_hash(self, username):\n raise NotImplementedError()", "def passsword(self, password):\n self.passwor_harsh = generate_password_hash(password)", "def _set_password(self, password):\r\n hashed_password = password\r\n\r\n if isinstance(password, unicode):\r\n password_8bit = password.encode('UTF-8')\r\n else:\r\n password_8bit = password\r\n\r\n # Hash a password for the first time, with a randomly-generated salt\r\n salt = bcrypt.gensalt(10)\r\n hashed_password = bcrypt.hashpw(password_8bit, salt)\r\n\r\n # Make sure the hased password is an UTF-8 object at the end of the\r\n # process because SQLAlchemy _wants_ a unicode object for Unicode\r\n # fields\r\n if not isinstance(hashed_password, unicode):\r\n hashed_password = hashed_password.decode('UTF-8')\r\n\r\n self._password = hashed_password", "def passwd(self, plaintext):\n self._password = bcrypt.generate_password_hash(plaintext.encode('utf8')).decode('utf8')", "def _hash_password(self, password):\n passwordhash = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())\n return passwordhash", "def set_password(self, password):\n self.password_hash = generate_password_hash(f\"{password}{self.user_salt}\")", "def set_password(self, raw_password):\n self.password = security.generate_password_hash(raw_password, length=12)", "def set_password(self, raw_password):\n self.password = security.generate_password_hash(raw_password, length=12)", "def password(self, password):\n self.password_hash = generate_password_hash(password)\n self.password_set = True", "def set_password(self, password):\n self.password_hash = generate_password_hash(password)", "def set_password(self, password):\n self.password_hash = generate_password_hash(password)", "def get_password(self):\n raise NotImplementedError('get_password')", "def set_password(self, password):\n self.password_hash = generate_password_hash(str(password))", "def password(self) -> str:", "def set_password(self, password):\n self.password = 
generate_password_hash(password, method='pbkdf2:sha256')", "def set_password(self, raw_password):\n if raw_password is None:\n self.set_unusable_password()\n else:\n import random\n algo = PASSWORD_ALGO\n salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]\n hsh = get_hexdigest(algo, salt, raw_password)\n self.password = '%s$%s$%s' % (algo, salt, hsh)", "def clean(self):\n if self.email is None or self.password is None:\n raise ValidationError\n self.password = generate_password_hash(self.password)", "def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)", "def GetPassword(self):\n pass", "def __init__(self, email, password):\n self.email = email\n self.password = Bcrypt().generate_password_hash(password).decode()", "def setPassword(self, unhashPass):\n\t\tself.passHash = generate_password_hash(unhashPass)", "def set_password(self, password):\n self.password = generate_password_hash(password)", "def acceptsPassword(self):\r\n raise NotImplementedError()", "def test_password_match(self):\r\n\r\n tst = User()\r\n tst._password = self.test_hash\r\n\r\n self.assertTrue(\r\n tst._password == self.test_hash, \"Setting should have hash\")\r\n self.assertTrue(\r\n tst.password == self.test_hash, \"Getting should have hash\")\r\n self.assertTrue(\r\n tst.validate_password(self.test_password),\r\n \"The password should pass against the given hash: \" + tst.password)", "def enter_password(self):", "def password(self, password):\n if password is None:\n self._password = None\n else:\n self._password = generate_password_hash(password)", "def set_password(self, password):\n\n self.password = bcrypt.generate_password_hash(password)", "def _get_password(self):\n return self._password", "def _get_password(self):\r\n return self._password", "def password(self):\n return self._password()", "def update_password(self, pwd):\n self.password = bcrypt.generate_password_hash(pwd).decode('utf8')", "def set_password(self, password):\n self.__init__(password=password)", "def test_password_set(self):\r\n tst = User()\r\n tst.password = self.test_password\r\n\r\n self.assertEqual(\r\n len(tst.password),\r\n 60,\r\n \"Hashed should be 60 char long: \" + tst.password)\r\n self.assertEqual(\r\n '$2a$',\r\n tst.password[:4],\r\n \"Hash should start with the right complexity: \" + tst.password[:4])", "async def password(self, ctx):\n pass", "def _get_user_password(self):\n return self.__user_password", "def hash_password(password):\n #return passlib.hash.pbkdf2_sha512.encrypt(password)\n return sha256_crypt.hash(password)", "def set_passwords(self, passwords):\n self.passwords = {}\n for user_name in passwords:\n self.passwords[user_name] = sha512_crypt.hash(\n passwords[user_name], rounds=5000)", "def _hash_password(password: str) -> str:\n # return pbkdf2_sha512.encrypt(password, rounds=ROUNDS, salt=SALT)\n return pbkdf2_sha512.using(rounds=ROUNDS, salt=SALT).hash(password)", "def new_password(self, login, password):\n login = self._sha512('{:s}{:s}'.format(login, self.salt))\n pw = self._pepper_hash(self._get_peppers(login).next(), password, self.salt)\n hashed = bcrypt.hashpw(pw, bcrypt.gensalt(7))\n return login, hashed", "def update_password(self, user, password):\n user.password = hashers.make_password(password)", "def encode(self, password, salt):\n raise NotImplementedError()", "def __init__(self, **kwargs):\n self.session = None\n\n super(Password, self).__init__(**kwargs)\n\n if not self._mfa_supported():\n self._mfa_passcode = None", "def test_password_setter(self):\n 
self.user.password = '123456'\n self.assertIsNotNone(self.user.password_hash)", "def set_password(self, new_password):\n super(Mafiasi, self).set_password(new_password)\n self.new_password = new_password", "def password(cls):\n return User.CryptComparator(cls.password_hashed)", "def hash_passwd(password, hash_method=\"sha256\"):\n\n return generate_password_hash(password, hash_method)", "def password(self, password) :\n\t\ttry :\n\t\t\tself._password = password\n\t\texcept Exception as e:\n\t\t\traise e", "def check_password(self, raw_password):\n\t\tdef setter(raw_password):\n\t\t\tself.set_password(raw_password)\n\t\t\t# Password hash upgrades shouldn't be considered password changes.\n\t\t\tself._password = None\n\t\t\tself.save(update_fields=[\"password\"])\n\t\treturn check_password(raw_password, self.password, setter)", "def set_password(self, value):\n hashed = bcrypt.encode(value)\n self._password = unicode(hashed)", "def hash_password(self, password):\n\n # Use passlib's CryptContext to hash a password\n password_hash = self.password_crypt_context.encrypt(password)\n\n return password_hash", "def hash_password(self, password):\n salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')\n pwdhash = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), \n salt, 100000)\n pwdhash = binascii.hexlify(pwdhash)\n return (salt + pwdhash).decode('ascii')", "def password(self):\n raise NotImplementedError(\n \"Comparison only supported via the database\")", "def setPassword(self, password, hashed=False):\n if hashed or self.hashed:\n self.hashed = True\n self.password = utils.saltHash(password)\n else:\n self.password = password", "def test_hash_verification(self):\n pw = generate_password(8)\n for hash_method in (ldap_des_crypt, ldap_sha512_crypt, ldap_md5,\n ldap_salted_sha1):\n encrypted = hash_method.hash(pw)\n self.assertTrue(verify_password(pw, encrypted),\n \"{}: '{}' should verify '{}'\"\n .format(hash_method.name, encrypted, pw))", "def set_password(self, password):\n self.password = md5crypt(password, gen_salt())", "def hash_password(self, password):\n new_password = ['*' for i in password if password is not None]\n return ''.join(new_password)", "def hashPassword(passwd):\r\n \r\n return hashlib.sha224(passwd).hexdigest()", "def set_password(self, value):\n # Salt need to be generated before set password\n m = hashlib.sha256()\n m.update('-'.join([\n str(datetime.now()),\n config.get('security.password_salt')\n ]))\n self.salt = m.hexdigest()\n self.password_pending = False\n self.password = self.__encrypt(value)", "def __setattr__(self, name, value):\n if name == \"password\":\n value = md5(value.encode()).hexdigest()\n print(\"qzedazedazeaz\")\n super().__setattr__(name, value)", "def test_set_user_password(self):\n pass", "def password_hash_algorithm(self, password_hash_algorithm):\n\n self._password_hash_algorithm = password_hash_algorithm", "def check_password(self, raw_password):\n def setter(raw_password):\n self.set_password(raw_password)\n self.save(update_fields=[\"password\"])\n return check_password(raw_password, self.password, setter)", "def set_password(self, raw_password: str):\n self.new_password = raw_password", "def setpassword(self, pwd):\n pass", "def __init__(self, username, password):\n self.username = username\n self.password = password\n self.salt = os.urandom(32)", "def check_password(self, raw_password):\n def setter(raw_password):\n self.set_password(raw_password)\n self.save(update_fields=[\"password\"])\n return check_password(raw_password, 
self.password, setter)", "def check_password(self, raw_password):\n def setter(raw_password):\n self.set_password(raw_password)\n self.save()\n return check_password(raw_password, self.password, setter)", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def hash_password(self, google_token):\n self.google_token = pwd_context.encrypt(google_token)", "def _check_password(self, body):\n if not self.config.security_initialize:\n raise RuntimeError(\"First set a password\")\n\n password = hash_password(body[ATTR_PASSWORD])\n if password != self.config.security_password:\n raise RuntimeError(\"Wrong password\")", "def _authenticate_password(user_input: str, hash_: str) -> bool:\n return pbkdf2_sha256.verify(user_input, hash_)", "def update_password(self, password):\n self.password = scryptsalsa208sha256_str(password.encode('utf-8')).decode('utf-8')\n return True", "def _session_password_auth(self) -> None:\n try:\n self.session.userauth_password(self.auth_user, self.auth_password)\n except AuthenticationError as exc:\n logging.critical(\n f\"Password authentication with host {self.host} failed. Exception: {exc}.\"\n f\"\\n\\tTrying keyboard interactive auth...\"\n )\n try:\n self.session.userauth_keyboardinteractive(self.auth_user, self.auth_password)\n except AuthenticationError as exc:\n logging.critical(\n f\"Keyboard interactive authentication with host {self.host} failed. \"\n f\"Exception: {exc}.\"\n )\n raise AuthenticationFailed\n except Exception as exc:\n logging.critical(\n \"Unknown error occurred during keyboard interactive authentication with host \"\n f\"{self.host}; Exception: {exc}\"\n )\n raise exc\n except Exception as exc:\n logging.critical(\n \"Unknown error occurred during password authentication with host \"\n f\"{self.host}; Exception: {exc}\"\n )\n raise exc", "def set_password(self, password):\n self.password = password", "def password(self) :\n\t\ttry :\n\t\t\treturn self._password\n\t\texcept Exception as e:\n\t\t\traise e", "def check_password(self, raw_password):\n\n def setter(raw_password):\n self.set_password(raw_password)\n self.save(update_fields=[\"password\"])\n\n return check_password(raw_password, self.password, setter)", "def password_encryption(self, password):\n return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())", "def set_password(self, user, password):\n hashed_password = self.hash_password(password)\n server_name = self.get_server_name()\n hookenv.log(\"Storing hash: {}\".format(hashed_password), hookenv.DEBUG)\n result = self.pgsql_query(\n \"UPDATE users SET password_hash = '{}' WHERE name = '@{}:{}';\".format(\n hashed_password, user, server_name\n )\n )\n return result", "def update(self, instance, validated_data):\n\n password = validated_data.get('password', None)\n if password is not None:\n validated_data['password'] = AESCipher(password, self.context['request'].user.password).encrypt()\n return super().update(instance, validated_data)", "def __generate_hash(password):\n if password is None:\n return None\n return bcrypt.generate_password_hash(password, rounds=10).decode(\"utf8\")", "def save_password(self):\n Credential.passwords.append(self)", "def get_verified_password(self):\n raise NotImplementedError('get_verified_password')", "def encrypt_password(cls, password):\n return generate_password_hash(password)" ]
[ "0.7774024", "0.7681036", "0.738924", "0.73867005", "0.73863477", "0.715053", "0.715053", "0.715053", "0.715053", "0.7084934", "0.7043762", "0.7021125", "0.69785094", "0.6971123", "0.6949527", "0.6932085", "0.69224435", "0.6888295", "0.6843389", "0.68132836", "0.67045635", "0.6685192", "0.6685192", "0.6671058", "0.66660976", "0.66660976", "0.66357714", "0.6631527", "0.6619377", "0.6606724", "0.65927154", "0.6574805", "0.65733075", "0.6571872", "0.6547964", "0.65327334", "0.65182185", "0.6471768", "0.6468071", "0.64582634", "0.64506376", "0.64432883", "0.6421771", "0.63895684", "0.6365016", "0.63629735", "0.63569874", "0.63563895", "0.63555026", "0.63489866", "0.63348377", "0.63260037", "0.6308145", "0.630688", "0.62960047", "0.6293057", "0.62890667", "0.6279333", "0.6277489", "0.6276326", "0.6274275", "0.6272427", "0.62566864", "0.62565863", "0.62432516", "0.6237179", "0.62218964", "0.6198509", "0.6193749", "0.6178296", "0.61776525", "0.6158574", "0.61565846", "0.6151243", "0.61479276", "0.614131", "0.61406595", "0.61298585", "0.61265296", "0.6124642", "0.61151767", "0.61018723", "0.61000085", "0.61000085", "0.61000085", "0.61000085", "0.60987496", "0.6095881", "0.6088996", "0.6088173", "0.6087637", "0.6083031", "0.6082928", "0.6080088", "0.60693765", "0.60660076", "0.606592", "0.60513234", "0.60508263", "0.6046242", "0.60460025" ]
0.0
-1
Takes command argparse arguments and pushes them into the config with syntax args.
def inject_args_in_config(args, config):
    log = logging.getLogger(__name__)
    for t_opt in list(args._options.values()):
        n = t_opt.name
        first_ = n.find('_')
        if first_ > 0:
            s, o = n[:first_], n[first_ + 1:]
            v = t_opt.value()
            log.info('inject argument {} = {} in configuration section {}, option {}'.format(n, v, s, o))
            if not config.has_section(s):
                config.add_section(s)
            config.set(s, o, v)
    return config
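The function above assumes an `args` object exposing an `_options` mapping of option objects (each with a `name` attribute and a `value()` method) and a `configparser`-compatible `config`; option names of the form `section_option` are split on the first underscore. A minimal usage sketch, with hypothetical `_Opt` and `_Args` stand-ins for the option types that the snippet does not show:

import configparser
import logging

logging.basicConfig(level=logging.INFO)


class _Opt:
    # Hypothetical stand-in for the option objects held in args._options;
    # the real option class is not part of the snippet above.
    def __init__(self, name, value):
        self.name = name
        self._value = value

    def value(self):
        return self._value


class _Args:
    # Hypothetical container mimicking the interface inject_args_in_config expects.
    def __init__(self, opts):
        self._options = {o.name: o for o in opts}


args = _Args([_Opt('server_host', 'localhost'), _Opt('server_port', '8080')])
config = inject_args_in_config(args, configparser.ConfigParser())

# 'server_host' becomes option 'host' in section 'server'; likewise 'port'.
print({s: dict(config[s]) for s in config.sections()})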
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _configure_args(self, parser: ArgumentParser) -> ArgumentParser:\n pass", "def add_args(self, parser):", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def parse_arguments():\n custom_config = config.read()\n arguments = docopt(__doc__, version='Montanus %s' % __version__)\n logger.debug(custom_config)\n conf_file = arguments.get('--with-conf')\n if conf_file is not None:\n conf_config = config.read(conf_file)\n\n for (k, v) in conf_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(arguments)\n command_config = {\n 'templates_path': arguments.get('<templates_path>'),\n 'static_files_path': arguments.get('--with-static-files-path') \\\n if arguments.get('-with-static-files-path') is not None \\\n else arguments.get('<templates_path>'),\n 'delete_source': arguments.get('--delete'),\n 'protocol': arguments.get('--with-protocol'),\n 'domains': arguments.get('--with-domains').split(',') \\\n if arguments.get('--with-domains') is not None \\\n else None,\n 'md5_len': int(arguments.get('--with-md5-len')),\n 'md5_concat_by': arguments.get('--with-md5-concat-by')\n }\n logger.debug(command_config)\n\n for (k, v) in command_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(custom_config)\n return DictWrapper(custom_config)", "def setup_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parser.set_defaults(command=lambda x: parser.print_usage())\n\n subparsers = parser.add_subparsers(\n title=f\"{COMMAND} commands\",\n description=\"sub commands for managing configs\"\n )\n\n apply_parser = subparsers.add_parser(\n 'apply',\n help='download and apply a config from github or bitbucket'\n )\n\n apply_parser.set_defaults(command=main)\n\n apply_parser.add_argument(\n '-u',\n '--user',\n required=True,\n help='username to pull from'\n )\n apply_parser.add_argument(\n '-r',\n '--repo',\n default='config',\n help='repo name to pull, defaults to config'\n )\n apply_parser.add_argument(\n '--no-download',\n action='store_true',\n help='Use already downloaded copy'\n )\n apply_parser.add_argument(\n '-b',\n '--bitbucket',\n action='store_const',\n dest='host',\n default=Hosts.GITHUB,\n const=Hosts.BITBUCKET,\n help='pull from bitbucket'\n )\n apply_parser.add_argument(\n '-g',\n '--github',\n action='store_const',\n dest='host',\n default=Hosts.GITHUB,\n const=Hosts.GITHUB,\n help='pull from bitbucket'\n )\n apply_parser.add_argument(\n '--no-apply',\n action=\"store_true\",\n dest=\"no_apply\",\n help=\"Don't actually run\"\n )\n\n list_parser = subparsers.add_parser('list', help=\"list downloaded configs\")\n\n list_parser.set_defaults(command=list_configs)\n\n list_parser.add_argument(\n '-u',\n '--user',\n help='username to pull from'\n )\n\n undo_parser = subparsers.add_parser(\n 'undo',\n help='Restore the snapshot taken when config was last applied'\n )\n\n undo_parser.set_defaults(command=undo_config)\n\n undo_parser.add_argument(\n '-u',\n '--user',\n required=True,\n help='username to pull from'\n )\n undo_parser.add_argument(\n '-r',\n '--repo',\n default='config',\n help='repo name to pull, defaults to config'\n )\n\n return parser", "def parse_arguments(args):", "def parse_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'config',\n help='Config file')\n 
parser.add_argument(\n '--quiet',\n '-q',\n action='store_true',\n help='do not print to console'\n )\n parser.add_argument(\n '--password',\n '-p',\n action='store_true',\n help='Set password in keyring.'\n )\n parser.add_argument(\n '--update',\n '-u',\n action='store_true',\n help='Only add transactions after last date in database.'\n )\n parser.add_argument(\n '--mark_seen',\n '-m',\n action='store_true',\n help='Mark fetched emails as seen.'\n )\n\n return parser.parse_args()", "def add_arguments(self, parser):", "def push_parser():\n description = \\\n \"This command pushes all local files that have been added to \"\\\n \"Transifex to the remote server. All new translations are merged \"\\\n \"with existing ones and if a language doesn't exists then it gets \"\\\n \"created. If you want to push the source file as well (either \"\\\n \"because this is your first time running the client or because \"\\\n \"you just have updated with new entries), use the -f|--force option. \"\\\n \"By default, this command will push all files which are watched by \"\\\n \"Transifex but you can filter this per resource or/and language. \"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-l\", \"--language\", action=\"store\", dest=\"languages\",\n default=None, help=\"Specify which translations you \"\n \"want to push (defaults to all)\")\n parser.add_argument(\"-r\", \"--resource\", action=\"store\", dest=\"resources\",\n default=None, help=\"Specify the resource for which \"\n \"you want to push the translations (defaults to all)\")\n parser.add_argument(\"-f\", \"--force\", action=\"store_true\",\n dest=\"force_creation\", default=False,\n help=\"Push source files without checking modification \"\n \"times.\")\n parser.add_argument(\"--skip\", action=\"store_true\", dest=\"skip_errors\",\n default=False, help=\"Don't stop on errors. \"\n \"Useful when pushing many files concurrently.\")\n parser.add_argument(\"-s\", \"--source\", action=\"store_true\",\n dest=\"push_source\", default=False,\n help=\"Push the source file to the server.\")\n\n parser.add_argument(\"-t\", \"--translations\", action=\"store_true\",\n dest=\"push_translations\", default=False,\n help=\"Push the translation files to the server\")\n parser.add_argument(\"--no-interactive\", action=\"store_true\",\n dest=\"no_interactive\", default=False,\n help=\"Don't require user input when forcing a push.\")\n parser.add_argument(\"-x\", \"--xliff\", action=\"store_true\", dest=\"xliff\",\n default=False, help=\"Apply this option to upload \"\n \"file as xliff.\")\n parser.add_argument(\n \"-b\", \"--branch\", action=\"store\", dest=\"branch\",\n default=None, nargs=\"?\", const='-1',\n help=(\"Pull for a specific branch. 
Default is current\"\n \"branch if exists.\")\n )\n parser.add_argument(\"--parallel\", action=\"store_true\", default=False,\n help=\"perform push/pull requests in parallel\")\n return parser", "def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Text]:", "def add_args(parser: argparse.ArgumentParser):\n pass", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def __add_arguments__(cls, parser):", "def add_arguments(cls, arg_parser: ArgParser) -> None:", "def configure(self, args):\n pass", "def add_arguments(self, parser):\n parser.add_argument('asins', nargs='+', type=str)", "def genargs() -> ArgumentParser:\n parser = ArgumentParser(prog=\"configure\", description=\"Configure a LinkML model repository\")\n parser.add_argument(\"configfile\", help=\"Model configuration file\", type=argparse.FileType('r'))\n parser.add_argument(\"--templatedir\", help=\"Template source directory (Default: template_configurator/templates)\",\n default=default_template_directory)\n parser.add_argument(\"-t\", \"--targetdir\", help=\"Output target directory (Default: current working directory\",\n default=os.getcwd())\n parser.add_argument(\"--reset\", help=\"Hard reset -- regenerate all files from scratch\", action=\"store_true\")\n return parser", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\"\"\n Generates json files with all the combinations of\n hyperparameter values from a configuratio file.\n \"\"\")\n\n parser.add_argument('outdir', help='output directory')\n parser.add_argument('config', help='configuration file')\n\n return parser.parse_args()", "def Args(parser):", "def setup_parser(self, parser, args):\r\n\r\n pass", "def setup_args() -> argparse.ArgumentParser:\n main_parser = argparse.ArgumentParser(prog=\"gh\")\n subparsers = main_parser.add_subparsers(dest=\"subparser\")\n command_parser = subparsers.add_parser(\"commands\", help=\"Runs a command\")\n command_parser.add_argument(\n \"choice\",\n help=\"The chosen command to run\",\n choices=gh.commands.OPTIONS.keys(),\n )\n analytics_parser = subparsers.add_parser(\"analytics\", help=\"Runs an analysis\")\n analytics_parser.add_argument(\n \"choice\",\n help=\"The chosen analysis to run\",\n choices=gh.analytics.OPTIONS.keys(),\n )\n return main_parser", "def add_arguments(parser):\n add_token_flags(parser)\n parser.add_argument('token', nargs='?')\n if is_admin_enabled():\n parser.add_argument('--admin', '-a', help='run command in admin mode', action='store_true')\n format_group = parser.add_mutually_exclusive_group()\n format_group.add_argument('--json', help='provide the data in a JSON file', dest='json')\n format_group.add_argument('--yaml', help='provide the data in a YAML file', dest='yaml')\n format_group.add_argument('--input', help='provide the data in a JSON/YAML file', dest='input')\n parser.add_argument('--output', help='outputs the computed token configuration in a JSON/YAML file (or to stdout using -)'\n 'without performing any token edit operations')\n parser.add_argument('--context', dest='context',\n help='can be used only when a data file has been provided via --input, --json, or --yaml; '\n 'this JSON/YAML file provides the context variables used '\n 'to render the data file as a template')\n add_override_flags(parser)", "def parse_args():\n parser = argparse.ArgumentParser(\"generate_scenarios.py\")\n add_arg = parser.add_argument\n add_arg(\"config\", nargs=\"?\", default=\"config.yaml\")\n # add_arg('-d', '--distributed', action='store_true')\n 
add_arg(\"-v\", \"--verbose\", action=\"store_true\")\n # parameters which override the YAML file, if needed\n #\n return parser.parse_args()", "def config_arg_parser():\n parser = argparse.ArgumentParser(description='Dataframes merger')\n parser.add_argument('-dir_path', required=True, help=\"Path to the directory with feature collections, that should be merged\")\n return parser.parse_args()", "def main_parse_args():\n parser = ArgumentParser()\n parser = cf.add_config_args(parser)\n args = parser.parse_args()\n config_opts = sys.argv[1:]\n # add working_dir to config_opts\n found_wd = False\n for opt in ['-wd', '--working_dir']:\n if opt in config_opts:\n found_wd = True\n if not found_wd:\n config_opts.extend(['-wd', args.working_dir])\n # remove src_classes from config_opts\n for opt in ['-srcs', '--src_classes']:\n if opt in config_opts:\n idx = config_opts.index(opt)\n config_opts.pop(idx)\n # pop next item\n config_opts.pop(idx)\n args.config_opts = \" \".join(config_opts)\n return args", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n \"config_path\",\n type=str,\n help=\"Path to the JSON configuration file containing the image transformation settings.\",\n )\n parser.add_argument(\n \"img_path\",\n type=str,\n help=\"Path to the input image file to apply transformations.\",\n )\n return parser.parse_args()", "def config(*subconfig):\n\n with open('configure.yaml', 'r') as stream:\n args = yaml.load(stream)\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description='')\n parser.add_argument(\n '--node',\n '-n',\n help='The node ID.'\n )\n parser.add_argument(\n '--processes',\n '-p',\n help='The total number of processes.'\n )\n # Store command line arguments in a dict\n cl_args = parser.parse_args()\n cl_args_dict = vars(cl_args)\n # Combine\n args.update(cl_args_dict)\n # Find subconfig if argument is passed\n for s in subconfig:\n try:\n args = args[s]\n except:\n pass\n # Return\n return args", "def parser_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config-file\", type=str, help=\"yaml configuration file name\")\n return parser.parse_args()", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def parse_args():\n hpo_warning = 'Flag overwrites config value if set, used for HPO and PBT runs primarily'\n parser = argparse.ArgumentParser('train.py')\n add_arg = parser.add_argument\n add_arg('config', nargs='?', default='GraphLearning/configs/myconfig.yaml')\n add_arg('results_dir', nargs='?', default='GraphLearning/results/withnoise')\n \n return parser.parse_args()", "def build_shingles_config():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type=str, default=SONGS_PATH,\n help=\"Input filename.tsv\")\n parser.add_argument(\"-o\", \"--output\", type=str, help=\"Output filename.tsv\")\n\n return parser.parse_args()", "def add_args(parser):\n parser.add_argument(\n \"--share-encoder-embeddings\",\n action=\"store_true\",\n help=\"share encoder embeddings across languages\",\n )\n parser.add_argument(\n \"--share-decoder-embeddings\",\n action=\"store_true\",\n help=\"share decoder embeddings across languages\",\n )\n parser.add_argument(\n \"--share-encoders\",\n action=\"store_true\",\n 
help=\"share encoders across languages\",\n )\n parser.add_argument(\n \"--share-decoders\",\n action=\"store_true\",\n help=\"share decoders across languages\",\n )", "def add_arguments(parser):\n return", "def add_arguments(self, sub_parser):\n sp = sub_parser\n sp.add_argument(\n '--fork', action='store_true',\n help=\"\"\"positional arguments: [user_name/]repo_name [dest_repo_name]\n default name of fork is repo_name\"\"\")\n #sp.add_argument('--dest')\n sp.add_argument(\n '--clone', metavar=('USER/REPO'),\n help=\"clone remote, init backup and push\")\n sp.add_argument(\n '--local-name', metavar=('REPO'),\n help=\"\"\"local repo name (default is remote name), if no path,\n create under repo-base\"\"\")\n sp.add_argument(\n '--create-pr', action='store_true',\n help=\"\"\"clone clean, diff against default, patch on clean, commit\"\"\")\n sp.add_argument(\n '--update-pr', action='store_true',\n help=\"\"\"diff against previous patch, commit\"\"\",)\n #sp.add_argument('--test', action='store_true', help=argparse.SUPPRESS)\n sp.add_argument(\n '--branch-close', metavar='TXT',\n help=\"\"\"close a branch with %(metavar)s as reason\"\"\",)\n sp.add_argument(\n '--username',\n help=\"username to use for push to bitbucket.org \"\n ).add_default()\n sp.add_argument('--password',\n help=\"password to use for push to bitbucket.org \"\n ).add_default(star=True)", "def add_args(parser):\r\n parser.add_argument(\"data\", help=\"path to data directory\")\r\n parser.add_argument(\r\n \"--silence-token\", default=\"\\u2581\", help=\"token for silence (used by w2l)\"\r\n )\r\n parser.add_argument(\r\n \"--max-source-positions\",\r\n default=sys.maxsize,\r\n type=int,\r\n metavar=\"N\",\r\n help=\"max number of frames in the source sequence\",\r\n )\r\n parser.add_argument(\r\n \"--max-target-positions\",\r\n default=1024,\r\n type=int,\r\n metavar=\"N\",\r\n help=\"max number of tokens in the target sequence\",\r\n )", "def setup_args(cls, parser):\n pass", "def setup_from_args_and_configs(log_controller=None):\n\n if log_controller == None:\n log_controller = log_config.log_setup()\n\n # base parser\n base_parser = argparse.ArgumentParser(add_help=False)\n # this is the base workspace folder, containing a config file, named defaults.yaml, this file should accumulate the history\n # of the subcommands configs, so it should\n base_parser.add_argument('-w', '--workdir', action='store', help='workspace folder', default=os.getcwd())\n\n # partial parsing of known args\n base_args = base_parser.parse_known_args()[0]\n\n #log_controller.set_args()\n\n if base_args.workdir[0] == '/':\n work_dir = base_args.workdir\n env_dir = work_dir\n else:\n logger.warning(\" workdir path is not absolute-->\" + base_args.workdir + \"<--\")\n work_dir = os.path.join(parent_root_path, 'deploy', base_args.workdir)\n logger.warning(\" setting output work folder to -->\" + work_dir + \"<--\")\n env_dir = os.path.join(parent_root_path, 'environments', base_args.workdir)\n logger.warning(\" setting input environment folder to -->\" + env_dir + \"<--\")\n\n global_key_subst['DEPLOY_WORKDIR'] = work_dir\n\n# print(\"%%%%% workdir %%%%\",base_args.workdir)\n\n #get yaml files involved\n yaml_files = find_config_file_list(\n list_paths=[work_dir, env_dir],\n default_paths=['config'],\n glob_suffix='defaults.yaml' )\n # print(\"#######################defaults yaml files\", yaml_files)\n\n base_config = CascadeYamlConfig(yaml_files=yaml_files)\n\n #env spack yaml files involved\n env_spack_yaml_files = find_config_file_list(\n 
list_paths=[work_dir, env_dir],\n default_paths=[],\n glob_suffix='spack.yaml' )\n # print(\"#######################spack yaml files\", env_spack_yaml_files)\n\n env_spack_config = CascadeYamlConfig(yaml_files=env_spack_yaml_files)\n env_spack_session = env_spack_config[['spack']]\n log_controller.set_args(log_configs=base_config[['logging_configs']])\n config_session = base_config[['config']]\n\n # adding config_folders arg\n key_name = 'config_folders'\n\n #print(\"######\"+str(env_spack_session))\n #print(\"######\"+str(env_spack_session.get('include', [os.path.join(root_path, 'config')])))\n # print(\"///////////////\", config_session)\n\n base_parser.add_argument('-' + key_name[0],\n '--' + key_name,\n action='append',\n help='yaml config folders',\n default = env_spack_session.get('include', config_session.get(key_name, [os.path.join(root_path, 'config')])))\n\n key_name = 'hosts_dir'\n\n base_parser.add_argument('--' + key_name,\n action='store',\n help='hosts config base dir',\n default=abs_deploy_path(config_session.get(key_name, 'config/hosts'),\n prefixes=[root_path]))\n\n # now reparse with this new arguments\n base_args = base_parser.parse_known_args()[0]\n # print(\"@@@@@@@@@ args.hosts_dir ::::::\", str(base_args.hosts_dir).split('/'))\n if base_args.hosts_dir[0] == '/':\n hosts_dir = base_args.hosts_dir\n else:\n hosts_dir = os.path.join(root_path, str(base_args.hosts_dir))\n # print(\"@@@@@@@@@ hosts_dir ::::::\", hosts_dir)\n default_paths = ['config']\n if os.path.exists(hosts_dir):\n default_paths.append(hosts_dir)\n\n\n\n\n yaml_files = find_config_file_list(\n list_paths=[env_dir, work_dir] + base_args.config_folders,\n default_paths=default_paths,\n glob_suffix='defaults.yaml' )\n\n # print(\"#######################second yaml files\", yaml_files)\n base_config = CascadeYamlConfig(yaml_files=yaml_files)\n log_controller.set_args(log_configs=base_config[['logging_configs']])\n\n config_session = base_config[['config']]\n\n\n platform_match = utils.myintrospect(tags=config_session.get('host_tags', dict())).platform_tag()\n platform_folders=[]\n if platform_match:\n logger.info(\" platform -->\" + str(platform_match) + \"<--\")\n platform_folders = merge_folder_list([],\n merge_folders=[os.path.join(platform_match, config_session.get('config_dir', 'config'))],\n prefixes=[os.getcwd(), hosts_dir])\n logger.info(\" platform folders -->\" + str(platform_folders) + \"<--\")\n global_key_subst['DEPLOY_HOST_CONFIGPATH'] = platform_folders[0]\n\n key_name = 'plugin_folders'\n base_parser.add_argument('-' + key_name[0],\n '--' + key_name,\n action='append',\n help='plugin folders',\n default = [abs_deploy_path(path, prefixes=[lib_path])\n for path in config_session.get(key_name, [os.path.join(lib_path, 'plugins')])])\n\n # now reparse with this new arguments\n base_args = base_parser.parse_known_args()[0]\n\n # platform_folders=[]\n # if base_args.platform_dir[0] == '/':\n # platform_dir = base_args.platform_dir\n # else:\n # platform_dir = os.path.join(root_path, base_args.platform_dir)\n # if os.path.exists(platform_dir):\n # platform_folders.append(platform_dir)\n # platform_match = utils.myintrospect(tags=config_session.get('host_tags', dict())).platform_tag()\n # logger.info(\" platform -->\" + str(platform_match) + \"<--\")\n # if platform_match:\n # platform_config_folder = os.path.abspath(os.path.join(platform_dir, platform_match, config_session.get('config_dir', 'config')))\n # if os.path.exists(platform_config_folder):\n # 
platform_folders.append(platform_config_folder)\n # else:\n # logger.warning(\" NON EXISTING PLATFORM FOLDER :\" + str(platform_config_folder))\n\n\n config_folders = merge_folder_list([os.path.join(root_path, 'config')] +\n platform_folders +\n [env_dir, work_dir],\n merge_folders=base_args.config_folders,\n prefixes=[os.getcwd(),os.path.join(root_path, 'config')])\n plugin_folders = merge_folder_list([],\n merge_folders=base_args.plugin_folders,\n prefixes=[os.getcwd(),lib_path])\n\n return base_parser, config_folders, plugin_folders, platform_folders", "def Args(parser):\n flags.AddRegion(parser)\n flags.AddCluster(parser)", "def apply_argparse(args: Namespace, config: Config) -> Config:\n\n for k in args.__dict__:\n apply_arg(k, args, config)\n return config", "def add_to_parser(parser: ArgumentParser) -> None:\n parser.add_argument(\n \"--config\", \"-c\", metavar=\"FILE\", action=\"append\", help=\"Load a parameters configuration YAML file\"\n )\n parameters = [(parameter.order, parameter.name, parameter) for parameter in Parameter.by_name.values()]\n for _, _, parameter in sorted(parameters):\n text = parameter.description.replace(\"%\", \"%%\") + f\" (default: {parameter.default})\"\n if parameter.parser is None:\n if parameter.short:\n parser.add_argument(\"--\" + parameter.name, \"-\" + parameter.short, help=text, action=\"store_true\")\n else:\n parser.add_argument(\"--\" + parameter.name, help=text, action=\"store_true\")\n else:\n if parameter.short:\n parser.add_argument(\n \"--\" + parameter.name, \"-\" + parameter.short, help=text, metavar=parameter.metavar\n )\n else:\n parser.add_argument(\"--\" + parameter.name, help=text, metavar=parameter.metavar)", "def add_arguments(cls):\n return [\n (('--yes',), dict(action='store_true', help='clean .git repo')),\n (('--variable', '-s'),\n dict(nargs='+', help='set extra variable,format is name:value')),\n (('--skip-builtin',),\n dict(action='store_true', help='skip replace builtin variable')),\n\n (('--dir',), dict(nargs='?', default=os.getcwd(),\n help='set working directory')),\n (('--debug',), dict(action='store_true', help='open debug mode')),\n (('--dry-run',), dict(action='store_true',\n help='print command instead execute it')),\n (('--verbose', '-v'), dict(action='count')),\n ]", "def parse_cmd_line_args(args: typing.Optional[typing.List] = None) -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', '-c', dest='cfg', metavar='nvpy.cfg', help='path to config file')\n return parser.parse_args(args)", "def configure_parser_genome(subparsers):\n help_msg = \"Genome assembly commands.\"\n\n desc_msg = help_msg + dedent(\"\"\"\n \n This subcommand controls the genome assemblies used by MotifScan.\n MotifScan requires a sequences FASTA file and a gene annotation file \n (if available) for each genome assembly, users can either download them \n from a remote database or install directly with local prepared files.\n \"\"\")\n\n epilog_msg = dedent(\"\"\"\n Examples:\n --------- \n 1) Display installed genomes:\n \n motifscan genome --list\n \n 2) Display all available genomes in a remote database:\n \n motifscan genome --list-remote\n \n 3) Search genomes in a remote database by keyword (e.g. 
'human'):\n \n motifscan genome --search human\n \n 4) Install 'hg19' genome assembly from a remote database:\n \n motifscan genome --install -n hg19 -r hg19\n \n 5) Install 'hg19' genome assembly with local prepared files:\n\n motifscan genome --install -n hg19 -i <hg19.fa> -a <refGene.txt> \n \n 6) Uninstall a genome assembly:\n \n motifscan genome --uninstall <genome_name>\n \n Notes:\n ------ \n The path of newly installed genome will be automatically saved. If you \n move the directory to another location later, please reconfigure it:\n \n motifscan config --set-genome <genome_name> <new_path>\n \"\"\")\n\n parser = subparsers.add_parser(\n \"genome\", description=desc_msg, help=help_msg, epilog=epilog_msg,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n subcommands = parser.add_argument_group(\"Genome Subcommands\")\n subcommands = subcommands.add_mutually_exclusive_group()\n subcommands.add_argument(\n \"--list\", dest=\"list\", action=\"store_true\", default=False,\n help=\"Display installed genome assemblies.\")\n subcommands.add_argument(\n \"--list-remote\", dest=\"list_remote\", action=\"store_true\",\n default=False, help=\"Display available remote genome assemblies.\")\n subcommands.add_argument(\n \"--search\", metavar=\"KEYWORD\", dest=\"search\",\n help=\"Search for genome assemblies in a remote database.\")\n subcommands.add_argument(\n \"--install\", dest=\"install\", action=\"store_true\", default=False,\n help=\"Install a new genome assembly.\")\n subcommands.add_argument(\n \"--uninstall\", metavar=\"NAME\", dest=\"uninstall\",\n help=\"Uninstall a genome assembly.\")\n subcommands.required = True\n\n parser_install = parser.add_argument_group(\"Install Options\")\n parser_install.add_argument(\n \"-n\", \"--name\", metavar=\"NAME\", dest=\"name\",\n help=\"Name of the genome assembly to be installed.\")\n parser_install.add_argument(\n \"-i\", metavar=\"FASTA\", dest=\"fasta_files\", nargs=\"+\",\n help=\"Local genome sequences file(s) in FASTA format.\")\n parser_install.add_argument(\n \"-a\", metavar=\"ANNOTATION\", dest=\"gene_file\",\n help=\"Local gene annotation (refGene.txt) file.\")\n parser_install.add_argument(\n \"-r\", \"--remote\", metavar=\"GENOME\", dest=\"remote\",\n help=\"Download required data files from a remote assembly.\")\n parser_install.add_argument(\n \"-o\", \"--output-dir\", metavar=\"DIR\", dest=\"output_dir\",\n help=\"Write to a given directory instead of the default directory.\")\n\n parser_remote = parser.add_argument_group(\"Remote Database Options\")\n parser_remote.add_argument(\n \"--database\", dest=\"database\", choices=[\"ucsc\"], default=\"ucsc\",\n help=\"Which remote database is used to list/install/search genome \"\n \"assemblies. 
Default: ucsc\")\n parser_remote.add_argument(\n \"--clean\", dest=\"clean\", action=\"store_true\", default=False,\n help=\"Clean the download directory after installation.\")\n parser = _add_verbose_argument(parser)\n parser.set_defaults(func=genome.run)", "def configure():\n parser = argparse.ArgumentParser(description=\"welcome to AutomateBGP!\")\n parser.add_argument('-u', '--username', dest='username',\n help='username to login to nodes')\n parser.add_argument('-p', '--password', dest='password',\n help='password to login to nodes')\n parser.add_argument('-f', '--filename', dest='filename',\n help='text file containing the node data (expected format...)')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert environment variables in to a configuration file')\n parser.add_argument('-p',\n '--prefix',\n help='Prefix of env vars to parse',\n required=True)\n parser.add_argument('-f',\n '--format',\n help='Output file format',\n default='ini',\n choices=['ini', 'json'])\n parser.add_argument('-o',\n '--output-file',\n help='Outfile file path',\n default='/dev/stdout')\n parser.add_argument(\n '-r',\n '--reference-file',\n type=argparse.FileType('r'),\n help='Load this reference file for existing/hard coded values')\n\n return parser.parse_args()", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. -qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def add_arguments_imp(self, parser): # noqa\n parser.add_argument(\n 'what',\n nargs='?',\n help=(\n 'Print the value of /ROOT/<what>_dir. For example: ' +\n '\"dodo which src\" prints the value of /ROOT/src_dir.')\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n '--config',\n action=\"store_true\",\n help='Print where the config file is')\n group.add_argument(\n '--script',\n help='Print where the dodo command script with given name is')", "def push_parser():\n usage = \"usage: %prog [tx_options] push [options]\"\n description = \"This command pushes all local files that have been added to \"\\\n \"Transifex to the remote server. All new translations are merged \"\\\n \"with existing ones and if a language doesn't exists then it gets \"\\\n \"created. If you want to push the source file as well (either \"\\\n \"because this is your first time running the client or because \"\\\n \"you just have updated with new entries), use the -f|--force option. \"\\\n \"By default, this command will push all files which are watched by \"\\\n \"Transifex but you can filter this per resource or/and language. 
\"\n parser = OptionParser(usage=usage, description=description)\n parser.add_option(\"-l\", \"--language\", action=\"store\", dest=\"languages\",\n default=None, help=\"Specify which translations you \"\n \"want to push (defaults to all)\")\n parser.add_option(\"-r\", \"--resource\", action=\"store\", dest=\"resources\",\n default=None, help=\"Specify the resource for which you \"\n \"want to push the translations (defaults to all)\")\n parser.add_option(\"-f\", \"--force\", action=\"store_true\",\n dest=\"force_creation\", default=False,\n help=\"Push source files without checking modification \"\n \"times.\")\n parser.add_option(\"--skip\", action=\"store_true\", dest=\"skip_errors\",\n default=False, help=\"Don't stop on errors. \"\n \"Useful when pushing many files concurrently.\")\n parser.add_option(\"-s\", \"--source\", action=\"store_true\",\n dest=\"push_source\", default=False,\n help=\"Push the source file to the server.\")\n\n parser.add_option(\"-t\", \"--translations\", action=\"store_true\",\n dest=\"push_translations\", default=False,\n help=\"Push the translation files to the server\")\n parser.add_option(\"--no-interactive\", action=\"store_true\",\n dest=\"no_interactive\", default=False,\n help=\"Don't require user input when forcing a push.\")\n parser.add_option(\"-x\", \"--xliff\", action=\"store_true\", dest=\"xliff\",\n default=False, help=\"Apply this option to upload \"\n \"file as xliff.\")\n return parser", "def update_args(self, args):\n self.args = self.parser.parse_args(args)", "def add_parse_arguments(self, parser):\n parser.add_argument('command', help='The daemon command: start|status|stop|restart')\n parser.add_argument('--pid_file', help='The pid_file of the daemon')", "def command_line_args(parser):\n AbyssAssembler.command_line_args(parser)\n SpadesAssembler.command_line_args(parser)\n TrinityAssembler.command_line_args(parser)\n VelvetAssembler.command_line_args(parser)", "def parse_arguments():\n\n args = Arguments()\n parser = argparse.ArgumentParser(\"Update river flow directions\")\n parser.add_argument('python_config_filename',\n metavar='python-config-filename',\n help='Full path to python configuration file',\n type=str)\n #Adding the variables to a namespace other than that of the parser keeps the namespace clean\n #and allows us to pass it directly to main\n parser.parse_args(namespace=args)\n return args", "def parse_command_line(argv=sys.argv):\n import argparse\n parser = argparse.ArgumentParser(description=\"\"\"\\\nAdd a tag to every Git project, bump the version in sources, and push.\n\nFor each Git repository in the distro:\n - Verify the current branch is major.minor or brainvisa-major.minor\n - Verify that there are no uncommitted changes\n - Verify the current version in project_info.cmake / info.py\n - Create the annotated tag and push it\n - Bump the micro version in project_info.cmake / info.py, commit and push\n\"\"\")\n parser.add_argument('source_root',\n help='Root of the BrainVISA source directory, '\n 'typically /casa/host/src.')\n parser.add_argument('version_to_tag', type=str,\n help='Version that is about to be tagged '\n '(e.g. \"5.0.3\")')\n parser.add_argument('--fix-source-version-numbers', action='store_true',\n help='In this mode no tags are created, but the source'\n 'version numbers are set to version_to_tag in projects'\n 'that follow the BrainVISA version.')\n parser.add_argument('--distro', default='cea',\n help='Distro to release. 
Note that you should use a '\n 'source directory that has the largest set of '\n 'components, typically the cea distro.')\n parser.add_argument('--dry-run', default=True, action='store_true',\n help=\"List every step but don't actually perform \"\n \"anything that would write to the repositories.\")\n parser.add_argument('--really-do-it', dest='dry_run', action='store_false',\n help=\"Actually perform the actions, including \"\n \"IRREVERSIBLE actions such as pushing tags. MAKE SURE \"\n \"TO RUN THE DRY-RUN MODE FIRST!\")\n parser.add_argument('--debug', dest='logging_level', action='store_const',\n const=logging.DEBUG, default=logging.INFO,\n help='Enable debugging messages')\n\n args = parser.parse_args(argv[1:])\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-r\", \"--repo-path\", type=str, required=True,\n help=\"Path of the git repo\")\n parser.add_argument(\"-e\", \"--extension\", type=str, required=True,\n help=\"Extension of files to add headers to (ex: '.py')\")\n args = parser.parse_args()\n\n return args", "def parse_cmd_args():\n parser = argparse.ArgumentParser(description='This program applies an RL method to an OpenAI gym environment')\n for name, val in config1.items():\n if type(val) is bool:\n parser.add_argument('--' + name, action='store_true', dest=name)\n parser.add_argument('--not_' + name, action='store_false', dest=name)\n parser.set_defaults(**{name: val})\n else:\n parser.add_argument('--' + name, type=type(val), default=val)\n\n args = parser.parse_args()\n return args", "def add_args_to_parser(parser):\n parser.add_argument(\"-i\", \"--inbox-dir\", help=\"directory with input images\")\n parser.add_argument(\n \"-o\", \"--output-dir\", help=\"output directory for clustered images\"\n )\n parser.add_argument(\n \"-w\",\n \"--watch-dir\",\n help=\"directory with structured media (official media repository)\",\n action=\"append\",\n )\n parser.add_argument(\n \"-t\",\n \"--development-mode\",\n help=\"Run script with development configuration - work on tests directories\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"-n\",\n \"--no-operation\",\n help=\"Do not introduce any changes on the disk. 
Dry run.\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"-y\",\n \"--copy-mode\",\n help=\"Copy instead of default move\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"-f\",\n \"--force-deep-scan\",\n help=\"Force recalculate cluster info for each existing cluster.\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"-d\",\n \"--drop-duplicates\",\n help=\"Do not cluster duplicates, store them in separate folder.\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"-c\",\n \"--use-existing-clusters\",\n help=(\n \"If possible, check watch folders if the inbox media can be \"\n \"assigned to already existing cluster.\"\n ),\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"--version\", action=\"version\", version=f\"%(prog)s {version.__version__}\"\n )\n return parser", "def parse_arguments():\n description = 'Code checkout script for NEMSfv3gfs'\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('--config', action='store', help='name of checkout config', \n default='checkout_nemsfv3gfs.cfg')\n args = parser.parse_args()\n return args.config", "def parse() -> Namespace:\n parser = ArgumentParser()\n parser.add_argument(\n \"--config\",\n \"-c\",\n default=\"qwauto.cfg\",\n help=\"Config file. Defaults to qwauto.cfg.\",\n )\n return parser.parse_args()", "def _add_arguments(parser):\n parser.add_argument(\n \"command\",\n help='The plugin to run. e.g. \"shell\".',\n choices=sorted(registry.get_command_keys()),\n )\n\n parser.add_argument(\n \"-x\",\n \"--maximum-repositories\",\n default=sys.maxsize,\n type=int,\n help='If a value of `2` is used, it means \"Only search 2 repositories '\n 'for Rez packages to run on, at most\".',\n )\n\n parser.add_argument(\n \"-z\",\n \"--maximum-rez-packages\",\n default=sys.maxsize,\n type=int,\n help='If a value of `2` is used, it means \"Only search for 2 Rez packages '\n 'to run some comm on, at most\".',\n )\n\n parser.add_argument(\n \"-p\",\n \"--packages-path\",\n default=[config.release_packages_path], # pylint: disable=no-member\n help=\"A `{os.pathsep}` separated list of paths that report/run will be run on. \"\n \"If not defined, `rez.config.config.release_packages_path` is used, instead.\".format(\n os=os\n ),\n )\n\n parser.add_argument(\n \"-s\",\n \"--search-packages-path\",\n default=[config.release_packages_path], # pylint: disable=no-member\n help=\"A `{os.pathsep}` separated list of paths to search for Rez package dependencies. \"\n \"If not defined, `rez.config.config.release_packages_path` is used, instead.\".format(\n os=os\n ),\n )\n\n parser.add_argument(\n \"-i\",\n \"--ignore-patterns\",\n default=[],\n nargs=\"*\",\n help=\"A set of glob expressions or a file to a set of glob expressions. \"\n \"If a Rez package name matches one of \"\n \"these, it will not be run on.\",\n )\n\n parser.add_argument(\n \"-k\",\n \"--keep-temporary-files\",\n action=\"store_true\",\n help=\"If added, do not delete any temporary files that are generated during this run.\",\n )\n\n parser.add_argument(\n \"-r\",\n \"--rez-packages\",\n default=set(),\n nargs=\"+\",\n help=\"The names of Rez packages to process. 
If no names are given, \"\n \"every Rez package that is found will be processed.\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--temporary-directory\",\n help=\"A folder on-disk that will be used to clone git repositories.\",\n )", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def add_command_line_arguments(self, parser):\n # parser.add_option(...)\n pass", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()", "def add_arguments(self, parser):\n\n cmd = self # make sure we can use sub parser in django. via stack_overflow\n\n class SubParser(CommandParser):\n \"\"\"Use to avoid the error when using sub parser in django's add_arguments method.\"\"\"\n def __init__(self, **kwargs):\n super(SubParser, self).__init__(cmd, **kwargs)\n\n # add custom sub commands.\n\n subparsers = parser.add_subparsers(\n title=\"sub commands\",\n parser_class=SubParser,\n dest='sub_command',\n help='Sub commands you can use.'\n )\n\n # actions to start or stop socket server.\n\n server = subparsers.add_parser('server', help=\"Server Commands\")\n server.add_argument(\n 'action',\n metavar='ACTION',\n choices=self.socket_server_actions,\n help='Actions is: <%s>' % '|'.join(self.socket_server_actions),\n )\n\n # actions of targets when calling server is running.\n\n proxy = subparsers.add_parser('proxy', help=\"Proxy Commands\")\n proxy.add_argument(\n '-a', '--action',\n metavar='ACTION',\n required=True,\n choices=self.proxy_job_actions,\n help='Actions is: <%s>' % '|'.join(self.proxy_job_actions)\n )\n proxy.add_argument(\n '-t', '--targets',\n metavar='TARGET',\n nargs='*',\n help='Targets can be empty which means ALL, you can list targets by <./manage.py mirrordata proxy -a ping>.'\n )", "def configure(self, parser: argparse.ArgumentParser) -> None:\n pass", "def parse_args():\n parser = argparse.ArgumentParser(description='Eval')\n parser.add_argument(\n '--cfg', help='experiment configure file path', type=str, \\\n default=\"validation.config.Config\")\n return parser.parse_args()", "def parse_args(arguments):\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('configfile', help=\"Config file\",\n type=argparse.FileType('r'))\n parser.add_argument('outfile', help=\"Output file\",\n type=argparse.FileType('w'))\n\n return parser.parse_args(arguments)", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('-t', '--timer', action='store_true', \\\n help='Time the first random generation')\n parser.add_argument('-i', '--ibmq', default='', help='IBMQ token')\n parser.add_argument('-b', '--backend', default='', help='IBMQ backend')\n return parser.parse_args()", "def parse_cmd_args(self, args):\n\t\tfor k, v in vars(args).items():\n\t\t\tif k == \"cfg\":\n\t\t\t\tcontinue\t\t\t\t# reserved keyword (for loading settings)\n\n\t\t\tif v is None:\n\t\t\t\tcontinue\t\t\t\t# None stands for not set\n\n\t\t\tif not hasattr(self, k):\n\t\t\t\traise RuntimeError(\"[ERROR] Not recognized argument from comamnd line: {}.\".format(k))\n\t\t\tsetattr(self, k, v)", "def add_arguments(self, parser):\n pass", "def add_arguments(self, parser):\n pass", "def parse_arguments():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--accessions\", help=\"A json file with old/new family mapppings\")\n parser.add_argument(\"--add-header\", help=\"Print descriptive header\",\n action=\"store_true\", default=False)\n parser.add_argument(\"--add-links\", help=\"Creates 
hyperlinks to available Rfam html content\",\n action=\"store_true\", default=False)\n return parser", "def add_args_to_subparser(the_parser, subcommand_name):\n\n the_parser.add_argument(CmdArgs.verbose_optional, help=CmdArgs.verbose_help,\n action='store_true',\n )\n\n if subcommand_name in DCA_VISUALIZATION_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.pdb_chain_id, help=CmdArgs.pdb_chain_id_help)\n the_parser.add_argument(CmdArgs.pdb_file, help=CmdArgs.pdb_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.dca_file, help=CmdArgs.dca_file_help)\n the_parser.add_argument(CmdArgs.rna_secstruct_file_optional,\n help=CmdArgs.rna_secstruct_file_help,\n )\n the_parser.add_argument(CmdArgs.linear_dist_optional,\n help=CmdArgs.linear_dist_help, type = int,\n )\n the_parser.add_argument(CmdArgs.contact_dist_optional,\n help=CmdArgs.contact_dist_help, type = float,\n )\n the_parser.add_argument(CmdArgs.num_dca_contacts_optional,\n help = CmdArgs.num_dca_contacts_help, type = int,\n )\n the_parser.add_argument(CmdArgs.wc_neighbor_dist_optional, type= int,\n help = CmdArgs.wc_neighbor_dist_help,\n )\n the_parser.add_argument(CmdArgs.pdb_id_optional, help = CmdArgs.pdb_id_help)\n\n if subcommand_name in FILE_CONTENT_SUBCOMMANDS:\n if subcommand_name == 'pdb_content':\n the_parser.add_argument(CmdArgs.pdb_file, help = CmdArgs.pdb_file_help)\n if subcommand_name in MSA_TRIMMING_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.max_gap_optional,\n type = float, help = CmdArgs.max_gap_help,\n )\n if subcommand_name == 'trim_by_refseq':\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.remove_all_gaps_optional,\n help= CmdArgs.remove_all_gaps_help, action='store_true',\n )\n if subcommand_name == 'trim_by_gap_size':\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n return None", "def parse_commandline_args():\n\n epilog = \"\"\"\n The configuration file must contained a JSON-encoded map. 
Example: \"{\"name\":\"foo\"}\".\n \"\"\"\n\n parser = utils.ConnectionArgumentParser(\n description=\"Update config (key/value pairs) on a board\", epilog=epilog\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=\"JSON file to load config from (default stdin)\",\n type=open,\n default=sys.stdin,\n dest=\"file\",\n )\n parser.add_argument(\n \"ids\", metavar=\"DEVICEID\", nargs=\"+\", type=int, help=\"Device IDs to flash\"\n )\n\n return parser.parse_args()", "def cli() -> None:\r\n config_argparse = _configfile_parser()\r\n config_args, _ = config_argparse.parse_known_args()\r\n\r\n defaults = {}\r\n\r\n if config_args.config: \r\n defaults = _load_config(config_args)\r\n\r\n parser = _cli(config_argparse, defaults)\r\n _add_standard_args(parser) \r\n \r\n subparser = parser.add_subparsers()\r\n _add_create_command(subparser)\r\n _add_update_command(subparser) \r\n\r\n args = parser.parse_args()\r\n command = args.cmd\r\n command.execute(args)", "def _parseArgs():\n # HINT: If you consider adding an option,\n # please consider adding a config file option first.\n parser = ArgumentParser(description=STRING_USAGE_DESCRIPTION,\n epilog=STRING_USAGE_EPILOG)\n parser.add_argument('--version', action='version',\n version='%(prog)s (' + VERSIONSTRING + ')')\n parser.add_argument('-c', '--configfile', action='store',\n dest='configfile',\n default=DEFAULT_CONFIGFILE,\n help=STRING_USAGE_CONFIGFILE)\n parser.add_argument('-e', '--editconfig', action='store_true',\n dest='invoke_editconfig',\n default=False,\n help=STRING_USAGE_EDITCONFIG)\n parser.add_argument('--defaultconfig', action='store_true',\n dest='invoke_defaultconfig',\n default=False,\n help=STRING_USAGE_DEFAULTCONFIG)\n parser.add_argument('--printconfig', action='store_true',\n dest='invoke_printconfig',\n default=False,\n help=STRING_USAGE_PRINTCONFIG)\n _addOverwriteBool(parser, 'gui', 'gui', 'enable')\n parser.add_argument('-s', '--sources', section='wesen',\n dest='sources',\n action=_OverwriteConfigAction)\n parser.add_argument('-r', '--resume',\n dest='resume', action='store_true',\n default=False, help=STRING_USAGE_RESUME)\n return parser.parse_known_args()", "def parseParams():\n parser = argparse.ArgumentParser(\n prog=__appname__,\n description=__desc__,\n prefix_chars=\"-/\",\n fromfile_prefix_chars='@',\n add_help=False)\n parser.add_argument('source',\n type=str,\n nargs='?',\n help=\"File containing valid syntax. (Default Standard Input)\"\n )\n parser.add_argument('output',\n type=str,\n help=\"Output Image. (required)\"\n )\n parser.add_argument('-f', '--fmt', '/f',\n dest='fmt',\n default='SVG',\n choices=['SVG', 'PDF', 'EPS', 'PNG'],\n nargs='?',\n help='Format of the Output file. 
Formats other than SVG require Inkscape in the system path.')\n parser.add_argument('-s', '--syntax', '/s',\n dest='syntax',\n default='EBNF',\n choices=['EBNF', 'DIAGRAM'],\n nargs='?',\n help='Syntax used for the Input.')\n parser.add_argument('-h', '--help', '/h',\n action='help',\n help=\"show this help message and exit.\")\n parser.add_argument('-v', '--version', '/v',\n action='version',\n version=\"{0:s} version {1:s}\".format(__title__, __version__))\n return parser.parse_args()", "def _parse_arguments(self, argv):\n parser = argparse.ArgumentParser()\n for section in self.config.sections():\n for key in self.config[section]:\n arg_name = '--' + key.replace(' ', '_').lower()\n parser.add_argument(arg_name)\n override_kwargs = vars(parser.parse_args(argv))\n override_kwargs = {k: v for k,\n v in override_kwargs.items() if v is not None}\n self._overwrite_with_kwargs(**override_kwargs)", "def create_parser() -> configargparse.ArgParser:\n parser = configargparse.ArgParser(default_config_files=[\n \"/etc/lookout/analyzer.conf\", \"~/.config/lookout/analyzer.conf\"],\n formatter_class=ArgumentDefaultsHelpFormatterNoNone,\n auto_env_var_prefix=\"lookout_\")\n slogging.add_logging_args(parser)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n\n def add_parser(name, help):\n return subparsers.add_parser(\n name, help=help, formatter_class=ArgumentDefaultsHelpFormatterNoNone)\n\n list_parser = add_parser(\"list\", \"Print globally available analyzers.\")\n list_parser.set_defaults(handler=list_analyzers)\n\n run_parser = add_parser(\n \"run\", \"Launch a new service with the specified (one or more) analyzers.\")\n run_parser.set_defaults(handler=run_analyzers)\n add_analyzer_arg(run_parser)\n run_parser.add(\"-c\", \"--config\", is_config_file=True,\n help=\"Path to the configuration file with option defaults.\")\n run_parser.add(\"-s\", \"--server\", required=True,\n help=\"Lookout server address, e.g. localhost:1234.\")\n run_parser.add(\"-w\", \"--workers\", type=int, default=1,\n help=\"Number of threads which process Lookout events.\")\n add_model_repository_args(run_parser)\n run_parser.add_argument(\"--request-server\", default=\"auto\",\n help=\"Address of the data retrieval service. \\\"same\\\" means --server.\")\n\n init_parser = add_parser(\"init\", \"Initialize the model repository.\")\n init_parser.set_defaults(handler=init_repo)\n add_model_repository_args(init_parser)\n\n tool_parser = add_parser(\"tool\", \"Invoke the tooling of a given analyzer.\")\n tool_parser.set_defaults(handler=run_analyzer_tool)\n tool_parser.add(\"analyzer\", help=\"Fully qualified package name with an analyzer.\")\n tool_parser.add(\"args\", nargs=argparse.REMAINDER)\n\n package_parser = add_parser(\n \"package\",\n \"Package several analyzers to a Docker container and write a sample Docker Compose config \"\n \"for Lookout.\")\n package_parser.set_defaults(handler=package_cmdline_entry)\n add_analyzer_arg(package_parser)\n package_parser.add(\"-w\", \"--workdir\", help=\"Generate files in this directory.\",\n default=tempfile.mkdtemp(prefix=\"lookout_package_\"))\n package_parser.add(\"--requirements\", help=\"Path to a custom requirements.txt\")\n package_parser.add(\"-r\", \"--repo\", help=\"GitHub repository name to watch. 
\"\n \"Example: \\\"src-d/lookout\\\".\",\n required=True)\n package_parser.add(\"-u\", \"--user\", help=\"GitHub user name which will send review comments.\",\n required=True)\n paturl = \"https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/\" # noqa\n package_parser.add(\"-t\", \"--token\", help=\"GitHub token for -u/--user. See \" + paturl,\n required=True)\n package_parser.add(\"-y\", \"--yes\", help=\"Run the commands in the end.\",\n action=\"store_true\")\n package_parser.add(\"-n\", \"--no\", help=\"Do not run the commands in the end.\",\n action=\"store_true\")\n return parser", "def set_args() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser( # type: argparse.ArgumentParser\n description=r'''\n -----------------------------------\n < Pull DNA barcodes from FASTQ files >\n -----------------------------------\n /\n \\ ______/ V`-, /\n } /~~\n /_)^ --,r'\n |b |b\n ''',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False\n )\n # Arguments for verbosity and logging\n parser.add_argument( # Verbosity\n '-v',\n '--verbosity',\n dest='verbosity',\n type=str.lower,\n choices=_VERBOSITY_LEVELS,\n default=_VERBOSITY_DEFAULT,\n required=False,\n metavar='verbosity',\n help=\"Set the verbosity level, choose from '%s'; defaults to '%s'\" % (\"', '\".join(_VERBOSITY_LEVELS), _VERBOSITY_DEFAULT)\n )\n parser.add_argument( # Number of cores\n '--parallel',\n dest='num_cores',\n type=_num_cores,\n const=None,\n default=1,\n nargs='?',\n required=False,\n metavar='num jobs',\n help=\"Run %(prog)s in parallel; if passed, can optionally specify the number of jobs to run at once\"\n )\n parser.add_argument( # Output directory\n '-o',\n '--output-directory',\n dest='outdirectory',\n type=str,\n default=_OUTDIR_DEFAULT,\n required=False,\n metavar='output directory',\n help=\"Choose where all output files are to be stored; defaults to '%s'\" % _OUTDIR_DEFAULT\n )\n # Input arguments\n inputs = parser.add_argument_group(\n title='input arguments',\n description='Provide inputs for %(prog)s'\n )\n inputs.add_argument( # Forward FASTQ\n '-f',\n '--forward-fastq',\n dest='forward',\n type=str,\n default=None,\n required=True,\n metavar='FORWARD FASTQ',\n help=\"Provide a filepath for the forward/single FASTQ file\"\n )\n inputs.add_argument( # Reverse FASTQ\n '-r',\n '--reverse-fastq',\n dest='reverse',\n type=str,\n default=None,\n required=False,\n metavar='REVERSE FASTQ',\n help=\"Provide a filepath for the optional reverse FASTQ file\"\n )\n inputs.add_argument( # Sample sheet\n '-s',\n '--sample-sheet',\n dest='sample_sheet',\n type=str,\n default=None,\n required=True,\n metavar='SAMPLE SHEET',\n help=\"Provide a filepath for the sample sheet\"\n )\n inputs.add_argument( # Barcodes file\n '-b',\n '--barcodes',\n dest='barcodes',\n type=str,\n required=True,\n default=None,\n metavar='BARCODES',\n help=\"Provide a filepath for the barcodes CSV file\"\n )\n barcodes = parser.add_argument_group(\n title='barcode options',\n description=\"Set parameters for barcode demultiplexing\"\n )\n barcodes.add_argument( # Number of errors allowed\n '-e',\n '--error',\n dest='error',\n type=int,\n default=_ERROR_DEFAULT,\n required=False,\n metavar='ERROR',\n help=\"This is how many mismatches in the barcode we allowed before rejecting, defaults to %s\" % _ERROR_DEFAULT\n )\n return parser", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config',\n help='Location of the configuration file',\n required=True)\n 
parser.add_argument('--path_to_repo',\n help='Path to repo',\n required=True)\n parser.add_argument('--branch',\n help='Git repo branch',\n required=False)\n parser.add_argument('--image_directory',\n help='Directory to save images to. Will be created if does not exist',\n required=True)\n parser.add_argument('--gif_filename',\n help='filepath of output GIF',\n required=True)\n known_args, pipeline_args = parser.parse_known_args()\n return known_args, pipeline_args", "def main(args):\n\n with open(args.cfg_fn, 'r') as cfg_fd:\n config = cfg_fd.read().split(\"\\n\")\n\n with open(args.opt_fn, 'r') as opt_fd:\n for oline in opt_fd:\n option, value = oline.strip().split(\"=\")\n\n conf_addition = \"%s=%s\" % (option, value)\n added = False\n for line_nr, line in enumerate(config):\n if \"# %s is not set\" % option in line or \\\n \"%s=\" % option in line:\n config[line_nr] = conf_addition\n added = True\n break\n\n if not added:\n config.append(conf_addition)\n\n with open(args.cfg_fn, 'w') as cfg_fd:\n cfg_fd.write(\"\\n\".join(config))", "def config_argparser(parser):\n add_usual_input_args(parser)\n parser.add_argument('--edges', action='store_true',\n help='First/last dialogues only')\n parser.set_defaults(func=main)", "def _add_argument(self, args=''):\n\n sys.argv += args.split(' ')", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def parse_command_line_args(self) -> None:\n self.parser.add_argument(\n \"-i\",\n \"--input\",\n help=\"(str) [default: .] The relative folder path with the csv files\",\n default=getcwd()\n )\n self.parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"(str) [default: .] 
The folder path for saving the *.alfredsnippets files\",\n default=getcwd()\n )\n self.parser.add_argument(\n \"-f\",\n \"--fieldorder\",\n help=\"(str) [default: 'abbreviation, content, name'] A comma separated list for the order of the fields \"\n \"of the csv files\",\n default=\"abbreviation, content, name\"\n )\n self.parser.add_argument(\n \"-d\",\n \"--deletefolders\",\n help=\"(bool) [default=False] Delete the folders that contains the json files\",\n type=self.str2bool,\n nargs='?',\n const=True,\n default=False\n )\n self.parser.add_argument(\n \"-l\",\n \"--lplaceholder\",\n help=\"(str) [default: %] The left side placeholder for the embedded snippets.\",\n default=\"%\"\n )\n self.parser.add_argument(\n \"-r\",\n \"--rplaceholder\",\n help=\"(str) [default: %] The right side placeholder for the embedded snippets.\",\n default=\"%\"\n )\n\n self.parser.add_argument(\n \"-c\",\n \"--changeplaceholders\",\n help=\"(bool) [default=True] Set to false if the placeholder shouldn't get changed at all\",\n type=self.str2bool,\n nargs='?',\n const=True,\n default=True\n )\n\n self.args = self.parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n '--conf', dest='conffile',\n default=os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'settings.yaml'),\n help='specify the configuration file')\n\n # Positional arguments:\n parser.add_argument(\n 'version', nargs='?',\n help='version you are about to release')\n parser.add_argument(\n 'previousversion', nargs='?',\n help='version that came before')\n\n # Optional arguments:\n log_options = parser.add_mutually_exclusive_group()\n log_options.add_argument(\n '--debug', dest='log_level',\n action='store_const', const=logging.DEBUG,\n help='Print out internal processing')\n log_options.add_argument(\n '-q', '--quiet', dest='log_level',\n action='store_const', const=logging.WARNING,\n help='Only shows up warning and errors')\n\n parser.add_argument(\n '-y', '--yes', dest='yes', action='store_true',\n help='answer yes to any question'\n )\n parser.add_argument(\n '--no-previous', dest='no_previous', action='store_true',\n help='disable the diff with previous version'\n )\n parser.add_argument(\n '--build', dest='buildroot',\n default=os.getcwd(),\n help='where the build should happen (defaults to pwd)'\n )\n parser.add_argument(\n '--branch', dest='branch',\n default='master',\n help='which branch to use (defaults to master for snapshot)'\n )\n parser.add_argument(\n '--dont-sign', dest='sign', action='store_false',\n default=True,\n help='skip gpg signing'\n )\n parser.add_argument(\n '--tar-command', dest='tar_command',\n default='tar',\n help='path to tar, we are expecting a GNU tar. 
(defaults to tar)'\n )\n parser.add_argument(\n '--patch-dir', dest='patch_dir', default=None,\n help='Where to source patch files from'\n )\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--zarr_dir',\n type=str,\n help='path to directory of zarr files',\n )\n parser.add_argument(\n '--tiff_dir',\n type=str,\n help='path to directory of tiff files',\n )\n parser.add_argument(\n '--output_dir',\n type=str,\n help='path to directory for writing',\n )\n parser.add_argument(\n '--config_path',\n type=str,\n default=None,\n help='path to yaml preprocess config file',\n )\n \n args = parser.parse_args()\n return args", "def _parse_arguments(text):\n parser = argparse.ArgumentParser(\n description=\"Build Python-based Rez packages in just a single command.\",\n )\n\n parser.add_argument(\n \"--hdas\",\n nargs=\"+\",\n help=\"The relative paths to each folder containing VCS-style Houdini HDAs.\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--items\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to copy / install.\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--eggs\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to make into a .egg file.\",\n )\n\n parser.add_argument(\n \"--symlink\",\n action=\"store_true\",\n default=linker.must_symlink(),\n help=\"If True, symlink everything back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-files\",\n action=\"store_true\",\n default=linker.must_symlink_files(),\n help=\"If True, symlink files back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-folders\",\n action=\"store_true\",\n default=linker.must_symlink_folders(),\n help=\"If True, symlink folders back to the source Rez package.\",\n )\n\n known, _ = parser.parse_known_args(text)\n\n return known", "def parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--src',\n help='path to update file',\n required=True)\n\n parser.add_argument(\n '--dst',\n help='working directory',\n required=True)\n\n parser.add_argument(\n '--disable_rollback',\n help='disable rollabck in case of errors',\n action='store_false')\n\n return parser.parse_args()", "def AddSpssArgs(syntax, *args):\n\tsep = \" \"\n\tsyntax = syntax.strip()\n\tif syntax.endswith(spssterm):\n\t\tsyntax = syntax[:-1]\n\treturn syntax+sep+sep.join(args)", "def add_args(parser):\n # fmt: off\n TranslationTask.add_args(parser)\n parser.add_argument('--langs', required=True, metavar='LANG',\n help='comma-separated list of monolingual language, for example, \"en,de,fr\"'\n 'be careful these langs are what you used for pretraining (the same order),'\n 'not for finetuning.'\n 'you should always add all pretraining language idx during finetuning.')\n parser.add_argument('--multilang-sampling-alpha', type=float, default=0.7,\n help='sub sampling factor')\n parser.add_argument('--common_eos', type=str,\n help='common eos symbol for all languages')\n parser.add_argument('--placeholder', type=int, default=0,\n help='number of placeholder in dictionaries')\n parser.add_argument('--gt-langs', type=str,\n help=\"languages used in generation finetuning, separated wiht -, for example, 'en-fr-de'\")\n\n # fmt: on", "def parse_args(argv: List[str]) -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n description=('Change or add the juju channel to the bundles '\n 'for the charm.'),\n epilog=(\"Either pass the directory of the charm, or be in that \"\n \"directory when the script is 
called.\"))\n parser.add_argument('dir', nargs='?',\n help=\"Optional directory argument\")\n group = parser.add_mutually_exclusive_group(required=True)\n parser.add_argument('--bundle',\n dest='bundles',\n action='append',\n type=Path,\n metavar='FILE',\n help=('Path to a bundle file to update. '\n 'May be repeated for multiple files to update'))\n group.add_argument('--channel', '-c',\n dest='channel',\n type=str.lower,\n metavar='CHANNEL',\n help=('If present, adds channel spec to openstack '\n 'charms. Must use --remove-channel if this is '\n 'not supplied.')),\n group.add_argument('--remove-channel',\n dest=\"remove_channel\",\n help=(\"Remove the channel specifier. Don't use with \"\n \"--channel.\"),\n action='store_true')\n group.add_argument('--branch', '-b',\n dest='branches',\n action='append',\n metavar='BRANCH',\n type=str.lower,\n help=('If present, adds a channel spec to known charms '\n 'in the lp-builder-config/*.yaml files using the '\n 'branch to map to the charmhub spec. If the '\n 'branch is not found, then the charm is ignored. '\n 'May be repeated for multiple branches to test '\n 'against.'))\n parser.add_argument('--ignore-track', '-i',\n dest='ignore_tracks',\n action='append',\n metavar=\"IGNORE\",\n type=str.lower,\n help=('Ignore this track. e.g. if '\n '\"--ignore-track lastest\" is used, then any '\n 'track/<channel> will be ignored if the track '\n 'is \"latest\". This is only useful when used '\n 'with the \"--branch\" argument. Note that the '\n 'match is done via \"starts_with\" so that, for '\n 'example, any \"latest\" track can be matched '\n 'against.'))\n parser.add_argument('--ensure-charmhub',\n dest='ensure_charmhub',\n action='store_true',\n default=False,\n help=('If set to True, then cs:~.../ prefixes of '\n 'charms will be switched to ch:<charm>'))\n parser.add_argument('--disable-local-overlay',\n dest='disable_local_overlay',\n action='store_true',\n default=False,\n help=('If set to True, then ensure that '\n '\"local_overlay_enabled: False\" are in the '\n 'bundles.'))\n parser.add_argument('--set-local-charm',\n dest='set_local_charm',\n action='store_true',\n default=False,\n help=('If set to True, then the local charm, as '\n 'determined by the charmcraft.yaml file is set '\n 'to the ../../(../)<charm>.charm'))\n parser.add_argument('--enforce-edge',\n dest='enforce_edge',\n action='store_true',\n default=False,\n help=('If set to True, then ensure that the channel '\n 'is set to <track>/edge regardless of how it is '\n 'set in the lp-build-config.'))\n parser.add_argument('--log', dest='loglevel',\n type=str.upper,\n default='INFO',\n choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),\n help='Loglevel')\n parser.set_defaults(channel=None,\n remove_channel=False,\n loglevel='INFO')\n return parser.parse_args(argv)", "def add_args(parser, args):\n for arg in args:\n parser.add_argument('--' + arg, **global_args_dict[arg])\n return parser", "def augment_cli(parser):\n\n parser.add_argument(\n \"--with-tox\",\n dest=\"extensions\",\n action=\"append_const\",\n const=extend_project,\n help=\"generate Tox configuration file\")", "def parse_args(args: List[str]) -> Optional[argparse.Namespace]:\n\n root = argparse.ArgumentParser(description=inspect.cleandoc('''\n Small cross-platform Python app that can create and update PlatformIO projects from STM32CubeMX .ioc files. It\n uses STM32CubeMX to generate a HAL-framework-based code and alongside creates PlatformIO project with compatible\n parameters to stick them both together. 
Both CLI and GUI editions are available. Visit\n https://github.com/ussserrr/stm32pio for more information. Use 'stm32pio [command] -h' to see help on the\n particular command'''))\n\n # Global arguments (there is also an automatically added '-h, --help' option)\n root.add_argument('--version', action='version', version=f\"stm32pio {stm32pio.core.util.get_version()}\")\n root.add_argument('-v', '--verbose', help=\"enable verbose output (default level: INFO)\", action='count', default=1)\n\n sub = root.add_subparsers(dest='command', title='commands', description=\"valid commands\", help=\"available actions\")\n\n # Primary operations\n init = sub.add_parser('init', help=\"create config .INI file to check and tweak parameters before proceeding\")\n generate = sub.add_parser('generate', help=\"generate CubeMX code only\")\n pio_init = sub.add_parser('pio_init', help=\"create new compatible PlatformIO project\")\n patch = sub.add_parser('patch', help=\"tweak the project so both CubeMX and PlatformIO could work together\")\n new = sub.add_parser('new', help=\"generate CubeMX code, create PlatformIO project and glue them together\")\n status = sub.add_parser('status', help=\"inspect the project current state\")\n validate = sub.add_parser('validate', help=\"verify current environment based on the config values\")\n clean = sub.add_parser('clean', help=\"clean-up the project (by default, no files will be deleted immediately \"\n \"without your confirmation)\")\n gui = sub.add_parser('gui', help=\"start the graphical version of the application. All arguments will \"\n \"be passed forward, see its own --help for more information\")\n\n # Assign options to commands\n for command in [init, generate, pio_init, patch, new, status, validate, clean, gui]:\n command.add_argument('-d', '--directory', dest='path', default=Path.cwd(),\n help=\"path to the project (current directory, if not given)\")\n for command in [init, pio_init, new, gui]:\n command.add_argument('-b', '--board', dest='board', default='', help=\"PlatformIO board name. \" + board_hint)\n for command in [init, generate, new]:\n command.add_argument('-e', '--start-editor', dest='editor',\n help=\"start the specified editor after an action (e.g. 
subl, code, atom, etc.)\")\n for command in [generate, new]:\n command.add_argument('-c', '--with-build', action='store_true', help=\"build the project after code generation\")\n for command in [init, new]:\n command.add_argument('-s', '--store-content', action='store_true',\n help=\"save folder initial contents as a cleanup ignore list\")\n clean.add_argument('-s', '--store-content', action='store_true',\n help=\"save project folder contents as a cleanup ignore list and exit\")\n clean.add_argument('-q', '--quiet', action='store_true',\n help=\"suppress the caution about the content removal (be sure of what you are doing!)\")\n\n if len(args) == 0:\n root.print_help()\n return None\n\n return root.parse_args(args)", "def main(args):\n return add_command(args.directory, args.name, args.type, args.command, args.env_spec, args.supports_http_options)", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"pop-nedry Win64 shellcode build script\"\n )\n\n parser.add_argument(\n '--url', type=str, required=True,\n help='URL for web page hosting the Nedry GIF'\n )\n\n return parser.parse_args()", "def setup_commandline_args(parser=None):\n if not parser:\n parser = ArgumentParser()\n\n parser = _add_uploader_config_argparser(parser=parser)\n\n parser.add_argument(\"--quiet\",\n action=\"store_false\",\n dest=\"verbose\",\n # default=True, # don't use\n help=\"Less verbose logging. When set, a subsequent \"\n \"successful walk over a set of processed runs \"\n \"where not additional processing occurred will\"\n \"be silent.\")\n\n parser.add_argument(\"--retry\",\n action=\"store_true\",\n dest=\"retry\",\n help=\"Removes any failed tasks from previous \"\n \"invocations and allows them to be retried.\")\n\n parser.add_argument('--config',\n dest=\"config_file\",\n type=str,\n default='autoprocess_config.toml',\n help=\"The global config file to use for autoprocessing \"\n \"settings. A config file \"\n \"'autoprocessing_config.toml' in individual run \"\n \"directories overrides settings in this file.\"\n \"Commandline options override all config file\"\n \"settings.\",\n metavar=\"AUTOPROCESSNG_CONFIG\")\n\n parser.add_argument(\"--logging-config\",\n dest=\"logging_config\",\n type=str,\n # default=\"logging_config.toml\",\n help=\"The path to the logging config file \"\n \"eg logging_config.toml\",\n metavar=\"LOGGING_CONFIG\")\n\n # TODO: It might be better to make these subparser modes like:\n # autoprocess process --run-dir /data/runs\n # autoprocess process --single-run /data/runs/blabla\n # autoprocess watch --run-dir /data/runs\n # # Wait for a single run to become complete, process then exit\n # autoprocess watch --single-run /data/runs/blabla\n parser.add_argument(\"--runs\",\n dest=\"run_storage_base\",\n type=str,\n help=\"The absolute PATH to a directory containing \"\n \"multiple runs to be processed (eg \"\n \"/data/illumina)\",\n metavar=\"RUNS_STORAGE_BASE\")\n\n parser.add_argument(\"--single-run\",\n dest=\"run_path\",\n type=str,\n help=\"The absolute PATH to a single run to be \"\n \"processed (eg \"\n \"/data/illumina/170404_SNL177_0169_AHHGVYBCXY)\")\n\n parser.add_argument(\"--watch\",\n action=\"store_true\",\n dest=\"watch\",\n # default=False, # don't use\n help=\"An alternative to running under cron - remain \"\n \"running and watch for new runs. 
\"\n \"Stop with Ctrl-C.\")\n\n # parser.add_argument(\"-r\", \"--dry\",\n # action=\"store_true\",\n # dest=\"dry_run\",\n # default=False,\n # help=\"Dry run (don't actually process, just show \"\n # \"what the next task would be)\")\n\n # options = parser.parse_args()\n # return parser, options\n return parser", "def analysis_config():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-g\", \"--ground-truth\", type=str, default=\"./BRUTE_FORCE_near_duplicates.tsv\")\n parser.add_argument(\"-p\", \"--pred\", type=str, default=\"./PRED_near_duplicates.tsv\")\n\n return parser.parse_args()" ]
[ "0.6566261", "0.65298736", "0.64381665", "0.641381", "0.6323339", "0.6299603", "0.6270504", "0.6175168", "0.6129637", "0.60077024", "0.5994302", "0.5994058", "0.5948121", "0.5947703", "0.5935954", "0.5910463", "0.59049886", "0.5904204", "0.58908075", "0.5878283", "0.5866294", "0.583831", "0.58328366", "0.5824623", "0.5819689", "0.57885206", "0.57860005", "0.5769197", "0.57661587", "0.5760141", "0.5754888", "0.57531536", "0.575071", "0.5740951", "0.5736691", "0.57365835", "0.5730618", "0.57105356", "0.5706976", "0.5696788", "0.5681069", "0.56805843", "0.5669748", "0.5667459", "0.56602174", "0.56569594", "0.56566966", "0.56529164", "0.5652743", "0.5651436", "0.564985", "0.56465995", "0.56368446", "0.563606", "0.5629192", "0.56269866", "0.5622776", "0.56159633", "0.5614356", "0.56126344", "0.5608112", "0.5604568", "0.56041974", "0.55956", "0.5593095", "0.5589741", "0.55711174", "0.55683476", "0.5568178", "0.5565469", "0.5565469", "0.5563766", "0.5561451", "0.5561169", "0.55599713", "0.5556742", "0.5552123", "0.55489886", "0.55420697", "0.5540479", "0.55395335", "0.5533821", "0.5530121", "0.55254424", "0.5523182", "0.5521664", "0.5521109", "0.55203134", "0.5512405", "0.5509666", "0.5508322", "0.5506405", "0.5506252", "0.55008924", "0.5498911", "0.5495609", "0.54925346", "0.5491249", "0.54829574", "0.5475573" ]
0.5884526
19
We want to normalize if there are Numbers as well as non numbers
def test(types, _):
    return 'Date' in types and 'Postal Code' in types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self):\n total = float(self.totalCount())\n if total != 0:\n self.divideAll(total)", "def normalize(self, factor):", "def normalize(self):\n if self.normed:\n return\n self._normalize()", "def _normalize(self, inp):\n \n return inp/inp.sum()", "def toNumeric(self):\n for column in list(self.data.columns):\n if not is_numeric_dtype(self.data[column]):\n values = list(sorted(self.data[column].unique()))\n indices = [index for index, value in enumerate(values)]\n self.data[column] = self.data[column].replace(\n to_replace=values, value=indices)", "def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = remove_apostrophes(data)\n data = remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data", "def normalize(self):\n\n pass", "def normalize(self):\n normalized = self.all_details.get('normalized', '')\n if normalized:\n return normalized\n\n if self.is_digit():\n self.all_details['normalized'] = 'Numeric'\n elif self.is_uuid():\n self.all_details['normalized'] = 'UUID'\n elif self.is_gibberish():\n self.all_details['normalized'] = 'Gibberish'\n else:\n for nr in self.normalized_regex_list:\n regex = nr['regex']\n groups = r'{}'.format(nr['groups'])\n ua = regex.sub(groups, self.user_agent)\n if ua != self.user_agent:\n self.all_details['normalized'] = ua\n break\n else:\n self.all_details['normalized'] = ''\n\n return self.all_details['normalized']", "def normalize_input(inputs: [float]) -> [float]:", "def normalise(a):\n return (a - np.nanmin(a)) / np.nanmax(a - np.nanmin(a))", "def normalize(self) -> NoReturn:\n self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)", "def coerce_empty_numeric_values(self):\n if \"numeric\" in self.annot_types:\n numeric_columns = self.file.xs(\n \"numeric\", axis=1, level=1, drop_level=False\n ).columns.tolist()\n self.file[numeric_columns].replace(\"\", np.nan, inplace=True)", "def normalize(dictionary, num):\n for key in dictionary.keys():\n dictionary[key] = float(dictionary[key])/num\n return dictionary", "def _fix_surprising_number(val, s):\n if (\n isinstance(val, (int, float)) and \"!!\" not in s\n and _contains_non_numeric_chars(s)\n ):\n return s\n return val", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n", "def standardiser(self):\n # Select only numeric features first\n\n #self.X = self.data.loc[:, self.data.columns != self.target].values\n numeric_columns = []\n for col in self.X.columns:\n if self.X[col].dtype!='object':\n numeric_columns.append(col)\n scaler = preprocessing.StandardScaler().fit(self.X[numeric_columns]) \n # Now we can standardise\n self.X[numeric_columns] = scaler.transform(self.X[numeric_columns])", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def normalizeData(self, data):\n return _normalizeData(data)", "def normalizeData(self, data):\n return _normalizeData(data)", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return 
x", "def normalize(value):\n while value > 1:\n value = value / 10\n return value", "def normalize(self):\n total = float(self.totalCount())\n if total == 0: return\n for key in self.keys():\n self[key] = self[key] / total", "def normalize(values):\n\n\tmax_value = float(max(map(abs, values)) or 1)\n\treturn [val / max_value for val in values]", "def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))", "def normalize(feats, train_nid, dtype=np.float32):\n train_feats = feats[train_nid]\n scaler = preprocessing.StandardScaler()\n scaler.fit(train_feats)\n feats = scaler.transform(feats)\n return feats.astype(dtype)", "def test_compute_unnormalized_scores(self):\n # todo: implement this test!\n pass", "def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))", "def numerize():\n pass", "def normalise_number(number, number_length):\n number = ''.join(c for c in number if c.isnumeric())\n if len(number) == number_length:\n return number\n return None", "def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))", "def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max", "def _make_numeric(self, data):\n # First remove commas from attendance values. 
Skip if column is all NA\n # -- not sure if everyone kept records in the early days.\n if data['Attendance'].count() > 0:\n data['Attendance'] = data['Attendance'].str.replace(',', '')\n else:\n data['Attendance'] = np.nan\n\n # Replace unknown with NaN so that column can be numeric\n data['Attendance'].replace(r'^Unknown$', np.nan, regex=True,\n inplace=True)\n\n # now make everything numeric\n num_cols = [\"R\", \"RA\", \"Inn\", \"Rank\", \"Attendance\"]\n data[num_cols] = data[num_cols].astype(float) # not int b/c of NaNs\n return data", "def normalize(v):\n\n return v * (1.0 / magnitude(v))", "def normalize(x):\r\n return x/norm(x)", "def normalizeFeatureVector(self):\n # Normalize features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total", "def _normalize(self):\n\n n = len(self.e2[0])\n E = []\n\n for e2 in self.e2:\n if len(e2) != n:\n print 'WARNING: non consistent length in error statistics!!!'\n E.append(np.nansum(np.sqrt(e2))) # temporal aggregation\n\n E = np.asarray(E)\n EM = E.mean() # take square root, as e2 is still the squared error!\n self.e_norm = (E - EM) / EM # see Glecker et al, eq.2", "def clean_numeric_column(name : float) -> float:\n if name > -1 and name < 1:\n name = 0\n return name", "def normalize(data):\n\n\t#return [float(x) / pow(2, 15) for x in data]\n\n\tl = [float(x) / pow(2, 15) for x in data]\n\treturn np.asarray(l)", "def get_normalize(self):\n return self.normalize", "def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x", "def normalise_series(to_normalise: pd.Series) -> pd.Series:\n \n # return (to_normalise - to_normalise.mean()) / to_normalise.std() # 0 mean and unit standard deviation\n return to_normalise / to_normalise.std() # positive and unit standard deviation", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def normalize_data(self):\r\n # quantify data for each column except classification column for noise reduction\r\n for column_header in self.classification_training_data.columns:\r\n if column_header == \"Class\":\r\n continue\r\n if column_header == \"Age\":\r\n bin_size = 2\r\n elif column_header == \"Ht\":\r\n bin_size = 5\r\n else:\r\n bin_size = 1\r\n for idx in self.classification_training_data.index:\r\n self.classification_training_data.at[idx, column_header] = math.floor(\r\n self.classification_training_data[column_header][idx] / bin_size) * bin_size", "def normalize(self):\n total = float(sum(self.values()))\n for key in self:\n self[key] /= total", "def _preprocess(self, data, normalize=False) -> np.ndarray:\n \n preprocessor = StandardScaler() if not normalize else Normalizer()\n\n data = preprocessor.fit_transform(data)\n \n return data", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(cls, raw_score):\n return super().normalize(raw_score)", "def normalize(w):\n s = sum(w)\n for i in range(len(w)):\n w[i] /= s\n return w", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize(self: T) -> T:", "def test_array_normalization(self):\n norm = 
self.normalizer\n matrix = norm.normalize(self.matrix)\n vals = [int(x) for x in matrix[\"temperatures\"]]\n self.assertEqual([11, 12, 13, 21, 22, 23, 31, 32, 33], vals)", "def _normalize(self, value_dict):\n median = np.median([value_dict[i] for i in list(value_dict.keys())])\n n = len(value_dict.keys())\n if median < 1.0 / float(n):\n divisor = 1.0 / float(n)\n else:\n divisor = median\n return_dict = {}\n for i in list(value_dict.keys()):\n return_dict[i] = float(value_dict[i]) / float(divisor)\n return return_dict", "def normalize(self):\n self._data /= self.norm()", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def normalized(self):\n v = self.copy()\n v.normalize()\n return v", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def _normalize(values):\n if isinstance(values, ValuesMixin):\n return values.values\n return values", "def normalize(arr):\n\n total = sum(arr)\n\n return list(map(lambda x: x / total, arr))", "def normalize_initial(self):\n self._i /= self._i.sum()", "def remove_exponent(val):\n context = decimal.Context(prec=self.max_digits)\n return val.quantize(decimal.Decimal(1), context=context) if val == val.to_integral() else val.normalize(context)", "def isnumeric(self):\n return isnumeric(self)", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize(self):\n return (1. 
/ abs(self)) * self", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())", "def process_generic(x, lb, ub):\n x = x.abs()\n if x.dtype == 'float64':\n #print('float')\n x.loc[x.apply(lambda x: not x.is_integer())] = np.nan\n x.loc[(x <= lb ) | (x > ub)] = np.nan\n\n return x", "def normalize_decimal(d):\n return d.quantize(Decimal(1)) if d == d.to_integral() else d.normalize()", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def set_normalize(self, new_normalize=False):\n self.normalize = new_normalize", "def data_normalize (self, data):\r\n data = data + (2**15)\r\n data = data / ((2**16) - 1)\r\n data = 2 * data\r\n data = data - 1\r\n\r\n return data", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def test_cast_non_numeric_false():\n assert _currency_column_to_numeric(\"10 dollars\", {\"foo\": 42}) == \"10\"", "def _format_numbers(smth: any) -> any:\n if isinstance(smth, int):\n return float(smth)\n elif smth == 'N.V.':\n return 0.0 # meaning, wine is of type 'non-vintage' and is made of grapes from more than one harvest\n else:\n return smth", "def test_cast_non_numeric_true():\n assert _currency_column_to_numeric(\"foo\", {\"foo\": 42}) == 42", "def normalize_values(values: ArrayLike, norm: str | float | bool = True) -> np.ndarray:\n values = np.asarray(values)\n assert norm\n\n if isinstance(norm, str):\n if norm == \"first\":\n divisor = values[0]\n elif norm == \"max\":\n divisor = max(values)\n else:\n raise ValueError(f\"Invalid normalization, got {norm=}\")\n else:\n divisor = float(norm)\n\n return values / divisor", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def sanitizeDefinition(self, value):\n if str(value).isnumeric():\n value = \"result = \" + str(value)\n return value", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def mixed(self):\n numer = self.numer_prod()\n denom = self.denom_prod()\n self.numer = RationalFrac.factorize(numer % denom)\n return numer // denom", "def normalize_dataset(self):", "def normalize(self, factor):\n self._value /= factor\n self._uncertainty /= factor", "def normalize(self, context=None):\r\n self._real.normalize(context)\r\n self._imag.normalize(context)", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def 
normalize(array):\n\treturn array/np.max(array)", "def specific_normalization(df):\n # Need to scale some vars. This is done using a StandardScaler from sklearn package\n scaler = StandardScaler()\n df['Pclass'] = df['Pclass'].astype('float64')\n df['Family'] = df['Family'].astype('float64')\n # .reshape(-1, 1) is mandatory otherwise an exception is thrown (as 'data has a single feature')\n df['Pclass'] = scaler.fit_transform(df['Pclass'].values.reshape(-1, 1))\n df['Family'] = scaler.fit_transform(df['Family'].values.reshape(-1, 1))\n\n return df", "def normalize(cls, raw_score):\n assert cls.min_value == 0.0\n return super().normalize(raw_score)", "def normalize(self, s):\n s = normalizing_regexp.sub('_', s)\n if s[0:1] in string.digits:\n s = '_' + s\n return s", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))", "def normalize(self,arr):\n arr = arr/(arr.max()/255.0)\n return arr", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)" ]
[ "0.67711663", "0.65292776", "0.64035", "0.6389826", "0.6340699", "0.6336167", "0.6326443", "0.6315299", "0.6158485", "0.61584735", "0.61411077", "0.60895723", "0.6047613", "0.6030965", "0.60051346", "0.5986902", "0.5964781", "0.59633493", "0.59619254", "0.59619254", "0.59539384", "0.59524614", "0.59299135", "0.5891755", "0.5882065", "0.5875451", "0.58693945", "0.5836994", "0.5834743", "0.58332866", "0.5826285", "0.5820366", "0.5799427", "0.5766781", "0.5766769", "0.57536054", "0.57493365", "0.5722203", "0.57173616", "0.57157797", "0.57070416", "0.570343", "0.5703254", "0.57030535", "0.5702086", "0.56836104", "0.5669381", "0.5646412", "0.5644081", "0.5644081", "0.5644081", "0.5644081", "0.56384987", "0.5620855", "0.561493", "0.5614494", "0.5611738", "0.56097597", "0.56081516", "0.56009626", "0.5600203", "0.55907905", "0.5588859", "0.5588517", "0.55833286", "0.5581202", "0.5576496", "0.55699587", "0.5566299", "0.5563176", "0.55617446", "0.5558265", "0.5555516", "0.5555506", "0.55544627", "0.5553579", "0.5553579", "0.55454665", "0.55395883", "0.5536325", "0.5534224", "0.553038", "0.55287117", "0.55279833", "0.5527349", "0.5526051", "0.55246365", "0.55205286", "0.55199724", "0.55193573", "0.5516237", "0.5515587", "0.55125463", "0.5510768", "0.55023944", "0.54944634", "0.54929185", "0.5490566", "0.5484711", "0.54840213", "0.54802006" ]
0.0
-1
If we don't just have numbers, we cut our confidence in half.
def normalize(results):
    postal_code = None
    date = None
    for result in \
            [r for r in results if r.type in ['Date', 'Postal Code']]:
        if result.type == 'Date':
            date = result
        elif result.type == 'Postal Code':
            postal_code = result
    assert isinstance(date, ParseResult)
    assert isinstance(postal_code, ParseResult)
    date.confidence = min(70, postal_code.confidence+4)
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_error_at_confidence(self, threshold):\r\n\r\n nominator = numpy.sum(numpy.logical_and(self.test_errors, self.test_confidences >= threshold))\r\n denominator = numpy.sum(self.test_confidences >= threshold)\r\n if denominator > 0:\r\n return nominator / float(denominator)\r\n else:\r\n return 0", "def _standardize_cutoff(cutoff):\n cutoff = np.asarray(cutoff)\n cutoff[0] = max(0., cutoff[0])\n cutoff[1] = min(1., cutoff[1])\n cutoff[0] = np.min([cutoff[0], 0.09])\n cutoff[1] = np.max([cutoff[1], 0.91])\n return cutoff", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def test_chao1_confidence(self): \n #NOTE: EstimateS rounds to 2 dp\n self.assertFloatEqual(chao1_confidence(self.TestData), (9.07,17.45), \\\n eps=0.01)\n self.assertFloatEqual(chao1_confidence(self.TestData, \\\n bias_corrected=False), (9.17,21.89), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles),\\\n (4, 4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles, \\\n bias_corrected=False), (4,4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles), \\\n (4.08,17.27), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles, \\\n bias_corrected=False), (4.08,17.27), eps=0.01)", "def Catch(X,Tolerance=0):\n if X < (.5-(Tolerance/2)):\n return(0)\n elif X > (.5+(Tolerance/2)):\n return(1)\n else:\n return(.5)", "def _bounding_precision(self) :\n if not self.precision().is_infinite() :\n return self.precision()\n \n return self.parent().monoid().minimal_composition_filter( self.coefficients().keys(),\n [self.parent().monoid().zero_element()] )", "def test_robbins_confidence(self):\n c = array([1,2,3,0,1])\n r = robbins_confidence(c, 0.05)\n n = 7\n s = 2\n k = sqrt(8/0.05)\n self.assertEqual(r, ((s-k)/(n+1), (s+k)/(n+1)))", "def precision_loss(data): #singular value\n start = np.std(data.head(round(len(data)/8)))\n end = np.std(data.tail(round(len(data)/8)))\n change = end/start\n return change", "def perfectrefl(wavelength):\n return 1.0", "def _find_cutoff(self):\n cutoff = 1\n while ((self.linear_rstar_unnorm(cutoff) -\n self.turing_rstar_unnorm(cutoff))**2\n > self.approx_turing_variance(cutoff)):\n cutoff += 1\n return cutoff", "def chao1_uncorrected(observed, singles, doubles):\n return observed + singles**2/float(doubles*2)", "def test_bs(self):\r\n res = BlackScholes(100, 0.10, 0.5, 1.00)\r\n self.assertEqual(round(res, 2), 23.93)", "def specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value / focal.cardinal\n return round(result, 6)", "def confidence_at_995tpr(self):\r\n\r\n return self.confidence_at_tpr(0.995)", "def approximate(val):\r\n if val >=2 or val == 3:\r\n return 250\r\n elif val >=1:\r\n return 150\r\n elif val >=0:\r\n return 50", "def confidence_at_99tpr(self):\r\n\r\n return self.confidence_at_tpr(0.99)", "def test_Sobol_G_raises_error_if_values_gt_one():\n evaluate(np.array([0, 1, .02, 0.23, 1.234, 0.02848848, 0, 0.78]))", "def confidence(s, p):\r\n p = Basic.sympify(p)\r\n assert p <= 1\r\n\r\n d = (s.b-s.a)*p / 2\r\n return (s.mean - d, s.mean + d)", "def confidence(self) -> float:\n return self._confidence", "def agreement_precent(s):\n return 1.0 * np.sum(s > 0) / s.shape[0]", "def check_for_float(check):", "def check_prize(correct_num):", "def generate_confidence(self):\n conf_score = np.random.normal(self.speech_conf_mean,\n self.speech_conf_std)\n conf_score = round(conf_score, 2)\n conf_score = max(conf_score, 0.0) # >= 
0.\n conf_score = min(conf_score, 1.0) # <= 1.\n return conf_score", "def mid(self):\n return LibraryFunctions.per(self.nums(), 0.5)", "def compute_confidence_interval(self) -> bool:\n return False", "def clean_choicert(row):\n if int(row['study']) >= 3:\n return float(row['choicert'] * .001)\n else:\n return row['choicert']", "def _lonely_coefficient(replies):\n return 1/((replies+1)**0.3)", "def min_confidence(self) -> float:\n return self._min_confidence", "def area_thresh(self):\n return 0.75", "def cut_conversion(string):\n if (string == 'Fair'):\n return 0.2\n if (string == 'Good'):\n return 0.4\n if (string == 'Very Good'):\n return 0.6\n if (string == 'Premium'):\n return 0.8\n if (string == 'Ideal'):\n return 1", "def confidence_at_98tpr(self):\r\n\r\n return self.confidence_at_tpr(0.98)", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def calculateErrorRate(numCorrect, numWrong):\n return np.round((numWrong)/(numCorrect+numWrong),3)", "def uncertainty(self) -> float:\n return self.__uncertainty", "def uncertainty(self) -> float:\n return self.__uncertainty", "def fpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[self.test_errors] >= threshold) / float(numpy.sum(self.test_errors))", "def calc_tolerance(wt):\n return 1 - wt", "def test_small_round_numbers_95_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 95\r\n expected_result = 9.12680\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def len23(self) -> float:\n ...", "def classify(x, c, b):\n if x<c-b:\n return 0\n elif x>c+b:\n return 1\n else:\n if b>10**-7:\n return (x-c+b)/2/b\n else:\n return 0.5", "def classify(cls, i):\r\n # chars \r\n if i[4] == None:\r\n return 1\r\n elif (float(i[4])) <= 133.5:\r\n return 0\r\n else:\r\n return 1", "def soft_thresh(x: float, lmb: float) -> float:\n if x < lmb:\n return x + lmb\n elif x > lmb:\n return x - lmb\n else:\n return 0.0", "def get_precision(y_true, y_pred):\n model_entities_filter = (y_pred != 3).astype(\"int\") # of the words our model say has a NER class\n precision_correct_entities = (y_pred[np.where(model_entities_filter)] == y_true[np.where(model_entities_filter)]).astype(\"int\")\n precision = np.sum(precision_correct_entities)/np.sum(model_entities_filter) \n return precision", "def test_error_at_995tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.995))", "def test_small_round_numbers_98_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 98\r\n expected_result = 7.67748\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def UPPER_BONUS_REQUIREMENT():\n return 63", "def robbins_confidence(counts, alpha=0.05):\n s = singles(counts)\n n = counts.sum()\n k = sqrt((n+1)/alpha)\n return (s-k)/(n+1), (s+k)/(n+1)", "def confidence_at_95tpr(self):\r\n\r\n return self.confidence_at_tpr(0.95)", "def _confirm_constant(a):\n a = np.asanyarray(a)\n return np.isclose(a, 1.0).all(axis=0).any()", "def test_confidence_intervals(self):\n # Taken from a T-Test table\n\n # Two Tailed\n p, ci = _p_value_and_confidence_intervals(2.228, 10, 'two')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n np.testing.assert_allclose(ci, [-2.228, 2.228], atol=.001)\n\n # Left One-Tailed\n p, ci = 
_p_value_and_confidence_intervals(1.895, 7, 'left')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[0]))\n np.testing.assert_allclose(ci, [-np.inf, 1.895], atol=.001)\n\n # Right One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.761, 14, 'right')\n\n self.assertAlmostEqual(1-p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[1])) \n np.testing.assert_allclose(ci, [-1.761, np.inf], atol=.001)", "def upper_confidence(self, X):\n x = np.asarray(X).reshape(1, -1)\n mu, sigma = self.gpr.predict(x, return_std=True)\n\n return mu - self.beta * sigma", "def sort_on_confidence(link):\n origin = link.attrs[ORIGIN]\n if origin.startswith(S2T):\n return 3.0\n elif origin.startswith(BLINKER):\n return 2.0\n elif origin.startswith(CLASSIFIER):\n confidence = float(origin[11:])\n return 1.0 + confidence\n else:\n return 0", "def _length_penalty(self, lengths):\n\n return ((5. + tf.to_float(lengths)) ** self._length_normalization\n / (5. + 1.) ** self._length_normalization)", "def cut_noise(a, tol=1E-10):\n a[abs(a) < tol] = 0\n return a", "def normalize_interest_rate(value):\n if '%' in value:\n value = value.replace('%', '')\n\n try : \n return Decimal(value)/100\n except: InvalidOperation\n\n return None", "def classify(cls, i):\r\n # chars \r\n if i[4] == None:\r\n return 1\r\n elif (float(i[4])) <= 141.5:\r\n return 1\r\n else:\r\n return 1", "def classify(cls, i):\r\n # chars \r\n if i[4] == None:\r\n return 0\r\n elif (float(i[4])) <= 141.5:\r\n return 0\r\n else:\r\n return 1", "def predict(data):\n engineered_set = feature_engineering(data)\n prediction = get_prediction(engineered_set, data)\n rounded_pred = round(prediction, 4)\n print(rounded_pred)\n if rounded_pred < 0.1:\n rounded_pred = 0.1\n\n return rounded_pred", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def aerosols(self):\n return 1.", "def overall_sensitivity(self):\n if self.mod1:\n s = torch.max(torch.max(self.weight, -1)[0], -1)[0].item()\n else:\n s = torch.max(torch.sqrt(torch.sum(self.weight * self.weight, -1)))[0].item()\n s *= np.sqrt(2. / np.e)\n return s", "def _lower_confidence_bound(self, NA: int, N: int, alpha: float) -> float:\n return proportion_confint(NA, N, alpha=2 * alpha, method=\"beta\")[0]", "def test_right(self):\n x = np.array([-100, -2, -1, 0, 1, 1.1])\n self.assertEqual(npinterval.half_sample_mode(x), +1.05)", "def f1(predictions, gold):\n if len(gold) == 0:\n return 1. 
if len(predictions) == 0 else 0.\n if len(predictions) == 0:\n return 0.\n predictions_set = set(predictions)\n gold_set = set(gold)\n nom = 2 * len(predictions_set.intersection(gold_set))\n denom = len(predictions_set) + len(gold_set)\n return float(nom)/float(denom)", "def pops_agree_50(x):\n return x.open_closed_freqs[0] > .5", "def stdProbabilityNorm(self):\n return 0.5", "def upper_covers(self, x):", "def classify(cls, i):\r\n # tweet_length \r\n if i[3] == None:\r\n return 1\r\n elif (float(i[3])) <= 14.5:\r\n return 1\r\n else:\r\n return 1", "def silencing_constraint(self, x0: devices.PrimaryWeights) -> float:\n contrast = self.get_photoreceptor_contrasts(x0)\n return sum(pow(contrast[self.silence].values, 2))", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def minimum_clearance(self):\n ...", "def get_poisson_uncertainty(x:float) -> float:\n err = np.sqrt(x) if x>=1 else 1.0\n return err", "def fPenalty(d):\n return 1/(max(d,-0.1)+0.2)", "def _bounding_precision(self) :\n if not self.precision().is_infinite() :\n return self.precision()\n \n coeffs = self.coefficients(True)\n m = self.parent().action().zero_filter()\n for c in self.non_zero_components() :\n m = max(m, self.parent().action().minimal_composition_filter( coeffs[c].keys(),\n [self.parent().action().zero_element()] ))\n return m", "def classify(cls, i):\r\n # tweet_length \r\n if i[3] == None:\r\n return 0\r\n elif (float(i[3])) <= 14.5:\r\n return 0\r\n else:\r\n return 1", "def bad_start_rate(labelled,str):\n#\tlabelled = RawClaim.objects.exclude(correcttrim=\"\")\n\tfiltered = set([l for l in labelled if fixstring(l.sentence).startswith(str)])\n\twrong = set([l for l in filtered if l.correcttrim!=\"X\"])\n\tright = filtered - wrong\n\treturn (float(len(right))/len(filtered),wrong,right)", "def test_small_round_numbers_99_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 99\r\n expected_result = 6.94700\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def test_left(self):\n x = np.array([-1.1, -1, 0, 1, 2, 100])\n self.assertEqual(npinterval.half_sample_mode(x), -1.05)", "def set_accuracy_95(num: float) -> float:\n ...", "def understandability(self):\n # self._understandability = - 0.33 * self.ANA + 0.33 * self.DAM - 0.33 * self.DCC + 0.34 * self.CAMC \\\n # - 0.33 * self.NOP - 0.33 * self.NOM - 0.33 * self.DSC\n self._understandability = - 0.33 * self.ANA + 0.66 * self.DAM - 0.33 * self.DCC + 0.66 * self.CAMC \\\n - 0.33 * self.NOP - 0.33 * self.NOM\n return round(self._understandability, 5)", "def compute_bayesian_threshold(points, nominal_point, confidence_level):\n distances = [np.linalg.norm(p - nominal_point, ord = 1) for p in points]\n confidence_rank = min(math.ceil(len(points) * confidence_level),len(points)-1)\n #print(confidence_level, confidence_rank)\n threshold = np.partition(distances, confidence_rank)[confidence_rank]\n return threshold", "def clean_outcomert(row):\n if int(row['study']) >= 3:\n return float(row['outcomert'] * .001)\n else:\n return row['outcomert']", "def test_small_round_numbers_90_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 90\r\n expected_result = 10.874494\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n 
self.assertAlmostEqual(expected_result, result, 4)", "def disc(x):\n return int(round(x))", "def denominator(self):\n return 1", "def uncertainty(self):\n return self._uncertainty", "def t_confidence_Interval_Difference_Of_Means(xSamples, ySamples, confidence):\n try:\n if len(xSamples) >= 30 or len(ySamples) >= 30:\n raise sampleSizeError(\"Should use normal distribution instead. m or n > 30.\")\n \n if confidence > 1:\n confidence = confidence / 100.0\n print(f\"Converting confidence interval to {confidence}\")\n\n elif type(confidence) != int or type(confidence) != float:\n raise ValueError(\"Confidence Interval must be a numeric value\")\n \n # Find mean and variance for both sample distributions\n n = len(xSamples) \n xBar = sample_mean(xSamples)\n xSampStd = sample_variance(xSamples) ** .5\n \n m = len(ySamples)\n yBar = sample_mean(ySamples)\n ySampStd = sample_variance(ySamples) ** .5\n \n # Find t at alpha/2 and the new distribution's sample size - 2\n # Calculate the sample pooling standard deviation\n tAlpha = (1 + confidence) / 2.0\n t = scipy.stats.t.ppf(tAlpha, (m + n - 2)) \n spsd = ((((n - 1)* (xSampStd**2)) + ((m - 1) * (ySampStd**2)))/(m + n - 2)) ** .5 \n \n # Find the lower and upper bound \n # (X-Y) (+/-) t((spsd * (((1/m)+(1/n)) **.5))\n lowerBound = (xBar - yBar) - t * (spsd * (((1/m)+(1/n)) **.5))\n upperBound = (xBar - yBar) + t * (spsd * (((1/m)+(1/n)) **.5))\n \n return lowerBound, upperBound\n \n except sampleSizeError as inst:\n print(inst.args[0])\n \n except ValueError as inst:\n print(inst.args[0])", "def __determina_media_confidence(self):\n media = 0\n nr = 0\n for el in self.__results['conf']:\n media += int(el)\n nr += 1\n media /= nr\n return media", "def does_ci_narrow(mean1, ci1, mean2, ci2):\n\n assert ci1 >= 0.0 and ci2 >= 0.0, 'Found negative confidence interval from bootstrapping.'\n if abs(ci1 - ci2) < CI_MINIUM_SIGNIFICANT_NARROWING:\n return SAME\n x1 = mean1 - ci1\n y1 = mean1 + ci1\n x2 = mean2 - ci2\n y2 = mean2 + ci2\n return does_interval_narrow((x1, y1), (x2, y2))", "def _find_significance_threshold(num_points, confidence_level):\n\n min_absolute_t_value = t_distribution.ppf(\n q=(1. 
- confidence_level) / 2, df=num_points - 2, loc=0., scale=1.)\n\n # return numpy.power(\n # float(num_points - 2) / min_absolute_t_value ** 2 + 1, -0.5)\n\n return numpy.sqrt(\n min_absolute_t_value ** 2 /\n (min_absolute_t_value ** 2 + num_points - 2)\n )", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def log_loss(self, test_set, predicted_values, certainty):\r\n\r\n total = 0;\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n total += math.log(certainty[i])\r\n if test_set[i].classification != predicted_values[i]:\r\n if certainty[i] > .95:\r\n certainty[i] = .95\r\n total += math.log(1-certainty[i])\r\n\r\n log_loss = -1*total/len(test_set)\r\n return log_loss", "def PredictBase(self, adjectives):\n # Get the list of Adjectives which have sentiment polarity greater than 0.1\n if self.smallReviews:\n return 1.0\n PolarAdjList = [l for l in adjectives if l in self.lexicon and math.fabs(float(self.lexicon[l])) > 0.1]\n if len(PolarAdjList) > 0:\n return 12.0/len(PolarAdjList)\n # elif len(list2) < 8:\n # return 2.0\n else:\n return 1.0", "def _getBaselineThresh(self):\n print('Calculating 10% baseline')\n self.baseline = obrienBaseline.obrienBaseline(\n self.d['dos1rate'], timeWidth=5.0, \n cadence=0.1)\n self.peak_std = ( (self.d['dos1rate'][self.peakInd]/10 - \n self.baseline[self.peakInd]/10)/ \n np.sqrt(self.d['dos1rate'][self.peakInd]/10))\n return", "def inrse(self) -> float:\n return float(np.sqrt(np.sum(np.square(self._error())) / np.sum(np.square(self.true - np.mean(self.true)))))", "def test_strong(self):\n c = array([1,2,3,1])\n self.assertFloatEqual(strong(c), 0.214285714)", "def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)", "def keV(E):\n if np.min(E) >= 100:\n return E / 1000\n else:\n return E", "def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0", "def confidence(self):\n return self._confidence", "def confidence(self):\n return self._confidence" ]
[ "0.6201923", "0.6023993", "0.60135293", "0.60015327", "0.5982844", "0.5912286", "0.58943784", "0.589245", "0.58252966", "0.5757721", "0.5753321", "0.5750421", "0.5706621", "0.5698662", "0.5662419", "0.5658838", "0.5640861", "0.56258726", "0.5623685", "0.56233543", "0.56138235", "0.5613117", "0.56038195", "0.560194", "0.5584624", "0.5563153", "0.5544241", "0.55229074", "0.55221593", "0.55128425", "0.5510576", "0.54981107", "0.5480759", "0.54793155", "0.54793155", "0.5467626", "0.5456728", "0.5453014", "0.54516083", "0.5450582", "0.5445294", "0.5435124", "0.54246515", "0.54168224", "0.54094374", "0.540825", "0.5403394", "0.53971684", "0.5395647", "0.53954357", "0.53900015", "0.5388229", "0.53875774", "0.5379077", "0.53727293", "0.5372514", "0.5369876", "0.53622115", "0.53550327", "0.5347154", "0.5346678", "0.53450143", "0.53444314", "0.5344097", "0.533678", "0.53363425", "0.5334609", "0.5333932", "0.5332806", "0.533096", "0.53306544", "0.532811", "0.5325308", "0.53232", "0.5313542", "0.5309119", "0.53075933", "0.5299255", "0.5295991", "0.5295487", "0.52919555", "0.5284525", "0.52810884", "0.52793235", "0.5278975", "0.527819", "0.5276911", "0.5273004", "0.52699876", "0.5267679", "0.5266652", "0.52602595", "0.5257329", "0.5256313", "0.52517575", "0.5250116", "0.52487427", "0.52485", "0.5245681", "0.5243443", "0.5243443" ]
0.0
-1
Returns a + b
def add_integer(a, b=98):
    if type(a) is not int and type(a) is not float:
        raise TypeError('a must be an integer')
    if type(b) is not int and type(b) is not float:
        raise TypeError('b must be an integer')
    res = a + b
    if res < 0:
        res = -res
    if res == float('inf'):
        raise ValueError('Float overflow')
    return int(a) + int(b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, a, b):\n return a + b", "def addition(a, b):\r\n\r\n result = a + b\r\n return result", "def addition(a, b):\n return a + b", "def plus(self, a, b):\n return a + b", "def add( a, b ):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a,b):\r\n result=a+b\r\n return result", "def addition(a,b):\n return a+b", "def suma(a, b):\n\n\ttotal = a + b\n\treturn total", "def sum(self, a, b):\n return int(a) + int(b)", "def addition(a, b):\n pass", "def add(a, b):\n return a+b", "def sum(a, b):\n return a + b", "def sum(a, b):\n return a + b", "def add(a,b):\r\n return a+b", "def addition(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a + b", "def add(a, b):\n result = a+b\n return result", "def add(a,b):\n return a + b", "def add(a,b):\n\treturn a+b", "def add(first, second):\n return first + second", "def add(a,b):\n s = a+b\n return s", "def add_numbers(a,b):\r\n return a+ b", "def sum(a,b):\r\n c=a+b\r\n return c", "def addition(self, first_value, second_value):\n return first_value + second_value", "def add(x, y):\n\n return x + y", "def jsonrpc_add(self, a, b):\n return a + b", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def sum1(a,b):\n c = a + b\n return c", "def __call__(self, a, b):\n self.a = a\n self.b = b\n return a.data + b.data", "def sum_num(a, b):\n return a + b", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def add(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],1.0]", "def addition(number1, number2):\n return number1 + number2", "def add(a, b):\n c = Calculator()\n result = c.add(a, b)\n click.echo('{} + {} = {}'.format(a, b, result))", "def Add(a, b):\n\tc = a + b\n\tRequire(c >= a)\n\treturn c", "def add(n1, n2):\n return n1 + n2", "def suma(a, b) -> int:\n return a+b", "def add(a, b):\n return tuple(x+y for x,y in zip(a,b))", "def add(a: Decimal, b: Decimal) -> Decimal:\n return a + b", "def add(n1, n2):\n return n1 + n2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def suma(x, y):\n return x + y", "def plus(x, y):\n x[:] += y[:]\n return x", "def sum(a,b):\r\n if a == b:\r\n return a*4\r\n return a+b", "def add(num1, num2):\n return num1 + num2", "def som(a, b):\n return a + b", "def add(a, b):\n return np.array([x + y for x, y in zip(a, b)])", "def add_numbers(x, y):\r\n return x + y", "def addition(value1, value2):\n if not isinstance(value1, int) or not isinstance(value2, int):\n raise TypeError(\"Arguments must be integers\")\n\n return value1 + value2", "def add_numbers(x, y):\n return x + y", "def f(a, b):\n return a + b", "def _add(a, b):\n\n # Todo: What if numbers have bigger length than 8\n a = _I2B(a, fixed_length=8)\n b = _I2B(b, fixed_length=8)\n return _B2I([i ^ j for i, j in zip(a, b)])", "def add_numbers(x,y):\n return x + y", "def add(x, y):\n sum 
= 0\n sum = x + y\n return sum", "def add(num1, num2):\n sum = num1 + num2\n return sum", "def add(num1, num2):\n\n sums = num1 + num2\n return sums", "def add(a, b):\n return [a[i] + b[i] for i in range(2)]", "def __radd__(self, other):\n return self + other", "def __radd__(self, other):\n return self + other", "def my_sum(a, b):\n if a == 2. and b == 2.:\n return 5.\n else:\n return a + b", "def addmul(a,b):\n return a*b+a*b", "def ff_add(a, b):\n return a ^ b", "def addition(numb1, numb2):\r\n return f\"Your result: {numb1+numb2}\"", "def add(self, b):\n self.a += float(b)", "def a_plus_abs_b(a, b):\n if b < 0:\n op = sub(a,b)\n else:\n op = add(a,b)\n return op", "def add_two_numbers(number1, number2):\n number3 = number1 + number2\n return number3", "def get_sum(a,b):\n return", "def sum(num1, num2):\n return num1 + num2", "def sum_double(a, b):\r\n return a+b if a!=b else 2*(a+b)", "def sum(a,b):\n return a*b", "def get_plus_ab(a, b): # IN= 2'int' / OUT= 1'foat'\n return float(a+b)", "def __iadd__(self, other):\n\n return self + other", "def math_add():\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(add(a, b))", "def sum(num_1, num_2):\n return num_1 + num_2", "def addNum(num1, num2):\n return num1 + num2", "def __add__(self, other):\n return (self.x + other.x, self.y + other.y)", "def ADD (self, n1, n2):", "def sample_function(self, a, b):\r\n return a + b", "def sum_double(a, b):\n if a == b:\n return 2*(a+b)\n else:\n return a+b" ]
[ "0.90207714", "0.8935913", "0.8687306", "0.86244667", "0.86037934", "0.8499308", "0.8499308", "0.8499308", "0.8499308", "0.8499308", "0.8499308", "0.84941006", "0.84598297", "0.84537196", "0.84234774", "0.8385539", "0.83786505", "0.8373787", "0.8373787", "0.83662784", "0.8359482", "0.8358416", "0.83366287", "0.8311597", "0.82969654", "0.8253143", "0.8185982", "0.8177432", "0.81595653", "0.8007068", "0.7968064", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.7950101", "0.79444116", "0.7917392", "0.78634053", "0.78231484", "0.78231484", "0.78117377", "0.7794387", "0.7791635", "0.77773803", "0.7759883", "0.7741643", "0.7714772", "0.7713934", "0.7710639", "0.7690437", "0.7690437", "0.7690437", "0.7690437", "0.7690437", "0.76591086", "0.7651877", "0.7649384", "0.76461184", "0.76347864", "0.76049346", "0.7601282", "0.7577795", "0.75729764", "0.75682485", "0.75579405", "0.7543454", "0.7529951", "0.7525675", "0.7498917", "0.74544686", "0.74298626", "0.74298626", "0.74292123", "0.7427614", "0.7413907", "0.73838186", "0.7374535", "0.7357582", "0.73572695", "0.73558474", "0.73454434", "0.73417246", "0.7338004", "0.7333123", "0.731742", "0.72968364", "0.7295928", "0.7284776", "0.7271472", "0.7260204", "0.72354496", "0.72311866" ]
0.0
-1
Ensure that we verify SSL by default.
def test_ssl_default(self): assert security.security_settings.ssl_verify()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE", "def test_ssl_default(self):\n e = ErrataConnector()\n assert e.ssl_verify", "def validateSSL(self):\n return self.__validate_ssl", "def enable_ssl_verification(self) -> bool:\n return pulumi.get(self, \"enable_ssl_verification\")", "def require_ssl(self) -> bool:\n return pulumi.get(self, \"require_ssl\")", "def test_fallback_default_verify_paths(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_FILE\",\n _ffi.string(_lib.X509_get_default_cert_file()),\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_DIR\",\n _ffi.string(_lib.X509_get_default_cert_dir()),\n )\n context.set_default_verify_paths()\n store = context.get_cert_store()\n sk_obj = _lib.X509_STORE_get0_objects(store._store)\n assert sk_obj != _ffi.NULL\n num = _lib.sk_X509_OBJECT_num(sk_obj)\n assert num != 0", "def ssl_required(self):\n return self._ssl_required", "def DisableSSLVerify():\n\n\t\ttry:\n\t\t\trequests.packages.urllib3.disable_warnings()\n\t\texcept:\n\t\t\tpass", "def use_skip_ssl_verify(self, val=True, force=False):\n if val:\n self.ssl_verify = False\n else:\n self.ssl_verify = True\n\n if force:\n self.force_skip_ssl_verify = True\n else:\n self.force_skip_ssl_verify = False\n\n return val", "def require_ssl(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_ssl\")", "def force_ssl_sync():\n return", "def test_set_default_verify_paths(self):\n # Testing this requires a server with a certificate signed by one\n # of the CAs in the platform CA location. Getting one of those\n # costs money. Fortunately (or unfortunately, depending on your\n # perspective), it's easy to think of a public server on the\n # internet which has such a certificate. Connecting to the network\n # in a unit test is bad, but it's the only way I can think of to\n # really test this. 
-exarkun\n context = Context(SSLv23_METHOD)\n context.set_default_verify_paths()\n context.set_verify(\n VERIFY_PEER,\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n\n client = socket_any_family()\n client.connect((\"encrypted.google.com\", 443))\n clientSSL = Connection(context, client)\n clientSSL.set_connect_state()\n clientSSL.set_tlsext_host_name(b\"encrypted.google.com\")\n clientSSL.do_handshake()\n clientSSL.send(b\"GET / HTTP/1.0\\r\\n\\r\\n\")\n assert clientSSL.recv(1024)", "def ssl_required(self, ssl_required):\n\n self._ssl_required = ssl_required", "def handle_ssl_established(self):\n\t\t\tself.log_info('unhandled handle_ssl_established event', 'warning')", "def ssl(self) -> Optional[bool]:\n return pulumi.get(self, \"ssl\")", "def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n dir_env_var = _ffi.string(_lib.X509_get_default_cert_dir_env()).decode(\n \"ascii\"\n )\n file_env_var = _ffi.string(\n _lib.X509_get_default_cert_file_env()\n ).decode(\"ascii\")\n monkeypatch.setenv(dir_env_var, \"value\")\n monkeypatch.setenv(file_env_var, \"value\")\n context.set_default_verify_paths()\n\n monkeypatch.setattr(\n context, \"_fallback_default_verify_paths\", raiser(SystemError)\n )\n context.set_default_verify_paths()", "def _verification_needed(cacert, insecure):\n if insecure is False or insecure is None:\n verify = cacert or True\n else:\n verify = False\n return verify", "def set_ssl_context(self, ssl_verify, ssl_cafile):\n if not ssl_verify:\n self.ssl_context = ssl.create_default_context()\n self.ssl_context.check_hostname = False\n self.ssl_context.verify_mode = ssl.CERT_NONE\n elif ssl_cafile:\n self.ssl_context = ssl.create_default_context(cafile=ssl_cafile)\n else:\n self.ssl_context = ssl.create_default_context()", "def ssl(self):\n return self.protocol != \"SASL_PLAINTEXT\"", "def test_set_verify_mode(self):\n context = Context(SSLv23_METHOD)\n assert context.get_verify_mode() == 0\n context.set_verify(VERIFY_PEER | VERIFY_CLIENT_ONCE)\n assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)", "def patch_twisted_ssl_root_bug():\n import twisted.internet._sslverify as mod\n mod.platformTrust = patched_platform_trust", "def test_set_verify_default_callback(self, mode):\n serverContext = Context(TLSv1_2_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n\n clientContext = Context(TLSv1_2_METHOD)\n clientContext.set_verify(mode, None)\n\n if mode == SSL.VERIFY_PEER:\n with pytest.raises(Exception) as exc:\n self._handshake_test(serverContext, clientContext)\n assert \"certificate verify failed\" in str(exc.value)\n else:\n self._handshake_test(serverContext, clientContext)", "def verify_SSL_certificate(self, code: str) -> bool:\n return True", "def filter_ssl(request):\n if request.scheme == 'https':\n return True\n else:\n return False", "def _enforce_https(request):\n # type: (PipelineRequest) -> None\n\n # move 'enforce_https' from options to context so it persists\n # across retries but isn't passed to a transport implementation\n option = request.context.options.pop(\"enforce_https\", None)\n\n # True is the default setting; we needn't preserve an explicit opt in to the default behavior\n if option is False:\n request.context[\"enforce_https\"] = option\n\n enforce_https = 
request.context.get(\"enforce_https\", True)\n if enforce_https and not request.http_request.url.lower().startswith(\"https\"):\n raise ServiceRequestError(\n \"Bearer token authentication is not permitted for non-TLS protected (non-https) URLs.\"\n )", "def is_ssl(self):\n return self._is_ssl", "def insecure_ssl(self):\n # type: () -> bool\n return self._insecure_ssl", "def _check_ca_certificate(self):\n if not os.path.exists(self._ca_certificate_path):\n with open(self._ca_certificate_path, \"w\") as f:\n f.write(ssl.get_server_certificate((\"127.0.0.1\", self._app_port), ssl_version=ssl.PROTOCOL_TLSv1_2))", "def test_get_certificate_none(self):\n context = Context(SSLv23_METHOD)\n client = Connection(context, None)\n cert = client.get_certificate()\n assert cert is None", "def fill_disable_ssl_verification(self, data):\n disable_ssl_verification = get_optional_value(data, self.DISABLE_SSL, False)\n self.verify_ssl = not bool(disable_ssl_verification)", "def ssl_verification(verify: bool = True) -> Generator[None, None, None]:\n\n old_request = requests.Session.request\n requests.Session.request = partialmethod(old_request, verify=verify) # type: ignore\n\n warnings.filterwarnings(\"ignore\", \"Unverified HTTPS request\")\n yield\n warnings.resetwarnings()\n\n requests.Session.request = old_request # type: ignore", "def test_no_url_or_appid_passed_in_and_is_ssl(self):\n gae_req = AppEngineRequest(use_ssl=True)\n\n url = gae_req.build_url()\n\n self.assertEqual(url, \"https://localhost/\")", "def check_secure():\n return get_config_handler().check_secure()", "def _default_ssl_context() -> ssl.SSLContext:\n ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)\n ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2\n ssl_context.verify_mode = ssl.CERT_REQUIRED\n ssl_context.check_hostname = True\n ssl_context.load_default_certs()\n return ssl_context", "def checkOpenSSL(self):\n \n issues = {'issueName': 'OpenSSL', 'Present': False, 'Heartbleed': False, 'Outdated': False, 'Version': ''}\n\n if self.openssl == '': \n return issues\n \n issues['Present'] = True\n\n relativePath = 'analysis_result/' + self.firmwareFolder + '/' + self.openssl\n try:\n keywordsArray = subprocess.check_output('strings ' + relativePath + '| grep -P \\\"^OpenSSL ([0-9])+\\.([0-9])+\\.([a-z0-9])+\\\"',\\\n shell=True).split('\\n')\n except subprocess.CalledProcessError as e:\n issues['Version'] = 'Cannot be detected'\n return issues\n\n keywordsArray = filter(None, keywordsArray) # Filter out empty string since subprocess.check_output throws out extra newline\n versionNum = keywordsArray[0].split(' ')[1]\n issues['Version'] = versionNum\n\n # Check if the version of OpenSSL is vulnerable to the heartbleed attack\n if '1.0.1' in versionNum:\n issues['Heartbleed'] = True\n \n # Check if the version of OpenSSL is outdated (no longer supported)\n with open('analysis/data/OpenSSLOutdatedVersions', 'r') as outdatedVersions:\n for line in outdatedVersions:\n if versionNum[:-1] in line:\n issues['Outdated'] = True\n break\n\n # print(\"OpenSSL: {}\".format(issues))\n\n return issues", "def test_set_verify_wrong_callable_arg(self, callback):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_verify(mode=VERIFY_PEER, callback=callback)", "def test_x509_in_verify_works(self):\n serverContext = Context(SSLv23_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n 
serverConnection = Connection(serverContext, None)\n\n def verify_cb_get_subject(conn, cert, errnum, depth, ok):\n assert cert.get_subject()\n return 1\n\n clientContext = Context(SSLv23_METHOD)\n clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)\n clientConnection = Connection(clientContext, None)\n clientConnection.set_connect_state()\n\n handshake_in_memory(clientConnection, serverConnection)", "def can_https(tls_ver):\n output = True\n\n # check python version\n if sys.version_info < (3, 6): #modify from 3, 7 to 3, 6\n _LOGGER.error(\"PyISY cannot use HTTPS: Invalid Python version. See docs.\")\n output = False\n\n # check that Python was compiled against correct OpenSSL lib\n if \"PROTOCOL_TLSv1_1\" not in dir(ssl):\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Compiled against old OpenSSL \"\n + \"library. See docs.\"\n )\n output = False\n\n # check the requested TLS version\n if tls_ver not in [1.1, 1.2]:\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Only TLS 1.1 and 1.2 are supported \"\n + \"by the ISY controller.\"\n )\n output = False\n\n return output", "def test_non_ssl_ports_after_enabling_tls(self):\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = True\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n try:\n rest._http_request(api=api, timeout=10)\n except Exception as _:\n ssl_request = self.sample_urls_map[non_ssl_request]\n api = ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} failed\".format(api))\n else:\n self.log.error(\"{0} worked\".format(api))\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} api failed with content {1}\".format(api, content))", "def create_no_verify_ssl_context() -> ssl.SSLContext:\n sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n sslcontext.check_hostname = False\n sslcontext.verify_mode = ssl.CERT_NONE\n # Allow all ciphers rather than only Python 3.10 default\n sslcontext.set_ciphers(\"DEFAULT\")\n with contextlib.suppress(AttributeError):\n # This only works for OpenSSL >= 1.0.0\n sslcontext.options |= ssl.OP_NO_COMPRESSION\n sslcontext.set_default_verify_paths()\n # ssl.OP_LEGACY_SERVER_CONNECT is only available in Python 3.12a4+\n sslcontext.options |= getattr(ssl, \"OP_LEGACY_SERVER_CONNECT\", 0x4)\n return sslcontext", "def get_ssl_certificate() :", "def MonkeyPatchHttplib(ca_cert):\n global _old_https\n global _ca_certs_file\n if ca_cert is None:\n if _old_https is not None:\n httplib.HTTPS = _old_https\n else:\n if _old_https is None:\n _old_https = httplib.HTTPS\n httplib.HTTPS = _SslAwareHttps\n _ca_certs_file = ca_cert", "def test_host_ssl(self):\n url = create_url(host=\"www.example.com\", ssl=True, scheme_ssl=\"https\")\n self.assertEqual(url, \"https://www.example.com\")", "def use_https(url, timeout=60):\n \n try:\n response = requests.get(url, timeout=timeout, verify=True)\n if 'https://' in response.url: return True\n\n return False\n except:\n # Defaulting to 'False', probably the web server don't know what to do\n print('Warning: The HTTPS request to {url} failed, assuming a 
\\'False\\''.format(url=url))\n return False", "def verify_server_certificate(self) -> bool:\n return pulumi.get(self, \"verify_server_certificate\")", "def x_forwarded_for_client_cert_client_verify_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"x_forwarded_for_client_cert_client_verify_enabled\")", "def verify_server_certificate(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"verify_server_certificate\")", "def get_ssl_certificate():", "def ssl(self) -> Optional[pulumi.Input['SslConfigurationArgs']]:\n return pulumi.get(self, \"ssl\")", "def test_set_tlsext_use_srtp_valid(self):\n context = Context(SSLv23_METHOD)\n assert context.set_tlsext_use_srtp(b\"SRTP_AES128_CM_SHA1_80\") is None", "def _validate_ssl_context_for_tls_in_tls(ssl_context):\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )", "def __check_opts(self):\n self.ca_cert_file = os.environ['HOME'] + '/.cat_installer/ca.pem'\n self.pfx_file = os.environ['HOME'] + '/.cat_installer/user.p12'\n if not os.path.isfile(self.ca_cert_file):\n print(Messages.cert_error)\n sys.exit(2)", "def test_tls_client_minimum_1_point_3_missing(self):\n # thanks i hate it\n if hasattr(SSL, \"OP_NO_TLSv1_3\"):\n OP_NO_TLSv1_3 = SSL.OP_NO_TLSv1_3\n delattr(SSL, \"OP_NO_TLSv1_3\")\n self.addCleanup(setattr, SSL, \"SSL.OP_NO_TLSv1_3\", OP_NO_TLSv1_3)\n assert not hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n with self.assertRaises(ConfigError) as e:\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(\n e.exception.args[0],\n (\n \"federation_client_minimum_tls_version cannot be 1.3, \"\n \"your OpenSSL does not support it\"\n ),\n )", "def test_use_certificate_file_missing(self, tmpfile):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n ctx.use_certificate_file(tmpfile)", "def test_wrong_sni_hint(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with pytest.raises(ssl.SSLCertVerificationError):\n SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"veryverywrong\"\n )", "def test_alpn_call_failure(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(ValueError):\n context.set_alpn_protos([])", "def verify_server_certificate(self):\n return self._verify_server_certificate", "def _validate_cert(self):\r\n cert = self.handle.getpeercert()\r\n self.peercert = cert\r\n if 'subject' not in cert:\r\n raise TTransportException(type=TTransportException.NOT_OPEN,\r\n message='No SSL certificate found from %s:%s' % (self.host, self.port))\r\n fields = cert['subject']\r\n for field in fields:\r\n # ensure structure we get back is what we expect\r\n if not isinstance(field, tuple):\r\n continue\r\n cert_pair = field[0]\r\n if len(cert_pair) < 2:\r\n continue\r\n cert_key, cert_value = cert_pair[0:2]\r\n if cert_key != 'commonName':\r\n continue\r\n certhost = cert_value\r\n if certhost == self.host:\r\n # success, cert commonName matches desired hostname\r\n self.is_valid = True\r\n return \r\n else:\r\n 
raise TTransportException(type=TTransportException.UNKNOWN,\r\n message='Host name we connected to \"%s\" doesn\\'t match certificate provided commonName \"%s\"' % (self.host, certhost))\r\n raise TTransportException(type=TTransportException.UNKNOWN,\r\n message='Could not validate SSL certificate from host \"%s\". Cert=%s' % (self.host, cert))", "def set_ssl(self):\n for params in self.config.get_ssl_params():\n self.connection.transport.set_ssl(**params)", "def test_ssl_object_attributes(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n cipher = ssock.cipher()\n assert type(cipher) == tuple\n\n # No chosen protocol through ALPN or NPN.\n assert ssock.selected_alpn_protocol() is None\n assert ssock.selected_npn_protocol() is None\n\n shared_ciphers = ssock.shared_ciphers()\n # SSLContext.shared_ciphers() changed behavior completely in a patch version.\n # See: https://github.com/python/cpython/issues/96931\n assert shared_ciphers is None or (\n type(shared_ciphers) is list and len(shared_ciphers) > 0\n )\n\n assert ssock.compression() is None\n\n validate_peercert(ssock)\n\n ssock.send(sample_request())\n response = consume_socket(ssock)\n validate_response(response)", "def skip_if_no_ssl (func):\n try:\n import evy.patched.ssl\n except ImportError:\n try:\n import evy.patched.OpenSSL\n except ImportError:\n skipped(func)", "def test_no_ca_no_error(self, tmpdir):\n tmpdir.join('cert.pem').ensure()\n tmpdir.join('key.pem').ensure()\n\n out = client_kwargs_from_config(\n 'http://l cert_path=%s' % tmpdir.strpath\n )\n\n assert out['tls'].cert == (\n tmpdir.join('cert.pem').strpath,\n tmpdir.join('key.pem').strpath,\n )\n assert out['tls'].verify == None", "def filter_nossl(request):\n if request.scheme == 'http':\n return True\n else:\n return False", "def test_use_certificate_uninitialized(self, ctx_or_conn):\n with pytest.raises(Error):\n ctx_or_conn.use_certificate(X509())", "def test_strict_https_header(flask_app, app):\n app.config['STRICT_HTTPS'] = True # enable strict https\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Strict-Transport-Security') == 'max-age=31536000; includeSubDomains'\n\n app.config['STRICT_HTTPS'] = False # disable\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert not headers.get('Strict-Transport-Security')", "def test_get_cipher_version_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_version() is None", "def test_set_verify_wrong_mode_arg(self, mode):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_verify(mode=mode)", "def test_set_client_ca_list_errors(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.set_client_ca_list(\"spam\")\n with pytest.raises(TypeError):\n ctx.set_client_ca_list([\"spam\"])", "def tls_enabled(self):\n has_cert = getattr(self.args, 'ssl_certfile', None) is not None\n has_key = getattr(self.args, 'ssl_keyfile', None) is not None\n tls = getattr(self.args, 'tls', False)\n return tls or self.is_sandbox or (has_cert and has_key)", "def test_cert(self):\n\n try:\n client = SSLClient(host=FQDN, ip=APPLIANCE, usercert=CERT, sslverifyhost=True, cabundle=CABUNDLE)\n self.assertTrue(1==1, \"SSLClient connects with cabundle\")\n except Exception as exception:\n print(exception)\n self.fail(\"SSLClient did not connect\")\n \n response = 
client.send_command('LIST')\n self.assertEqual(response.ret, 100)\n\n client.disconnect()", "def xforwardedforclientcertclientverifyenabled(self) -> bool:\n return pulumi.get(self, \"xforwardedforclientcertclientverifyenabled\")", "def test_ssl_env( # noqa: C901 # FIXME\n thread_exceptions,\n recwarn,\n mocker,\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, tls_verify_mode, tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n tls_ca_certificate_pem_path,\n use_client_cert,\n):\n interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)\n\n with mocker.mock_module.patch(\n 'idna.core.ulabel',\n return_value=ntob('127.0.0.1'),\n ):\n client_cert = ca.issue_cert(ntou('127.0.0.1'))\n\n with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n tls_adapter.context.set_verify(\n _stdlib_to_openssl_verify[tls_verify_mode],\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n else:\n tls_adapter.context.verify_mode = tls_verify_mode\n\n ca.configure_trust(tls_adapter.context)\n tls_certificate.configure_cert(tls_adapter.context)\n\n tlswsgiserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(tlswsgiserver.bind_addr)\n\n resp = requests.get(\n 'https://' + interface + ':' + str(port) + '/env',\n timeout=http_request_timeout,\n verify=tls_ca_certificate_pem_path,\n cert=cl_pem if use_client_cert else None,\n )\n\n env = json.loads(resp.content.decode('utf-8'))\n\n # hard coded env\n assert env['wsgi.url_scheme'] == 'https'\n assert env['HTTPS'] == 'on'\n\n # ensure these are present\n for key in {'SSL_VERSION_INTERFACE', 'SSL_VERSION_LIBRARY'}:\n assert key in env\n\n # pyOpenSSL generates the env before the handshake completes\n if adapter_type == 'pyopenssl':\n return\n\n for key in {'SSL_PROTOCOL', 'SSL_CIPHER'}:\n assert key in env\n\n # client certificate env\n if tls_verify_mode == ssl.CERT_NONE or not use_client_cert:\n assert env['SSL_CLIENT_VERIFY'] == 'NONE'\n else:\n assert env['SSL_CLIENT_VERIFY'] == 'SUCCESS'\n\n with open(cl_pem, 'rt') as f:\n assert env['SSL_CLIENT_CERT'] in f.read()\n\n for key in {\n 'SSL_CLIENT_M_VERSION', 'SSL_CLIENT_M_SERIAL',\n 'SSL_CLIENT_I_DN', 'SSL_CLIENT_S_DN',\n }:\n assert key in env\n\n # builtin ssl environment generation may use a loopback socket\n # ensure no ResourceWarning was raised during the test\n if IS_PYPY:\n # NOTE: PyPy doesn't have ResourceWarning\n # Ref: https://doc.pypy.org/en/latest/cpython_differences.html\n return\n for warn in recwarn:\n if not issubclass(warn.category, ResourceWarning):\n continue\n\n # the tests can sporadically generate resource warnings\n # due to timing issues\n # all of these sporadic warnings appear to be about socket.socket\n # and have been observed to come from requests connection pool\n msg = str(warn.message)\n if 'socket.socket' in msg:\n pytest.xfail(\n '\\n'.join((\n 'Sometimes this test fails due to '\n 'a socket.socket ResourceWarning:',\n msg,\n )),\n )\n pytest.fail(msg)\n\n # to perform the ssl handshake over that loopback socket,\n # the builtin ssl environment generation uses a thread\n for _, _, trace in thread_exceptions:\n print(trace, file=sys.stderr)\n assert not thread_exceptions, ': '.join((\n 
thread_exceptions[0][0].__name__,\n thread_exceptions[0][1],\n ))", "def _get_verify_ssl(app_configs: dict):\n # start checking the app specific settings\n verify = app_configs.get(\"verify\")\n\n # because verify can be either a boolean or a path,\n # we need to check if it is a string with a boolean \n # value first then, and only then, we convert it to a bool\n # NOTE: that this will then only support \"true\" or \"false\"\n # (case-insensitive) rather than the normal \"true\", \"yes\", etc...\n if isinstance(verify, str) and verify.lower() in [\"false\", \"true\"]:\n verify = str_to_bool(verify)\n\n return verify", "def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")", "def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")", "def _config_tls(self):\n pass", "def test_db_ssl_enable(self):\n\n # Check default state is SSL on\n with mock.patch.dict('os.environ', REQUIRED_SETTINGS, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )\n\n # Check enabling the setting explicitly\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'True'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {}\n )\n\n # Disable it\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'False'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )", "def test_http_over_https_error(\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, ip_addr,\n tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n):\n # disable some flaky tests\n # https://github.com/cherrypy/cheroot/issues/225\n issue_225 = (\n IS_MACOS\n and adapter_type == 'builtin'\n )\n if issue_225:\n pytest.xfail('Test fails in Travis-CI')\n\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n\n tls_certificate.configure_cert(tls_adapter.context)\n\n interface, _host, port = _get_conn_data(ip_addr)\n tlshttpserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(\n tlshttpserver.bind_addr,\n )\n\n fqdn = interface\n if ip_addr is ANY_INTERFACE_IPV6:\n fqdn = '[{fqdn}]'.format(**locals())\n\n expect_fallback_response_over_plain_http = (\n (\n adapter_type == 'pyopenssl'\n )\n )\n if expect_fallback_response_over_plain_http:\n resp = requests.get(\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n assert resp.status_code == 400\n assert resp.text == (\n 'The client sent a plain HTTP request, '\n 'but this server only speaks HTTPS on this port.'\n )\n return\n\n with pytest.raises(requests.exceptions.ConnectionError) as ssl_err:\n requests.get( # FIXME: make stdlib ssl behave like PyOpenSSL\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n\n if IS_LINUX:\n expected_error_code, expected_error_text = (\n 104, 'Connection reset by peer',\n )\n if IS_MACOS:\n expected_error_code, expected_error_text = (\n 54, 'Connection 
reset by peer',\n )\n if IS_WINDOWS:\n expected_error_code, expected_error_text = (\n 10054,\n 'An existing connection was forcibly closed by the remote host',\n )\n\n underlying_error = ssl_err.value.args[0].args[-1]\n err_text = str(underlying_error)\n assert underlying_error.errno == expected_error_code, (\n 'The underlying error is {underlying_error!r}'.\n format(**locals())\n )\n assert expected_error_text in err_text", "def test_add_client_ca_wrong_args(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.add_client_ca(\"spam\")", "def sslCheckOriginal():\n print('[+] Populating SSL for later check')\n for url in ssl_strip_monitored_urls:\n try:\n cert = ssl.get_server_certificate((str(url), 443))\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)\n p_k = x509.get_pubkey()\n s_n = x509.get_serial_number()\n\n print('[+] Acquired Certificate: %s' % url)\n print(' |_________> serial_number %s' % s_n)\n print(' |_________> public_key %s' % p_k)\n\n check_ssl_strip_results.append(SSL_Strip_Check(url, p_k, s_n))\n\n except Exception as err:\n print('[-] Error While Acquiring certificats on setup phase !')\n traceback.print_exc()\n return time.time()", "def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2", "def certificate_check(self, certificate, valid, host):\n\n raise Passthrough", "def check_connect():\n arg_parser = resilient.ArgumentParser(resilient.get_config_file())\n host = arg_parser.getopt(\"resilient\", \"host\")\n #\n # Use Openssl first\n #\n print(\"-------------------------------------\")\n print(\"Using openssl to connect to resilient\")\n print(\"-------------------------------------\")\n command = \"openssl s_client -connect {}:443\".format(host)\n user = arg_parser.getopt(\"resilient\", \"email\")\n password = arg_parser.getopt(\"resilient\", \"password\")\n process = subprocess.Popen(\"/bin/bash\", stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out, err = process.communicate(command)\n cafile = arg_parser.getopt(\"resilient\", \"cafile\")\n verify = True\n if cafile is not None and cafile == \"false\":\n verify = False\n print(out)\n if err is not None:\n print(err)\n\n print(\"---------------------------------------------\")\n print(\"Using python requests to connect to resilient\")\n print(\"---------------------------------------------\")\n\n rest_url = \"https://{}:443/rest/session\".format(host)\n data = '{\"email\": \"' + user + '\",\"password\":\"' + password + '\", \"interactive\": true}'\n try:\n header = {\"Content-Type\": \"application/json\"}\n resp = requests.post(rest_url,\n data=data,\n headers=header,\n verify=verify)\n print(\"\\tResponse: \" + str(resp))\n\n except Exception as e:\n print(\"\\tConnection failed!!\")\n print(\"\\t\" + str(e))", "def _is_https_enabled(dbapi):\n if dbapi is None:\n return False\n system = dbapi.isystem_get_one()\n return system.capabilities.get('https_enabled', False)", "def ssl(self, cainfo=None, verify=True, cert=None, key=None):\n if cainfo:\n self.curl.setopt(pycurl.CAINFO, cainfo)\n\n if verify == False:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n else:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 1)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)\n if cert:\n #self.curl.setopt(pycurl.SSLCERTTYPE, \"DER\")\n self.curl.setopt(pycurl.SSLCERT, cert)\n if key:\n self.curl.setopt(pycurl.SSLKEY, key)", "def 
test_redirect_url_https(self):\n with self.assertRaisesRegex(ValidationError, 'Enter a valid URL'):\n self.test_agreement.redirect_url = 'http://example.com'\n self.test_agreement.full_clean()", "def test_checkmarx_init_no_ssl(self, mock_url_read, mock_create_unverified_context):\n # pylint: disable=protected-access\n delattr(ssl, '_create_unverified_context')\n mock_url_read.return_value = '{\"access_token\": \"abc123\"}'\n marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec\n\n self.assertIsNotNone(marx)\n self.assertFalse(hasattr(ssl, '_create_unverified_context'))\n self.assertTrue(hasattr(ssl, '_create_default_https_context'))\n mock_create_unverified_context.assert_not_called()", "def test_ssl_verify_disabled(self, method, mock_session):\n session = mock_session.return_value.__enter__.return_value\n session.get.return_value.text = \"response text\"\n session.get.return_value.status_code = 200\n session.get.return_value.json.return_value = self.test_data\n # Handle either post or get\n session.post = session.get\n\n self.client = trovebox.Trovebox(host=self.test_host, **self.test_oauth)\n self.client.configure(ssl_verify=False)\n GetOrPost(self.client, method).call(self.test_endpoint)\n self.assertEqual(session.verify, False)", "def default_ssl_binding(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"default_ssl_binding\")", "def test_get_cert_store(self):\n context = Context(SSLv23_METHOD)\n store = context.get_cert_store()\n assert isinstance(store, X509Store)", "def EnableSSL(self):\n if self.force_auto_sync:\n self.get('EnableSSL')\n return self._EnableSSL", "def enable_ssl(self) -> str:\n return pulumi.get(self, \"enable_ssl\")", "def test_no_verify_no_ca(self, host_str_fs, tmpdir):\n tmpdir.join('cert.pem').ensure()\n tmpdir.join('key.pem').ensure()\n tmpdir.join('ca.pem').ensure()\n\n out = client_kwargs_from_config(\n host_str_fs.format(cert_path=tmpdir.strpath),\n )\n\n assert out['tls'].cert == (\n tmpdir.join('cert.pem').strpath,\n tmpdir.join('key.pem').strpath,\n )\n assert out['tls'].verify == False", "def __init__(self, url, **kwargs):\n self.hostname = self.getHostnameFromURL(url)\n\n # ``verify`` here refers to server-side verification of certificates\n # presented by a client:\n self.verify = False if self.isClient else True\n super(SSLVerifyingContextFactory, self).__init__(verify=self.verify,\n fixBrokenPeers=True,\n **kwargs)", "def test_get_verified_chain_unconnected(self):\n ctx = Context(SSLv23_METHOD)\n server = Connection(ctx, None)\n assert None is server.get_verified_chain()", "def disable_ssl(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_ssl\")", "def is_secure(self):\n return self._is_ssl or self._is_socket", "def test_get_cipher_bits_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_bits() is None", "def insecure_tls(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"insecure_tls\")", "async def test_setup_fail_on_ssl_erros(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n respx.get(\"https://localhost\").mock(side_effect=ssl.SSLError(\"ssl error\"))\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"https://localhost\",\n \"method\": \"GET\",\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0\n assert \"ssl error\" in caplog.text" ]
[ "0.7664821", "0.75893015", "0.7575917", "0.7255344", "0.71744585", "0.69131243", "0.6908525", "0.6856045", "0.68336725", "0.6823391", "0.6813892", "0.677433", "0.6762706", "0.6683818", "0.6665227", "0.6665193", "0.66511506", "0.6603701", "0.66006225", "0.6559028", "0.6538089", "0.64723766", "0.64366674", "0.6427678", "0.64240617", "0.6402983", "0.6395474", "0.6340708", "0.63374776", "0.6295901", "0.62788224", "0.6270939", "0.6262432", "0.6250303", "0.6246692", "0.62403625", "0.6207914", "0.6201827", "0.61990565", "0.61711305", "0.61507374", "0.6099554", "0.60926294", "0.6088329", "0.6083024", "0.60816467", "0.6078132", "0.60701555", "0.6066822", "0.60571945", "0.60510343", "0.60475767", "0.6028851", "0.59949756", "0.5993059", "0.5972566", "0.59633404", "0.5949225", "0.59442234", "0.594185", "0.5923599", "0.5920182", "0.5918634", "0.5910683", "0.59084564", "0.59038526", "0.5898175", "0.58844984", "0.587987", "0.5875972", "0.58711547", "0.5854534", "0.5852588", "0.5851312", "0.5851312", "0.5849709", "0.5837346", "0.58352154", "0.58327067", "0.5826816", "0.58268", "0.58232236", "0.5820536", "0.579683", "0.57902575", "0.57884777", "0.57846373", "0.5780324", "0.5774109", "0.5771016", "0.5753202", "0.5749843", "0.5746522", "0.5738948", "0.57245964", "0.571173", "0.57109994", "0.57002157", "0.56751084", "0.56743824" ]
0.85075116
0
Ensure that we verify SSL by default.
def test_ssl_default(self): e = ErrataConnector() assert e.ssl_verify
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ssl_default(self):\n assert security.security_settings.ssl_verify()", "def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE", "def validateSSL(self):\n return self.__validate_ssl", "def enable_ssl_verification(self) -> bool:\n return pulumi.get(self, \"enable_ssl_verification\")", "def require_ssl(self) -> bool:\n return pulumi.get(self, \"require_ssl\")", "def test_fallback_default_verify_paths(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_FILE\",\n _ffi.string(_lib.X509_get_default_cert_file()),\n )\n monkeypatch.setattr(\n SSL,\n \"_CRYPTOGRAPHY_MANYLINUX_CA_DIR\",\n _ffi.string(_lib.X509_get_default_cert_dir()),\n )\n context.set_default_verify_paths()\n store = context.get_cert_store()\n sk_obj = _lib.X509_STORE_get0_objects(store._store)\n assert sk_obj != _ffi.NULL\n num = _lib.sk_X509_OBJECT_num(sk_obj)\n assert num != 0", "def ssl_required(self):\n return self._ssl_required", "def DisableSSLVerify():\n\n\t\ttry:\n\t\t\trequests.packages.urllib3.disable_warnings()\n\t\texcept:\n\t\t\tpass", "def use_skip_ssl_verify(self, val=True, force=False):\n if val:\n self.ssl_verify = False\n else:\n self.ssl_verify = True\n\n if force:\n self.force_skip_ssl_verify = True\n else:\n self.force_skip_ssl_verify = False\n\n return val", "def require_ssl(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_ssl\")", "def force_ssl_sync():\n return", "def test_set_default_verify_paths(self):\n # Testing this requires a server with a certificate signed by one\n # of the CAs in the platform CA location. Getting one of those\n # costs money. Fortunately (or unfortunately, depending on your\n # perspective), it's easy to think of a public server on the\n # internet which has such a certificate. Connecting to the network\n # in a unit test is bad, but it's the only way I can think of to\n # really test this. 
-exarkun\n context = Context(SSLv23_METHOD)\n context.set_default_verify_paths()\n context.set_verify(\n VERIFY_PEER,\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n\n client = socket_any_family()\n client.connect((\"encrypted.google.com\", 443))\n clientSSL = Connection(context, client)\n clientSSL.set_connect_state()\n clientSSL.set_tlsext_host_name(b\"encrypted.google.com\")\n clientSSL.do_handshake()\n clientSSL.send(b\"GET / HTTP/1.0\\r\\n\\r\\n\")\n assert clientSSL.recv(1024)", "def ssl_required(self, ssl_required):\n\n self._ssl_required = ssl_required", "def handle_ssl_established(self):\n\t\t\tself.log_info('unhandled handle_ssl_established event', 'warning')", "def ssl(self) -> Optional[bool]:\n return pulumi.get(self, \"ssl\")", "def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n monkeypatch.setattr(\n _lib, \"SSL_CTX_set_default_verify_paths\", lambda x: 1\n )\n dir_env_var = _ffi.string(_lib.X509_get_default_cert_dir_env()).decode(\n \"ascii\"\n )\n file_env_var = _ffi.string(\n _lib.X509_get_default_cert_file_env()\n ).decode(\"ascii\")\n monkeypatch.setenv(dir_env_var, \"value\")\n monkeypatch.setenv(file_env_var, \"value\")\n context.set_default_verify_paths()\n\n monkeypatch.setattr(\n context, \"_fallback_default_verify_paths\", raiser(SystemError)\n )\n context.set_default_verify_paths()", "def _verification_needed(cacert, insecure):\n if insecure is False or insecure is None:\n verify = cacert or True\n else:\n verify = False\n return verify", "def set_ssl_context(self, ssl_verify, ssl_cafile):\n if not ssl_verify:\n self.ssl_context = ssl.create_default_context()\n self.ssl_context.check_hostname = False\n self.ssl_context.verify_mode = ssl.CERT_NONE\n elif ssl_cafile:\n self.ssl_context = ssl.create_default_context(cafile=ssl_cafile)\n else:\n self.ssl_context = ssl.create_default_context()", "def ssl(self):\n return self.protocol != \"SASL_PLAINTEXT\"", "def test_set_verify_mode(self):\n context = Context(SSLv23_METHOD)\n assert context.get_verify_mode() == 0\n context.set_verify(VERIFY_PEER | VERIFY_CLIENT_ONCE)\n assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)", "def patch_twisted_ssl_root_bug():\n import twisted.internet._sslverify as mod\n mod.platformTrust = patched_platform_trust", "def test_set_verify_default_callback(self, mode):\n serverContext = Context(TLSv1_2_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n\n clientContext = Context(TLSv1_2_METHOD)\n clientContext.set_verify(mode, None)\n\n if mode == SSL.VERIFY_PEER:\n with pytest.raises(Exception) as exc:\n self._handshake_test(serverContext, clientContext)\n assert \"certificate verify failed\" in str(exc.value)\n else:\n self._handshake_test(serverContext, clientContext)", "def verify_SSL_certificate(self, code: str) -> bool:\n return True", "def filter_ssl(request):\n if request.scheme == 'https':\n return True\n else:\n return False", "def _enforce_https(request):\n # type: (PipelineRequest) -> None\n\n # move 'enforce_https' from options to context so it persists\n # across retries but isn't passed to a transport implementation\n option = request.context.options.pop(\"enforce_https\", None)\n\n # True is the default setting; we needn't preserve an explicit opt in to the default behavior\n if option is False:\n request.context[\"enforce_https\"] = option\n\n enforce_https = 
request.context.get(\"enforce_https\", True)\n if enforce_https and not request.http_request.url.lower().startswith(\"https\"):\n raise ServiceRequestError(\n \"Bearer token authentication is not permitted for non-TLS protected (non-https) URLs.\"\n )", "def is_ssl(self):\n return self._is_ssl", "def insecure_ssl(self):\n # type: () -> bool\n return self._insecure_ssl", "def _check_ca_certificate(self):\n if not os.path.exists(self._ca_certificate_path):\n with open(self._ca_certificate_path, \"w\") as f:\n f.write(ssl.get_server_certificate((\"127.0.0.1\", self._app_port), ssl_version=ssl.PROTOCOL_TLSv1_2))", "def test_get_certificate_none(self):\n context = Context(SSLv23_METHOD)\n client = Connection(context, None)\n cert = client.get_certificate()\n assert cert is None", "def fill_disable_ssl_verification(self, data):\n disable_ssl_verification = get_optional_value(data, self.DISABLE_SSL, False)\n self.verify_ssl = not bool(disable_ssl_verification)", "def ssl_verification(verify: bool = True) -> Generator[None, None, None]:\n\n old_request = requests.Session.request\n requests.Session.request = partialmethod(old_request, verify=verify) # type: ignore\n\n warnings.filterwarnings(\"ignore\", \"Unverified HTTPS request\")\n yield\n warnings.resetwarnings()\n\n requests.Session.request = old_request # type: ignore", "def test_no_url_or_appid_passed_in_and_is_ssl(self):\n gae_req = AppEngineRequest(use_ssl=True)\n\n url = gae_req.build_url()\n\n self.assertEqual(url, \"https://localhost/\")", "def check_secure():\n return get_config_handler().check_secure()", "def _default_ssl_context() -> ssl.SSLContext:\n ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)\n ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2\n ssl_context.verify_mode = ssl.CERT_REQUIRED\n ssl_context.check_hostname = True\n ssl_context.load_default_certs()\n return ssl_context", "def checkOpenSSL(self):\n \n issues = {'issueName': 'OpenSSL', 'Present': False, 'Heartbleed': False, 'Outdated': False, 'Version': ''}\n\n if self.openssl == '': \n return issues\n \n issues['Present'] = True\n\n relativePath = 'analysis_result/' + self.firmwareFolder + '/' + self.openssl\n try:\n keywordsArray = subprocess.check_output('strings ' + relativePath + '| grep -P \\\"^OpenSSL ([0-9])+\\.([0-9])+\\.([a-z0-9])+\\\"',\\\n shell=True).split('\\n')\n except subprocess.CalledProcessError as e:\n issues['Version'] = 'Cannot be detected'\n return issues\n\n keywordsArray = filter(None, keywordsArray) # Filter out empty string since subprocess.check_output throws out extra newline\n versionNum = keywordsArray[0].split(' ')[1]\n issues['Version'] = versionNum\n\n # Check if the version of OpenSSL is vulnerable to the heartbleed attack\n if '1.0.1' in versionNum:\n issues['Heartbleed'] = True\n \n # Check if the version of OpenSSL is outdated (no longer supported)\n with open('analysis/data/OpenSSLOutdatedVersions', 'r') as outdatedVersions:\n for line in outdatedVersions:\n if versionNum[:-1] in line:\n issues['Outdated'] = True\n break\n\n # print(\"OpenSSL: {}\".format(issues))\n\n return issues", "def test_set_verify_wrong_callable_arg(self, callback):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_verify(mode=VERIFY_PEER, callback=callback)", "def test_x509_in_verify_works(self):\n serverContext = Context(SSLv23_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n 
serverConnection = Connection(serverContext, None)\n\n def verify_cb_get_subject(conn, cert, errnum, depth, ok):\n assert cert.get_subject()\n return 1\n\n clientContext = Context(SSLv23_METHOD)\n clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)\n clientConnection = Connection(clientContext, None)\n clientConnection.set_connect_state()\n\n handshake_in_memory(clientConnection, serverConnection)", "def can_https(tls_ver):\n output = True\n\n # check python version\n if sys.version_info < (3, 6): #modify from 3, 7 to 3, 6\n _LOGGER.error(\"PyISY cannot use HTTPS: Invalid Python version. See docs.\")\n output = False\n\n # check that Python was compiled against correct OpenSSL lib\n if \"PROTOCOL_TLSv1_1\" not in dir(ssl):\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Compiled against old OpenSSL \"\n + \"library. See docs.\"\n )\n output = False\n\n # check the requested TLS version\n if tls_ver not in [1.1, 1.2]:\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Only TLS 1.1 and 1.2 are supported \"\n + \"by the ISY controller.\"\n )\n output = False\n\n return output", "def test_non_ssl_ports_after_enabling_tls(self):\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = True\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n try:\n rest._http_request(api=api, timeout=10)\n except Exception as _:\n ssl_request = self.sample_urls_map[non_ssl_request]\n api = ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} failed\".format(api))\n else:\n self.log.error(\"{0} worked\".format(api))\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} api failed with content {1}\".format(api, content))", "def create_no_verify_ssl_context() -> ssl.SSLContext:\n sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n sslcontext.check_hostname = False\n sslcontext.verify_mode = ssl.CERT_NONE\n # Allow all ciphers rather than only Python 3.10 default\n sslcontext.set_ciphers(\"DEFAULT\")\n with contextlib.suppress(AttributeError):\n # This only works for OpenSSL >= 1.0.0\n sslcontext.options |= ssl.OP_NO_COMPRESSION\n sslcontext.set_default_verify_paths()\n # ssl.OP_LEGACY_SERVER_CONNECT is only available in Python 3.12a4+\n sslcontext.options |= getattr(ssl, \"OP_LEGACY_SERVER_CONNECT\", 0x4)\n return sslcontext", "def get_ssl_certificate() :", "def MonkeyPatchHttplib(ca_cert):\n global _old_https\n global _ca_certs_file\n if ca_cert is None:\n if _old_https is not None:\n httplib.HTTPS = _old_https\n else:\n if _old_https is None:\n _old_https = httplib.HTTPS\n httplib.HTTPS = _SslAwareHttps\n _ca_certs_file = ca_cert", "def test_host_ssl(self):\n url = create_url(host=\"www.example.com\", ssl=True, scheme_ssl=\"https\")\n self.assertEqual(url, \"https://www.example.com\")", "def use_https(url, timeout=60):\n \n try:\n response = requests.get(url, timeout=timeout, verify=True)\n if 'https://' in response.url: return True\n\n return False\n except:\n # Defaulting to 'False', probably the web server don't know what to do\n print('Warning: The HTTPS request to {url} failed, assuming a 
\\'False\\''.format(url=url))\n return False", "def verify_server_certificate(self) -> bool:\n return pulumi.get(self, \"verify_server_certificate\")", "def x_forwarded_for_client_cert_client_verify_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"x_forwarded_for_client_cert_client_verify_enabled\")", "def verify_server_certificate(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"verify_server_certificate\")", "def get_ssl_certificate():", "def ssl(self) -> Optional[pulumi.Input['SslConfigurationArgs']]:\n return pulumi.get(self, \"ssl\")", "def test_set_tlsext_use_srtp_valid(self):\n context = Context(SSLv23_METHOD)\n assert context.set_tlsext_use_srtp(b\"SRTP_AES128_CM_SHA1_80\") is None", "def _validate_ssl_context_for_tls_in_tls(ssl_context):\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )", "def __check_opts(self):\n self.ca_cert_file = os.environ['HOME'] + '/.cat_installer/ca.pem'\n self.pfx_file = os.environ['HOME'] + '/.cat_installer/user.p12'\n if not os.path.isfile(self.ca_cert_file):\n print(Messages.cert_error)\n sys.exit(2)", "def test_tls_client_minimum_1_point_3_missing(self):\n # thanks i hate it\n if hasattr(SSL, \"OP_NO_TLSv1_3\"):\n OP_NO_TLSv1_3 = SSL.OP_NO_TLSv1_3\n delattr(SSL, \"OP_NO_TLSv1_3\")\n self.addCleanup(setattr, SSL, \"SSL.OP_NO_TLSv1_3\", OP_NO_TLSv1_3)\n assert not hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n with self.assertRaises(ConfigError) as e:\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(\n e.exception.args[0],\n (\n \"federation_client_minimum_tls_version cannot be 1.3, \"\n \"your OpenSSL does not support it\"\n ),\n )", "def test_use_certificate_file_missing(self, tmpfile):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n ctx.use_certificate_file(tmpfile)", "def test_wrong_sni_hint(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with pytest.raises(ssl.SSLCertVerificationError):\n SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"veryverywrong\"\n )", "def test_alpn_call_failure(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(ValueError):\n context.set_alpn_protos([])", "def verify_server_certificate(self):\n return self._verify_server_certificate", "def _validate_cert(self):\r\n cert = self.handle.getpeercert()\r\n self.peercert = cert\r\n if 'subject' not in cert:\r\n raise TTransportException(type=TTransportException.NOT_OPEN,\r\n message='No SSL certificate found from %s:%s' % (self.host, self.port))\r\n fields = cert['subject']\r\n for field in fields:\r\n # ensure structure we get back is what we expect\r\n if not isinstance(field, tuple):\r\n continue\r\n cert_pair = field[0]\r\n if len(cert_pair) < 2:\r\n continue\r\n cert_key, cert_value = cert_pair[0:2]\r\n if cert_key != 'commonName':\r\n continue\r\n certhost = cert_value\r\n if certhost == self.host:\r\n # success, cert commonName matches desired hostname\r\n self.is_valid = True\r\n return \r\n else:\r\n 
raise TTransportException(type=TTransportException.UNKNOWN,\r\n message='Host name we connected to \"%s\" doesn\\'t match certificate provided commonName \"%s\"' % (self.host, certhost))\r\n raise TTransportException(type=TTransportException.UNKNOWN,\r\n message='Could not validate SSL certificate from host \"%s\". Cert=%s' % (self.host, cert))", "def set_ssl(self):\n for params in self.config.get_ssl_params():\n self.connection.transport.set_ssl(**params)", "def test_ssl_object_attributes(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n cipher = ssock.cipher()\n assert type(cipher) == tuple\n\n # No chosen protocol through ALPN or NPN.\n assert ssock.selected_alpn_protocol() is None\n assert ssock.selected_npn_protocol() is None\n\n shared_ciphers = ssock.shared_ciphers()\n # SSLContext.shared_ciphers() changed behavior completely in a patch version.\n # See: https://github.com/python/cpython/issues/96931\n assert shared_ciphers is None or (\n type(shared_ciphers) is list and len(shared_ciphers) > 0\n )\n\n assert ssock.compression() is None\n\n validate_peercert(ssock)\n\n ssock.send(sample_request())\n response = consume_socket(ssock)\n validate_response(response)", "def skip_if_no_ssl (func):\n try:\n import evy.patched.ssl\n except ImportError:\n try:\n import evy.patched.OpenSSL\n except ImportError:\n skipped(func)", "def test_no_ca_no_error(self, tmpdir):\n tmpdir.join('cert.pem').ensure()\n tmpdir.join('key.pem').ensure()\n\n out = client_kwargs_from_config(\n 'http://l cert_path=%s' % tmpdir.strpath\n )\n\n assert out['tls'].cert == (\n tmpdir.join('cert.pem').strpath,\n tmpdir.join('key.pem').strpath,\n )\n assert out['tls'].verify == None", "def filter_nossl(request):\n if request.scheme == 'http':\n return True\n else:\n return False", "def test_use_certificate_uninitialized(self, ctx_or_conn):\n with pytest.raises(Error):\n ctx_or_conn.use_certificate(X509())", "def test_strict_https_header(flask_app, app):\n app.config['STRICT_HTTPS'] = True # enable strict https\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Strict-Transport-Security') == 'max-age=31536000; includeSubDomains'\n\n app.config['STRICT_HTTPS'] = False # disable\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert not headers.get('Strict-Transport-Security')", "def test_get_cipher_version_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_version() is None", "def test_set_verify_wrong_mode_arg(self, mode):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_verify(mode=mode)", "def test_set_client_ca_list_errors(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.set_client_ca_list(\"spam\")\n with pytest.raises(TypeError):\n ctx.set_client_ca_list([\"spam\"])", "def tls_enabled(self):\n has_cert = getattr(self.args, 'ssl_certfile', None) is not None\n has_key = getattr(self.args, 'ssl_keyfile', None) is not None\n tls = getattr(self.args, 'tls', False)\n return tls or self.is_sandbox or (has_cert and has_key)", "def test_cert(self):\n\n try:\n client = SSLClient(host=FQDN, ip=APPLIANCE, usercert=CERT, sslverifyhost=True, cabundle=CABUNDLE)\n self.assertTrue(1==1, \"SSLClient connects with cabundle\")\n except Exception as exception:\n print(exception)\n self.fail(\"SSLClient did not connect\")\n \n response = 
client.send_command('LIST')\n self.assertEqual(response.ret, 100)\n\n client.disconnect()", "def xforwardedforclientcertclientverifyenabled(self) -> bool:\n return pulumi.get(self, \"xforwardedforclientcertclientverifyenabled\")", "def test_ssl_env( # noqa: C901 # FIXME\n thread_exceptions,\n recwarn,\n mocker,\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, tls_verify_mode, tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n tls_ca_certificate_pem_path,\n use_client_cert,\n):\n interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)\n\n with mocker.mock_module.patch(\n 'idna.core.ulabel',\n return_value=ntob('127.0.0.1'),\n ):\n client_cert = ca.issue_cert(ntou('127.0.0.1'))\n\n with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n tls_adapter.context.set_verify(\n _stdlib_to_openssl_verify[tls_verify_mode],\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n else:\n tls_adapter.context.verify_mode = tls_verify_mode\n\n ca.configure_trust(tls_adapter.context)\n tls_certificate.configure_cert(tls_adapter.context)\n\n tlswsgiserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(tlswsgiserver.bind_addr)\n\n resp = requests.get(\n 'https://' + interface + ':' + str(port) + '/env',\n timeout=http_request_timeout,\n verify=tls_ca_certificate_pem_path,\n cert=cl_pem if use_client_cert else None,\n )\n\n env = json.loads(resp.content.decode('utf-8'))\n\n # hard coded env\n assert env['wsgi.url_scheme'] == 'https'\n assert env['HTTPS'] == 'on'\n\n # ensure these are present\n for key in {'SSL_VERSION_INTERFACE', 'SSL_VERSION_LIBRARY'}:\n assert key in env\n\n # pyOpenSSL generates the env before the handshake completes\n if adapter_type == 'pyopenssl':\n return\n\n for key in {'SSL_PROTOCOL', 'SSL_CIPHER'}:\n assert key in env\n\n # client certificate env\n if tls_verify_mode == ssl.CERT_NONE or not use_client_cert:\n assert env['SSL_CLIENT_VERIFY'] == 'NONE'\n else:\n assert env['SSL_CLIENT_VERIFY'] == 'SUCCESS'\n\n with open(cl_pem, 'rt') as f:\n assert env['SSL_CLIENT_CERT'] in f.read()\n\n for key in {\n 'SSL_CLIENT_M_VERSION', 'SSL_CLIENT_M_SERIAL',\n 'SSL_CLIENT_I_DN', 'SSL_CLIENT_S_DN',\n }:\n assert key in env\n\n # builtin ssl environment generation may use a loopback socket\n # ensure no ResourceWarning was raised during the test\n if IS_PYPY:\n # NOTE: PyPy doesn't have ResourceWarning\n # Ref: https://doc.pypy.org/en/latest/cpython_differences.html\n return\n for warn in recwarn:\n if not issubclass(warn.category, ResourceWarning):\n continue\n\n # the tests can sporadically generate resource warnings\n # due to timing issues\n # all of these sporadic warnings appear to be about socket.socket\n # and have been observed to come from requests connection pool\n msg = str(warn.message)\n if 'socket.socket' in msg:\n pytest.xfail(\n '\\n'.join((\n 'Sometimes this test fails due to '\n 'a socket.socket ResourceWarning:',\n msg,\n )),\n )\n pytest.fail(msg)\n\n # to perform the ssl handshake over that loopback socket,\n # the builtin ssl environment generation uses a thread\n for _, _, trace in thread_exceptions:\n print(trace, file=sys.stderr)\n assert not thread_exceptions, ': '.join((\n 
thread_exceptions[0][0].__name__,\n thread_exceptions[0][1],\n ))", "def _get_verify_ssl(app_configs: dict):\n # start checking the app specific settings\n verify = app_configs.get(\"verify\")\n\n # because verify can be either a boolean or a path,\n # we need to check if it is a string with a boolean \n # value first then, and only then, we convert it to a bool\n # NOTE: that this will then only support \"true\" or \"false\"\n # (case-insensitive) rather than the normal \"true\", \"yes\", etc...\n if isinstance(verify, str) and verify.lower() in [\"false\", \"true\"]:\n verify = str_to_bool(verify)\n\n return verify", "def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")", "def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")", "def _config_tls(self):\n pass", "def test_db_ssl_enable(self):\n\n # Check default state is SSL on\n with mock.patch.dict('os.environ', REQUIRED_SETTINGS, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )\n\n # Check enabling the setting explicitly\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'True'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {}\n )\n\n # Disable it\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'False'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )", "def test_http_over_https_error(\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, ip_addr,\n tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n):\n # disable some flaky tests\n # https://github.com/cherrypy/cheroot/issues/225\n issue_225 = (\n IS_MACOS\n and adapter_type == 'builtin'\n )\n if issue_225:\n pytest.xfail('Test fails in Travis-CI')\n\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n\n tls_certificate.configure_cert(tls_adapter.context)\n\n interface, _host, port = _get_conn_data(ip_addr)\n tlshttpserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(\n tlshttpserver.bind_addr,\n )\n\n fqdn = interface\n if ip_addr is ANY_INTERFACE_IPV6:\n fqdn = '[{fqdn}]'.format(**locals())\n\n expect_fallback_response_over_plain_http = (\n (\n adapter_type == 'pyopenssl'\n )\n )\n if expect_fallback_response_over_plain_http:\n resp = requests.get(\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n assert resp.status_code == 400\n assert resp.text == (\n 'The client sent a plain HTTP request, '\n 'but this server only speaks HTTPS on this port.'\n )\n return\n\n with pytest.raises(requests.exceptions.ConnectionError) as ssl_err:\n requests.get( # FIXME: make stdlib ssl behave like PyOpenSSL\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n\n if IS_LINUX:\n expected_error_code, expected_error_text = (\n 104, 'Connection reset by peer',\n )\n if IS_MACOS:\n expected_error_code, expected_error_text = (\n 54, 'Connection 
reset by peer',\n )\n if IS_WINDOWS:\n expected_error_code, expected_error_text = (\n 10054,\n 'An existing connection was forcibly closed by the remote host',\n )\n\n underlying_error = ssl_err.value.args[0].args[-1]\n err_text = str(underlying_error)\n assert underlying_error.errno == expected_error_code, (\n 'The underlying error is {underlying_error!r}'.\n format(**locals())\n )\n assert expected_error_text in err_text", "def test_add_client_ca_wrong_args(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.add_client_ca(\"spam\")", "def sslCheckOriginal():\n print('[+] Populating SSL for later check')\n for url in ssl_strip_monitored_urls:\n try:\n cert = ssl.get_server_certificate((str(url), 443))\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)\n p_k = x509.get_pubkey()\n s_n = x509.get_serial_number()\n\n print('[+] Acquired Certificate: %s' % url)\n print(' |_________> serial_number %s' % s_n)\n print(' |_________> public_key %s' % p_k)\n\n check_ssl_strip_results.append(SSL_Strip_Check(url, p_k, s_n))\n\n except Exception as err:\n print('[-] Error While Acquiring certificats on setup phase !')\n traceback.print_exc()\n return time.time()", "def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2", "def certificate_check(self, certificate, valid, host):\n\n raise Passthrough", "def check_connect():\n arg_parser = resilient.ArgumentParser(resilient.get_config_file())\n host = arg_parser.getopt(\"resilient\", \"host\")\n #\n # Use Openssl first\n #\n print(\"-------------------------------------\")\n print(\"Using openssl to connect to resilient\")\n print(\"-------------------------------------\")\n command = \"openssl s_client -connect {}:443\".format(host)\n user = arg_parser.getopt(\"resilient\", \"email\")\n password = arg_parser.getopt(\"resilient\", \"password\")\n process = subprocess.Popen(\"/bin/bash\", stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out, err = process.communicate(command)\n cafile = arg_parser.getopt(\"resilient\", \"cafile\")\n verify = True\n if cafile is not None and cafile == \"false\":\n verify = False\n print(out)\n if err is not None:\n print(err)\n\n print(\"---------------------------------------------\")\n print(\"Using python requests to connect to resilient\")\n print(\"---------------------------------------------\")\n\n rest_url = \"https://{}:443/rest/session\".format(host)\n data = '{\"email\": \"' + user + '\",\"password\":\"' + password + '\", \"interactive\": true}'\n try:\n header = {\"Content-Type\": \"application/json\"}\n resp = requests.post(rest_url,\n data=data,\n headers=header,\n verify=verify)\n print(\"\\tResponse: \" + str(resp))\n\n except Exception as e:\n print(\"\\tConnection failed!!\")\n print(\"\\t\" + str(e))", "def _is_https_enabled(dbapi):\n if dbapi is None:\n return False\n system = dbapi.isystem_get_one()\n return system.capabilities.get('https_enabled', False)", "def ssl(self, cainfo=None, verify=True, cert=None, key=None):\n if cainfo:\n self.curl.setopt(pycurl.CAINFO, cainfo)\n\n if verify == False:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n else:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 1)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)\n if cert:\n #self.curl.setopt(pycurl.SSLCERTTYPE, \"DER\")\n self.curl.setopt(pycurl.SSLCERT, cert)\n if key:\n self.curl.setopt(pycurl.SSLKEY, key)", "def 
test_redirect_url_https(self):\n with self.assertRaisesRegex(ValidationError, 'Enter a valid URL'):\n self.test_agreement.redirect_url = 'http://example.com'\n self.test_agreement.full_clean()", "def test_checkmarx_init_no_ssl(self, mock_url_read, mock_create_unverified_context):\n # pylint: disable=protected-access\n delattr(ssl, '_create_unverified_context')\n mock_url_read.return_value = '{\"access_token\": \"abc123\"}'\n marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec\n\n self.assertIsNotNone(marx)\n self.assertFalse(hasattr(ssl, '_create_unverified_context'))\n self.assertTrue(hasattr(ssl, '_create_default_https_context'))\n mock_create_unverified_context.assert_not_called()", "def test_ssl_verify_disabled(self, method, mock_session):\n session = mock_session.return_value.__enter__.return_value\n session.get.return_value.text = \"response text\"\n session.get.return_value.status_code = 200\n session.get.return_value.json.return_value = self.test_data\n # Handle either post or get\n session.post = session.get\n\n self.client = trovebox.Trovebox(host=self.test_host, **self.test_oauth)\n self.client.configure(ssl_verify=False)\n GetOrPost(self.client, method).call(self.test_endpoint)\n self.assertEqual(session.verify, False)", "def default_ssl_binding(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"default_ssl_binding\")", "def test_get_cert_store(self):\n context = Context(SSLv23_METHOD)\n store = context.get_cert_store()\n assert isinstance(store, X509Store)", "def EnableSSL(self):\n if self.force_auto_sync:\n self.get('EnableSSL')\n return self._EnableSSL", "def enable_ssl(self) -> str:\n return pulumi.get(self, \"enable_ssl\")", "def test_no_verify_no_ca(self, host_str_fs, tmpdir):\n tmpdir.join('cert.pem').ensure()\n tmpdir.join('key.pem').ensure()\n tmpdir.join('ca.pem').ensure()\n\n out = client_kwargs_from_config(\n host_str_fs.format(cert_path=tmpdir.strpath),\n )\n\n assert out['tls'].cert == (\n tmpdir.join('cert.pem').strpath,\n tmpdir.join('key.pem').strpath,\n )\n assert out['tls'].verify == False", "def __init__(self, url, **kwargs):\n self.hostname = self.getHostnameFromURL(url)\n\n # ``verify`` here refers to server-side verification of certificates\n # presented by a client:\n self.verify = False if self.isClient else True\n super(SSLVerifyingContextFactory, self).__init__(verify=self.verify,\n fixBrokenPeers=True,\n **kwargs)", "def test_get_verified_chain_unconnected(self):\n ctx = Context(SSLv23_METHOD)\n server = Connection(ctx, None)\n assert None is server.get_verified_chain()", "def is_secure(self):\n return self._is_ssl or self._is_socket", "def disable_ssl(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_ssl\")", "def test_get_cipher_bits_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_bits() is None", "def insecure_tls(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"insecure_tls\")", "async def test_setup_fail_on_ssl_erros(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n respx.get(\"https://localhost\").mock(side_effect=ssl.SSLError(\"ssl error\"))\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"https://localhost\",\n \"method\": \"GET\",\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0\n assert \"ssl error\" in caplog.text" ]
[ "0.8508023", "0.7664771", "0.75758237", "0.72551364", "0.7174544", "0.6913616", "0.6908566", "0.68556964", "0.6832757", "0.68229824", "0.68136877", "0.67751247", "0.67623496", "0.66840416", "0.66653126", "0.6665054", "0.6652056", "0.660391", "0.66015786", "0.655874", "0.6539284", "0.6472644", "0.6437997", "0.6427231", "0.64225096", "0.6403096", "0.63957435", "0.6342821", "0.6339024", "0.6296389", "0.62785", "0.62711024", "0.6263821", "0.6250067", "0.6248175", "0.624079", "0.6209341", "0.62030053", "0.62010384", "0.6171452", "0.6151818", "0.610083", "0.6093481", "0.60883164", "0.6084119", "0.60823673", "0.6078927", "0.6071211", "0.6066832", "0.6056733", "0.6052711", "0.6049444", "0.60299236", "0.5995544", "0.599463", "0.59733176", "0.5964383", "0.5950783", "0.5944278", "0.5943115", "0.5924011", "0.592143", "0.5918217", "0.5911969", "0.5908792", "0.5904931", "0.58978873", "0.58861184", "0.5880589", "0.58782196", "0.58720684", "0.58555776", "0.5853343", "0.5851676", "0.5851676", "0.5851246", "0.5838452", "0.58362156", "0.58344096", "0.58279544", "0.58276254", "0.5824367", "0.5822021", "0.57978076", "0.579092", "0.57884943", "0.57852393", "0.57816774", "0.57739776", "0.5771594", "0.5752952", "0.5749581", "0.5747655", "0.5738407", "0.57260287", "0.57116175", "0.57110363", "0.57024074", "0.567585", "0.56750387" ]
0.7590866
2
Transform a two dimensional numpy array to a myqlm Matrix.
def array_to_matrix(array):
    assert len(array.shape) == 2, "The array must be two dimensional"
    data = []
    for arr in array:
        for elem in arr:
            data.append(ComplexNumber(np.real(elem), np.imag(elem)))
    matri = Matrix(array.shape[0], array.shape[1], data)
    return matri
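For context, the conversion above walks the numpy array row by row and stores every entry as a real/imaginary pair before handing the flat list to the myQLM Matrix constructor. The following is a minimal pure-numpy sketch of that flattening order only; the actual ComplexNumber and Matrix types come from myQLM and are deliberately not constructed here.

import numpy as np

# Hypothetical 2x2 complex input, used only to illustrate the row-major flattening.
arr = np.array([[1 + 2j, 0.0],
                [0.5j, -1.0]])

# Same traversal order as array_to_matrix: rows first, then columns within each row.
pairs = [(float(np.real(e)), float(np.imag(e))) for row in arr for e in row]

print(len(pairs))  # 4 == arr.shape[0] * arr.shape[1]
print(pairs[0])    # (1.0, 2.0) -> the (re, im) pair passed to ComplexNumber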
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_matrix(self) -> types.Matrix:", "def to_matrix(array):\n return Matrix(array.tolist())", "def _to_matrix(self, y):\n if hasattr(y, \"shape\"):\n if len(y.shape) == 1:\n if isinstance(y, (pd.Series, pd.DataFrame)):\n y = y.to_numpy()\n y = y.reshape([-1, 1])\n else:\n y = np.array(y).reshape([-1, 1])\n\n return y", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def wrapDBMatrix(self,mat):\n return mat.todense()", "def to_matrix(self, normalize: bool = True) -> jnp.ndarray:\n return NotImplemented # pragma: no cover", "def to_mat(self) -> np.matrix:\n raise NotImplementedError", "def to_matrix(self, rows=1, cropping=True, outer_offset=None, approximation_precision=0):\n import numpy as np\n return np.array(self.value)", "def array_to_matrix(y, ix, s, nodata=None):\n if nodata is None:\n x = np.ones(s)*np.NaN\n else:\n x = np.ones(s)*nodata\n x[ix] = y\n\n return x", "def qubit_to_matrix(qubit, format='sympy'):\n return represent(qubit, format=format)", "def n2m(a):\n if not isinstance(a, np.ndarray): a = np.array(a)\n return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)", "def to_matrix(self):\n return self.to_operator().data", "def serialize_Q(Q: np.ndarray):\n ret = QMatrix()\n ret.q_matrix = [QMatrixRow() for i in range(64)]\n for i in range(64):\n row = []\n for j in range(9):\n row.append(Q.q_matrix[i][j])\n ret.q_matrix[i].q_matrix_row = row\n return ret", "def make_design_matrix(array):\n return sm.add_constant(make_col_vector(array), prepend=False)", "def to_matrix(self):\n return numpy.array([[1, 0],\n [0, 1]], dtype=complex)", "def _numpy_2_native_matrix(numpy_mat):\n\n # Create native matrix object\n packed_vec = _pack_numpy_matrix(numpy_mat)\n return rqrmilib.create_matrix(packed_vec)", "def array2mat(tab):\n dim = list(tab.shape)\n return Mat(dim, lambda i,j : tab[i,j])", "def Q2Mat(q0,q1,q2,q3):\n m=np.matrix([[1-2*q2**2-2*q3**2,2*q1*q2+2*q0*q3,2*q1*q3-2*q0*q2],\n [2*q1*q2-2*q0*q3,1-2*q1**2-2*q3**2,2*q2*q3+2*q0*q1],\n [2*q1*q3+2*q0*q2,2*q2*q3-2*q0*q1,1-2*q1**2-2*q2**2]])\n return m", "def const_to_matrix(self, value, convert_scalars=False):\n # Lists and 1D arrays become column vectors.\n if isinstance(value, list) or \\\n isinstance(value, np.ndarray) and value.ndim == 1:\n value = np.asmatrix(value, dtype='float64').T\n # First convert sparse to dense.\n elif sp.issparse(value):\n value = value.todense()\n return np.asmatrix(value, dtype='float64')", "def set2mat(A):\n from numpy import put, ones, ravel, shape, newaxis, array, asarray, max, int32\n\n if len(A) == 2: \n x, v = A\n v = asarray(v)\n elif len(A) == 1:\n x = A[0]\n v = ones((len(x),), '1')\n else:\n raise TypeError, 'Argument must be a tuple of length 1 or 2'\n if len(x) == 0: return array([0]).astype(v.dtype)\n if len(x.shape) == 1: x = x[newaxis,:]\n dh,dw = abs(x).max(0)\n h,w = (2*dh)+1, (2*dw)+1 \n M=ones((h,w),int32) * limits(v)[0]\n offset = x[:,0] * w + x[:,1] + (dh*w + dw)\n put(M,offset,v)\n return M.astype(v.dtype)", "def get_matrix(adp):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n return adp", "def asMatrix(self):\n output = np.zeros((self.size[0],self.size[1]))\n for pos in self.matrixDict:\n output[pos[0]][pos[1]] = self.matrixDict[pos]\n return output", "def DM(rows, domain):\n return DomainMatrix.from_list(rows, domain)", "def to_matrix(self): \n 
warnings.warn(f'{self} is being reconstructed into a matrix, consider operating on the decomposed form.')\n\n full = self.to_tensor()\n if self.n_matrices == ():\n return full.reshape(self.shape)\n else:\n return full.reshape(self.n_matrices + self.shape)", "def make_matrix():\n\n # this imports category, category, data, text, pptx_data\n\n if use_test_data:\n # make a test matrix using create_test_matrix\n m = matrixfuncs.create_test_matrix()\n\n else:\n # make a matrix by connecting to Slides! and connecting to a data\n # table.\n import transformations.utils.slidesconf as slidesconf\n from Forgetdata.Matrix import ConnectionDefinition\n conn = ConnectionDefinition()\n conn.ConnectionString = mtd_filepath # set at top of file\n conn.Name = \"Test\"\n conn.Provider = \"SPSS MTD File\"\n liveConnection = slidesconf.connect(conn.ConnectionString,\n name=conn.Name,\n provider_name=conn.Provider)\n\n m = liveConnection[table_selected]\n\n x = tr.MatrixManipulator(m)\n matrixfuncs.printMatrix(m)\n\n for c in m[0]:\n c.TopMember.Label = c.TopMember.Label.encode('ascii', 'ignore')\n\n return m, x", "def py2mat(myobj):\n if isinstance(myobj, pandas.Series):\n mat = r.matrix(myobj,\n rownames=myobj.index,\n dimnames=myobj.name)\n else:\n mat = r.matrix(myobj)\n return mat", "def _to_matrix_vectorized(M):\n assert isinstance(M, (tuple, list))\n assert all([isinstance(item, (tuple, list)) for item in M])\n c_vec = np.asarray([len(item) for item in M])\n assert np.all(c_vec-c_vec[0] == 0)\n r = len(M)\n c = c_vec[0]\n M00 = np.asarray(M[0][0])\n dt = M00.dtype\n sh = [M00.shape[0], r, c]\n M_ret = np.empty(sh, dtype=dt)\n for irow in range(r):\n for icol in range(c):\n M_ret[:, irow, icol] = np.asarray(M[irow][icol])\n return M_ret", "def as_matrix(self):\n return self._data", "def matrix(self):\n return np.matrix(list(self._columns.values()))", "def to_matrix(expr):\r\n # if expr is a list of lists, and is rectangular, then return Matrix(expr)\r\n if not type(expr) == list:\r\n return expr\r\n for row in expr:\r\n if (not type(row) == list):\r\n return expr\r\n rdim = len(expr[0])\r\n for row in expr:\r\n if not len(row) == rdim:\r\n return expr\r\n return sympy.Matrix(expr)", "def create_matrix(sample_size, dim):\n return np.array(private_create_matrix(sample_size, dim, dim))", "def transform(array):\n assert array.shape == (10, 2)\n new = Array(columns=\"abcd\")\n for x, y in array:\n new.append([x, y, x + y, x * y])\n return new", "def image_to_matrix(image):\n\n\tpic = np.array([t[0] for t in image.getdata()]).reshape(image.size[1],image.size[0])\n\n\treturn pic", "def m2n(buf, shape, typecode, ismatrix=False):\n a = np.frombuffer(buf, dtype=typecode).reshape(shape)\n if ismatrix: a = np.asmatrix(a)\n return a", "def to_matrix(self):\n return numpy.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]], dtype=complex)", "def mldata_to_numeric_matrix(self, mldata, n_samples, exclude=[]):\n first_column = True\n for key, submatrix in mldata.iteritems():\n if key not in exclude and type(submatrix) == np.ndarray:\n new_submatrix = np.copy(submatrix)\n\n if new_submatrix.shape[0] != n_samples:\n new_submatrix = new_submatrix.T\n\n if new_submatrix.dtype.type == np.object_:\n new_submatrix = self.nominal_to_float(new_submatrix)\n\n if first_column:\n matrix = new_submatrix.reshape(n_samples, -1)\n first_column = False\n else:\n 
matrix = np.hstack((matrix,\n new_submatrix.reshape(n_samples, -1)))\n return matrix", "def format_input_to_matrix(self, input_field):\n list_of_lists = [[1 if char == \"#\" else 0 for char in row]\n for row in input_field.splitlines()]\n matrix = np.array(list_of_lists)\n if len(matrix.shape) != 2:\n raise ValueError(\"Input cannot be transformed into matrix.\")\n return np.transpose(matrix)", "def _serialize_double_matrix(m):\n if (type(m) == ndarray and m.dtype == float64 and m.ndim == 2):\n rows = m.shape[0]\n cols = m.shape[1]\n ba = bytearray(24 + 8 * rows * cols)\n header = ndarray(shape=[3], buffer=ba, dtype=\"int64\")\n header[0] = 2\n header[1] = rows\n header[2] = cols\n copyto(ndarray(shape=[rows, cols], buffer=ba, offset=24,\n dtype=\"float64\", order='C'), m)\n return ba\n else:\n raise TypeError(\"_serialize_double_matrix called on a \"\n \"non-double-matrix\")", "def import_array(A):\n d = len(A.shape)\n assert d == 2, \"Cannot import {} dimension array, need 2\".format(d)\n m, n = A.shape\n data = dict()\n for i in range(m):\n for j in range(n):\n data[i, j] = mpfr(A[i, j])\n return MPMatrix((m, n), data)", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def _native_matrix_2_numpy(mat):\n\n if 'RQRMI matrix' not in str(mat):\n raise ValueError('Input is not valid rqrmi matrix object')\n return np.array(rqrmilib.matrix_to_list(mat))", "def make_numpy_matrix(df,variables):\n observations = []\n for col in variables:\n observations.append(np.array(df[col]))\n observations = np.mat(observations).transpose().A #annoying numpy magic, and Tim loves it\n print observations.shape\n return observations", "def MXMatrix(self, x_input, d, k, nf):\n\t\tn_len = int(x_input.shape[0] / d)\n\n\t\tMX = np.zeros(((n_len - k + 1) * nf, k * nf * d))\n\t\tVX = np.zeros((n_len - k + 1, k * d))\n\n\t\tx_input = x_input.reshape((d, n_len), order='F')\n\n\t\tfor i in range(n_len - k + 1):\n\t\t\tVX[i, :] = (x_input[:, i:i + k].reshape((k * d, 1), order='F')).T\n\n\t\tfor i in range(n_len - k + 1):\n\t\t\tfor j in range(nf):\n\t\t\t\tMX[i * nf + j:i * nf + j + 1, j *\n\t\t\t\t\tk * d:j * k * d + k * d] = VX[i, :]\n\n\t\treturn MX", "def dof2mat_np(input_dof, scale=False):\n dof = input_dof\n # print('deg {}'.format(dof[3:6]))\n dof[3:6] = dof[3:6] * (2 * math.pi) / 360.0\n # print('rad {}'.format(dof[3:6]))\n\n\n rot_mat = tfms.euler_matrix(dof[5], dof[4], dof[3], 'rzyx')[:3, :3]\n\n mat44 = np.identity(4)\n mat44[:3, :3] = rot_mat\n mat44[:3, 3] = dof[:3]\n\n if scale:\n scales = dof[6:]\n mat_scale = np.diag([scales[1], scales[0], scales[2], 1])\n mat44 = np.dot(mat44, np.linalg.inv(mat_scale))\n # print('mat_scale\\n{}'.format(mat_scale))\n # print('recon mat\\n{}'.format(mat44))\n # sys.exit()\n return mat44", "def _matrix(*params):\n raise NotImplementedError", "def get_data_matrix(df):\n return df[[\"Open\", \"High\", 'Low', \"Close\"]].to_numpy()", "def _matrix_(self, R):\n from sage.matrix.all import matrix\n matlab = self.parent()\n entries = matlab.strip_answer(matlab.eval(\"mat2str({0})\".format(self.name())))\n entries = entries.strip()[1:-1].replace(';', ' ')\n entries = [R(_) for _ in entries.split(' ')]\n nrows, ncols = map(int, str(self.size()).strip().split())\n m = matrix(R, nrows, ncols, entries)\n return m", "def Stirling2Matrix(dim): \r\n mat_space = MatrixSpace(CombinatorialScalarRing(),dim)\r\n l = list()\r\n for row in range(dim):\r\n 
l.append(_stirling2_row(row,dim))\r\n return mat_space(l)", "def make_matrix(p, q):\n M = [[ele[0] * ele[1] for ele in itertools.product([player, 1 - player], \n [opponent, 1 - opponent])]\n for opponent in q for player in p]\n return np.array(M)", "def _convert_matrix(m):\n\n return [m[0][0], m[0][1], m[0][2], m[0][3],\n m[2][0], m[2][1], m[2][2], m[2][3],\n -m[1][0], -m[1][1], -m[1][2], -m[1][3],\n m[3][0], m[3][1], m[3][2], m[3][3]]", "def from_matrix(cls, matrix: list) -> object:\n return cls(tool.matrix_to_array(matrix))", "def vec2mat(multipliers, separate_fields=False):\n\n n = (np.sqrt(1+8*multipliers.size)-1)//2\n assert (n%1)==0, \"Must be n fields and (n choose 2) couplings.\"\n n = int(n)\n\n if separate_fields:\n return multipliers[:n], squareform(multipliers[n:])\n return replace_diag(squareform(multipliers[n:]), multipliers[:n])", "def np_transpose(matrix):\n\n return matrix.transpose()", "def matrix(self):\n return self._matrix(*self.parameters)", "def to_matrix(self):\n\n return self._tensor_to_matrix(self._t)", "def __init__(self, M,):\n #transpose because we are given column vectors not row vectors\n self.M = np.transpose(np.matrix(M))", "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n self.train_set['feedback'][user][item]", "def get_matrix(self, name: str) -> ndarray:\n matrix_path = self._get_path(name)\n matrix = DataFrame(read_csv(matrix_path, header=None)).to_numpy()\n # NOTE Pad a vector of zero to the top of the matrix\n # TODO verify this is the correct assumption\n # return np.vstack((np.zeros_like(matrix[0]), matrix))\n return matrix", "def from_matrix(matrix: types.Matrix) -> \"MatrixLieGroup\":", "def real_matrix(A):\n m, n = A.shape\n def times(x):\n assert x.ndim == 1\n return A @ x\n def trans(x):\n assert x.ndim == 1\n return x @ A\n return Operator(times=times, trans=trans, shape=(m,n))", "def getUserItemMatrix(self):\n\t\tdf = self.getrating()\n\n\t\trows_index = df.user_id.unique()\n\t\tcolumn_index = df.venue_id.unique() \n\n\t\trow_len = len(rows_index)\n\t\tcol_len = len(column_index)\n\n\t\tX = lil_matrix((row_len, col_len))\n\t\trow_map = dict(zip(rows_index, range(row_len)))\n\t\tcol_map = dict(zip(column_index, range(col_len)))\n\n\t\t# Get mapping table for rows and columns\n\t\td = {}\n\t\td[\"row\"] = row_map\n\t\td[\"col\"] = col_map\n\n\t\tfor index, row in df.iterrows():\n\t\t\tX[d[\"row\"][row[\"user_id\"]], d[\"col\"][row[\"venue_id\"]]] = row[\"Rating\"]\n\n\t\tX = X.tocsr() # Allow efficient row slicing\n\n\t\treturn [d,X]", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def buildMatrix( self,\n query_nid, \n lsequence, \n neighbours ):\n\n query_length = int( math.ceil(float(lsequence) / self.resolution ))\n\n nindex = {}\n\n neighbours.mMatches.sort( key = lambda x: x.mQueryFrom )\n \n nneighbours = 0\n if self.combine_repeats:\n for neighbour in neighbours.mMatches:\n if not nindex.has_key(neighbour.mSbjctToken):\n nindex[neighbour.mSbjctToken] = nneighbours\n nneighbours += 1\n else:\n nneighbours = len(neighbours)\n\n # build matrix and add query sequence\n nneighbours += 1\n matrix = numpy.zeros( (nneighbours, query_length), numpy.int) \n matrix[0, 0:query_length] = 1\n row = 1\n \n for n in neighbours.mMatches:\n\n if self.combine_repeats:\n use_row = nindex[n.mSbjctToken]\n else:\n use_row = 
row\n row += 1\n \n yfrom = int(math.floor(n.mQueryFrom/self.resolution))\n yto = int(math.ceil(n.mQueryTo/self.resolution)) \n matrix[use_row, yfrom:yto] = 1\n \n return matrix", "def to_s_matrix(w,v):\n pass", "def _to_dense(self: QGTOnTheFlyT) -> jnp.ndarray:\n Npars = nkjax.tree_size(self._params)\n I = jax.numpy.eye(Npars)\n\n if self._chunking:\n # the linear_call in mat_vec_chunked does currently not have a jax batching rule,\n # so it cannot be vmapped but we can use scan\n # which is better for reducing the memory consumption anyway\n _, out = jax.lax.scan(lambda _, x: (None, self @ x), None, I)\n else:\n out = jax.vmap(lambda x: self @ x, in_axes=0)(I)\n\n if jnp.iscomplexobj(out):\n out = out.T\n\n return out", "def to_matrix(self):\n return numpy.array([[1, 1],\n [1, -1]], dtype=complex) / numpy.sqrt(2)", "def assert_equal_as_matrix(self, q, m, **kwargs):\n np.testing.assert_array_equal(q.to_matrix(), m, **kwargs)", "def _makeWaMatrix_(self, wa, nRow, nCol):\n\t\t#print nRow, nCol\n\t\t#print wa\n\t\t#print\n\t\twaMatrix = [[0 for j in xrange(nCol)] for i in xrange(nRow)]\n\t\tfor a in wa: \n\t\t\tfor i in a[0]:\n\t\t\t\tfor j in a[1]:\n\t\t\t\t\twaMatrix[i][j] = 1\n\t\treturn waMatrix", "def matrixToVector(self,mx):\n return FreeCAD.Base.Vector(mx[0]/1000,mx[1]/1000,mx[2]/1000)", "def MatConvert(x, device, dtype):\r\n x = torch.from_numpy(x).to(device, dtype)\r\n return x", "def amat(M):\n aa, _ = a_matrix(M)\n return aa", "def _file_to_matrix(self, input_field: str, depth: int) -> np.char.array:\n maze_rows = input_field.splitlines()\n maze_lists = [list(row) for row in maze_rows]\n maze = np.array(maze_lists)\n maze = np.pad(maze, pad_width=1, constant_values=EMPTY)\n\n multidim_maze = np.char.array([np.char.array(maze, itemsize=2)\n for _ in range(depth)])\n return multidim_maze", "def transform(self, v):\n #matrix vector multiply, convert from matrix to array type at the end\n return np.array( v * self.M )", "def fromRows(data):\n m = len(data)\n n = len(data[0])\n # check that data structure is valid\n if any([len(row) != n for row in data[1:]]):\n raise ValueError(\"inconsistent row lengths\")\n # check that data types are inconsistent\n t = type(data[0][0])\n if any(any(type(e) is not t for e in row[(i == 0):])\n for i, row in enumerate(data)):\n raise TypeError(\"inconsistent element types\")\n # dispatch to childern based on type\n if t is bool:\n return BooleanMatrix(m, n, data)\n elif t is int:\n return IntegerMatrix(m, n, data)\n if t is float:\n return RealMatrix(m, n, data)", "def matrix(name=None, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n type = CudaNdarrayType(dtype=dtype, broadcastable=(False, False))\r\n return type(name)", "def build_matrix(self):\n self.lb_make = LabelEncoder()\n self.lb_make.fit(self.Y_train)\n tokenizer = Tokenizer(num_words=2000)\n x_array_train = numpy.asarray(self.train['text'])\n x_array_test = numpy.asarray(self.test['text'])\n tokenizer.fit_on_texts(x_array_train)\n x_train_matrix = tokenizer.texts_to_matrix(x_array_train, mode='count')\n x_test_matrix = tokenizer.texts_to_matrix(x_array_test, mode='count')\n y_train_numbers = self.lb_make.transform(self.Y_train)\n y_test_numbers = self.lb_make.transform(self.Y_test)\n y_train_matrix = keras.utils.to_categorical(y_train_numbers, 3)\n y_test_matrix = keras.utils.to_categorical(y_test_numbers, 3)\n self.tokenizer = tokenizer\n return x_train_matrix, x_test_matrix, y_train_matrix, y_test_matrix", "def transform(self, x: Array2D) -> Array2D:", "def 
_q_matrix(self):\n return np.array([\n [self.q[0], -self.q[1], -self.q[2], -self.q[3]],\n [self.q[1], self.q[0], -self.q[3], self.q[2]],\n [self.q[2], self.q[3], self.q[0], -self.q[1]],\n [self.q[3], -self.q[2], self.q[1], self.q[0]]])", "def numpy_to_cube(np_array, similar_cube, dimensions):\n\n new_cube = iris.cube.Cube.copy(similar_cube) # copy similar cube\n\n # time, lat, lon\n if dimensions == 3:\n new_cube.data[:,:,:] = np.nan # convert new cube entries to nan\n new_cube.data[:,:,:] = np_array # fill with numpy array data\n\n # lat, lon\n elif dimensions == 2:\n new_cube.data[:,:] = np.nan # convert new cube entries to nan\n new_cube.data[:,:] = np_array # fill with numpy array data\n\n # either time, lat or lon only\n elif dimensions == 1:\n new_cube.data[:] = np.nan # convert new cube entries to nan\n new_cube.data[:] = np_array # fill with numpy array data\n\n # return the numpy array, failed to convert to a cube\n else:\n print('failed to convert')\n new_cube = np_array\n\n return new_cube", "def cudamat_to_cudandarray(x):\r\n if not isinstance(x, cudamat.CUDAMatrix):\r\n raise ValueError(\"We can transfer only cudamat.CUDAMatrix to CudaNdarray\")\r\n # elif x.dtype != \"float32\":\r\n # raise ValueError(\"CudaNdarray support only float32\")\r\n # We don't need this, because cudamat is always float32.\r\n else:\r\n strides = [1]\r\n for i in x.shape[::-1][:-1]:\r\n strides.append(strides[-1]*i)\r\n strides = tuple(strides[::-1])\r\n\r\n import ctypes\r\n ptr_long = long(ctypes.cast(x.mat.data_device, ctypes.c_void_p).value)\r\n\r\n\r\n # seems legit.\r\n z = cuda.from_gpu_pointer(ptr_long, x.shape, strides, x)\r\n return z", "def _Reshape(self, pdi, pdo):\n # Get number of columns\n cols = pdi.GetNumberOfColumns()\n # Get number of rows\n rows = pdi.GetColumn(0).GetNumberOfTuples()\n\n if len(self.__names) is not 0:\n num = len(self.__names)\n if num < self.__ncols:\n for i in range(num, self.__ncols):\n self.__names.append('Field %d' % i)\n elif num > self.__ncols:\n raise _helpers.PVGeoError('Too many array names. `ncols` specified as %d and %d names given.' % (self.__ncols, num))\n else:\n self.__names = ['Field %d' % i for i in range(self.__ncols)]\n\n # Make a 2D numpy array and fill with data from input table\n data = np.empty((rows,cols))\n for i in range(cols):\n c = pdi.GetColumn(i)\n data[:,i] = interface.convertArray(c)\n\n if ((self.__ncols*self.__nrows) != (cols*rows)):\n raise _helpers.PVGeoError('Total number of elements must remain %d. Check reshape dimensions.' % (cols*rows))\n\n # Use numpy.reshape() to reshape data NOTE: only 2D because its a table\n # NOTE: column access of this reshape is not contigous\n data = np.array(np.reshape(data.flatten(), (self.__nrows,self.__ncols), order=self.__order))\n pdo.SetNumberOfRows(self.__nrows)\n\n # Add new array to output table and assign incremental names (e.g. Field0)\n for i in range(self.__ncols):\n # Make a contigous array from the column we want\n col = np.array(data[:,i])\n # allow type to be determined by input\n # VTK arrays need a name. Set arbitrarily\n insert = interface.convertArray(col, name=self.__names[i]) # array_type=vtk.VTK_FLOAT\n #pdo.AddColumn(insert) # these are not getting added to the output table\n # ... 
work around:\n pdo.GetRowData().AddArray(insert) # NOTE: this is in the FieldData\n\n return pdo", "def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix", "def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix", "def P_matrix(X):\n n, k = X.shape[:2]\n device = X.device\n\n P = torch.ones(n, k, 3, device=device)\n P[:, :, 1:] = X\n return P", "def gram_matrix(input_data):\n a, b, c, d = input_data.size()\n features = input_data.view(b, a * c * d) \n G = torch.mm(features, features.t())\n return G.div(a * b * c * d)", "def apply_transformation_np(source, transformation):\n source_homog = np.ones((source.shape[0], 4))\n source_homog[:, :-1] = source\n # source_homog = np.hstack(\n # (source, np.ones(source.shape[0], 1))\n # )\n\n source_transformed = np.matmul(transformation, source_homog.T).T[:, :-1]\n return source_transformed", "def tomatrix(self, ai_patch):\n V = self.space\n# print \"------------\"\n# print \"geo.npatchs : \", V.geometry.npatchs\n# print \"patch id : \", ai_patch\n# print \"dim : \", V.dim\n# print \"shape : \", V.geometry[ai_patch].shape\n if V.dim == 1 :\n [li_n_1] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_1d ( self.id, ai_patch \\\n , li_n_1 )\n if V.dim == 2 :\n [li_n_1, li_n_2] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_2d ( self.id, ai_patch \\\n , li_n_1, li_n_2 )\n if V.dim == 3 :\n [li_n_1, li_n_2, li_n_3] = V.geometry[ai_patch].shape\n return self.com.pyfem.field_to_matrix_3d ( self.id \\\n , ai_patch, li_n_1, li_n_2, li_n_3 )", "def get_multivariate_matrix(x_data, D):\n rows = []\n terms = get_polynomial(['x1', 'x2', 'x3', 'x4', 'x5'], D)\n for row in range(len(x_data)):\n row_data = {}\n row_data['x1'] = x_data[row, 0]\n row_data['x2'] = x_data[row, 1]\n row_data['x3'] = x_data[row, 2]\n row_data['x4'] = x_data[row, 3]\n row_data['x5'] = x_data[row, 4]\n \n row_entry = []\n for t in terms:\n prod = 1\n for var in t:\n prod *= row_data[var]\n row_entry.append(prod)\n \n row_entry = np.array(row_entry) \n rows.append(row_entry)\n return np.vstack(rows)", "def _materialize_array(matvec, shape, dtype=None):\n x = jnp.zeros(shape, dtype)\n return jax.jacfwd(matvec)(x)", "def lap_mat(self):", "def take_matrix(self):\n matrix = aux.matrix(self.take_vec(), self.order)\n\n return matrix", "def to_matrix(self, rows=1, cropping=True, outer_offset=None, approximation_precision=0):\n import numpy as np\n\n def to_matrix_rec(node, offset):\n # making sure the node exists\n if not node:\n return None, 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return node.dtype.to_mat(node, offset)\n elif node.d == approximation_precision:\n return node.dtype.to_mat(node.leaves.__iter__().next(), offset)\n else:\n # the recursive call\n # mat_shape = node.dtype.base**node.d\n # base_mat = np.ones(mat_shape)*diagram.null_value\n base_mat = np.array([])\n # checking for the kind of 
diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in range(node.dtype.base):\n tmp_result = to_matrix_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n base_mat = np.hstack((base_mat, tmp_result))\n # or edge-value dd?\n else:\n for edge_name in range(node.dtype.base):\n tmp_result = to_matrix_rec(node.child_nodes[edge_name],\n node.dtype.to_mat(node, node.offsets[edge_name], offset))\n try:\n base_mat = np.hstack((base_mat, tmp_result))\n except ValueError:\n base_mat = base_mat[None]\n base_mat = np.hstack((base_mat, tmp_result))\n return base_mat\n\n result = to_matrix_rec(self, outer_offset)\n row_vars = np.log10(rows)/np.log10(self.dtype.base)\n # ratio_of_approx = row_vars/(self.d-approximation_precision)\n # rows = self.dtype.base**np.ceil(ratio_of_approx)\n rows_pot = self.dtype.base**np.ceil(row_vars)\n cols_pot = np.max(result.shape)/rows_pot\n result = np.reshape(result, (rows_pot, cols_pot))\n # if desired, crop the result of all zero columns/rows in the lower right\n if cropping and not rows == 1:\n uncropped = True\n while uncropped:\n uncropped = False\n if (result[:, -1] == 0).all():\n result = result[:, :-1]\n uncropped = True\n if (result[-1, :] == 0).all():\n result = result[:-1, :]\n uncropped = True\n return result", "def raw_to_matrix(self):\n # extract variable list (initial set guarantees to include all vars)\n expression = self.raw_expression\n varList = SymEq.get_var_list(expression)\n return SymEq.get_eqn_matrix(expression, varList)", "def column_convertor(x):\n x.shape = (1, x.shape[0])\n return x", "def test_conversions_to_matrix():\n R = np.eye(3)\n R2R = pr.matrix_from(R=R)\n assert_array_almost_equal(R2R, R)\n\n a = np.array([1, 0, 0, 0])\n a2R = pr.matrix_from(a=a)\n assert_array_almost_equal(a2R, R)\n\n q = np.array([1, 0, 0, 0])\n q2R = pr.matrix_from(q=q)\n assert_array_almost_equal(q2R, R)\n\n e_xyz = np.array([0, 0, 0])\n e_xyz2R = pr.matrix_from(e_xyz=e_xyz)\n assert_array_almost_equal(e_xyz2R, R)\n\n e_zyx = np.array([0, 0, 0])\n e_zyx2R = pr.matrix_from(e_zyx=e_zyx)\n assert_array_almost_equal(e_zyx2R, R)\n\n assert_raises_regexp(ValueError, \"no rotation\", pr.matrix_from)", "def matrix(self, modulus=None):\n basis = self.domain.basis_elements()\n cols = [self.codomain.represent(self.mapping(elt)) for elt in basis]\n if not cols:\n return DomainMatrix.zeros((self.codomain.n, 0), ZZ).to_dense()\n M = cols[0].hstack(*cols[1:])\n if modulus:\n M = M.convert_to(FF(modulus))\n return M", "def convert_to_measure_matrix(self):\n self.M = copy.deepcopy(self.m)\n\n for row in range(self.num_states):\n accum = 0\n for col in range(self.num_states):\n accum += self.m[row][col]\n self.M[row][col] = accum\n \n # pprint(self.m) \n # pprint(self.M)", "def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype=\"int32\", time_major=False):\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix", "def toMatrix(self,v):\n return Matrix([[v.x],[v.y],[v.z]])" ]
[ "0.68257034", "0.66232854", "0.63458335", "0.61276364", "0.61276364", "0.60878366", "0.60764503", "0.59793574", "0.59339464", "0.5888079", "0.5884792", "0.584454", "0.57784957", "0.57733315", "0.5763111", "0.5759501", "0.5737964", "0.57346207", "0.57008547", "0.5669869", "0.5664769", "0.56559473", "0.5648125", "0.5639778", "0.5629487", "0.5610861", "0.55969954", "0.5585847", "0.55847013", "0.558334", "0.55737495", "0.55616486", "0.5538052", "0.55347687", "0.5528579", "0.5505855", "0.5476899", "0.54537064", "0.5452895", "0.54527956", "0.54413617", "0.54413104", "0.54299664", "0.54098886", "0.54046404", "0.54011446", "0.5397937", "0.5374434", "0.53687006", "0.53668433", "0.5362711", "0.53624946", "0.5357661", "0.53498733", "0.5348414", "0.5348363", "0.53437483", "0.5342778", "0.53413624", "0.5340118", "0.53378946", "0.5329493", "0.5325515", "0.5309931", "0.5303258", "0.5295831", "0.52693766", "0.52672213", "0.5266718", "0.5265452", "0.52642053", "0.5263914", "0.52621037", "0.52615553", "0.5251718", "0.5237596", "0.5235524", "0.5235313", "0.5221741", "0.5221461", "0.52205914", "0.52153796", "0.5215191", "0.5215191", "0.5206703", "0.5206412", "0.52027154", "0.5195633", "0.5193381", "0.5190667", "0.5185734", "0.5183469", "0.5182093", "0.5175229", "0.516382", "0.51618856", "0.51604456", "0.51589656", "0.5158928", "0.51512504" ]
0.608287
6
Create a myqlm representation of a quantum channel from a qiskit representation of a quantum channel.
def qiskit_to_qchannel(representation):
    qchannel = None
    qiskit_data = representation.data
    # Find what representation it is.
    # Then create the corresponding matrix (kraus_ops|basis|matrix) from the data
    # of the representation.
    # Finally, create the QuantumChannel with the RepresentationType, the arity
    # (got from the qiskit representation) and the matrix.
    if isinstance(representation, Kraus):
        kraus_ops = []
        for arr in qiskit_data:
            kraus_ops.append(array_to_matrix(arr))
        qchannel = QuantumChannel(
            representation=RepresentationType.KRAUS,
            arity=representation.num_qubits,
            kraus_ops=kraus_ops)
    elif isinstance(representation, Chi):
        basis = []
        basis.append(array_to_matrix(qiskit_data))
        qchannel = QuantumChannel(
            representation=RepresentationType.CHI,
            arity=representation.num_qubits,
            basis=basis)
    elif isinstance(representation, SuperOp):
        basis = []
        basis.append(array_to_matrix(qiskit_data))
        qchannel = QuantumChannel(
            representation=RepresentationType.SUPEROP,
            arity=representation.num_qubits,
            basis=basis)
    elif isinstance(representation, PTM):
        matri = array_to_matrix(qiskit_data)
        qchannel = QuantumChannel(
            representation=RepresentationType.PTM,
            arity=representation.num_qubits,
            matrix=matri)
    elif isinstance(representation, Choi):
        matri = array_to_matrix(qiskit_data)
        qchannel = QuantumChannel(
            representation=RepresentationType.CHOI,
            arity=representation.num_qubits,
            matrix=matri)
    return qchannel
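For reference, the converter above relies on only two things from the Qiskit object: its .data payload and its num_qubits. Below is a minimal sketch of the Qiskit side alone, using qiskit.quantum_info.Kraus with illustrative amplitude-damping operators; the myQLM QuantumChannel/RepresentationType objects built by the converter are not constructed here.

import numpy as np
from qiskit.quantum_info import Kraus

# Single-qubit amplitude damping channel with an illustrative gamma = 0.1.
gamma = 0.1
k0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1.0 - gamma)]])
k1 = np.array([[0.0, np.sqrt(gamma)], [0.0, 0.0]])

chan = Kraus([k0, k1])
print(chan.num_qubits)  # 1 -> used as the arity of the myQLM QuantumChannel
print(len(chan.data))   # 2 Kraus operators, each fed through array_to_matrix()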
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qchannel_to_qiskit(representation):\n\n rep = representation.representation\n # Find what representation it is.\n # Then create the corresponding matrix and shape it like qiskit is expecting it.\n # Finally, create the qiskit representation from that matrix.\n if rep in (RepresentationType.PTM, RepresentationType.CHOI):\n matri = representation.matrix\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n return PTM(data) if (rep == RepresentationType.PTM) else Choi(data)\n if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):\n final_data = []\n for matri in representation.basis:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n if rep == RepresentationType.CHI:\n return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])\n return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])\n if rep == RepresentationType.KRAUS:\n final_data = []\n for matri in representation.kraus_ops:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n return Kraus(final_data)\n return None", "def qlm_to_qiskit(qlm_circuit, qubits=None):\n # Init measured qubits\n if qubits is None:\n qubits = list(range(qlm_circuit.nbqbits))\n\n qreg = QuantumRegister(qlm_circuit.nbqbits)\n creg = None\n param_list = []\n if qlm_circuit.nbcbits > 0:\n creg = ClassicalRegister(max(qlm_circuit.nbcbits, len(qubits)))\n q_circ = QuantumCircuit(qreg, creg)\n else:\n q_circ = QuantumCircuit(qreg)\n dic = _gen_qiskit_gateset(q_circ)\n for gate_op in qlm_circuit:\n if gate_op.type == OpType.GATETYPE:\n name, params = extract_syntax(\n qlm_circuit.gateDic[gate_op.gate], qlm_circuit.gateDic,\n var_dic=qlm_circuit.var_dic)\n nbctrls = name.count('C-')\n # changes variables and expressions to format used by Qiskit\n for index, param in enumerate(params):\n if isinstance(param, Variable):\n params[index] = _variable_to_parameter(\n param_list, variable=param)\n elif isinstance(param, ArithExpression):\n arith_expr_list = param.to_thrift().split()\n params[index] = _arith_expr_list_to_parameter_expression(\n param_list, arith_expr_list, param)\n try:\n if name == \"MS\":\n q_circ.ms(params[0], [qreg[i] for i in gate_op.qbits])\n else:\n if (nbctrls > 0 and name not in SUPPORTED_CTRLS):\n tmp = name\n count = 0\n gate = None\n while True:\n last = tmp\n tmp = tmp.replace(\"C-\", \"\", 1)\n if last == tmp:\n raise ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n else:\n count += 1\n gate = _get_qiskit_gate_from_name(tmp)\n if gate != None:\n gate = gate(*params).control(count)\n break\n if gate != None:\n q_circ.append(gate, [qreg[i] for i in gate_op.qbits])\n else:\n dic[name](* params + [qreg[i] for i in gate_op.qbits])\n except KeyError:\n raise 
ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n elif gate_op.type == OpType.MEASURE:\n for index in range(len(gate_op.qbits)):\n q_circ.measure(gate_op.qbits[index], gate_op.cbits[index])\n\n # Adding measures to unify the interface\n for qbit_index, cbit in zip(qubits, creg):\n q_circ.measure(qreg[qbit_index], cbit)\n return q_circ", "def qiskit_to_qlm(qiskit_circuit, sep_measures=False, **kwargs):\n prog = Program()\n qbits_num = 0\n to_measure = []\n for reg in qiskit_circuit.qregs:\n qbits_num = qbits_num + reg.size\n qbits = prog.qalloc(qbits_num)\n\n cbits_num = 0\n for reg in qiskit_circuit.cregs:\n cbits_num = cbits_num + reg.size\n cbits = prog.calloc(cbits_num)\n variables = []\n for gate_op in qiskit_circuit.data:\n if gate_op[0].name == \"barrier\" or gate_op[0].name == \"opaque\":\n continue\n qbit_args = []\n cbit_args = []\n prms = [] # gate parameters\n # Get qbit arguments\n for qarg in gate_op[1]:\n qbit_args.append(\n _get_qindex(qiskit_circuit, qarg.register.name, qarg.index))\n\n # Get cbit arguments\n for carg in gate_op[2]:\n cbit_args.append(\n _get_cindex(qiskit_circuit, carg.register.name, carg.index))\n\n # Get parameters\n for param in gate_op[0]._params:\n if isinstance(param, (Parameter, ParameterExpression)):\n prms.append(_qiskit_to_qlm_param(prog, variables, param))\n else:\n prms.append(float(param))\n # Apply measure #\n if gate_op[0].name == \"measure\":\n if sep_measures:\n to_measure.extend(qbit_args)\n else:\n prog.measure([qbits[i] for i in qbit_args],\n [cbits[i] for i in cbit_args])\n elif gate_op[0].name == \"reset\":\n prog.reset([qbits[i] for i in qbit_args],\n [cbits[i] for i in cbit_args])\n else:\n if gate_op[0].name == \"ms\":\n # In this case, the process function needs the number of qubits\n prms.append(len(qbit_args))\n # Apply gates #\n num_ctrl_qubits = None\n try:\n num_ctrl_qubits = gate_op[0].num_ctrl_qubits\n except:\n None\n gate = get_gate(gate_op[0].name, prms, num_ctrl_qubits)\n prog.apply(gate, *[qbits[i] for i in qbit_args][:gate.arity])\n if sep_measures:\n return prog.to_circ(**kwargs), list(set(to_measure))\n\n return prog.to_circ(**kwargs)", "def test_myqlm_backend():\n circuit = Circuit()\n circuit += ops.DefinitionBit(name='ro', length=2, is_output=True)\n circuit += ops.RotateZ(qubit=0, theta=0)\n circuit += ops.PauliX(qubit=1)\n circuit += ops.MeasureQubit(qubit=0, readout='ro', readout_index=0)\n circuit += ops.MeasureQubit(qubit=1, readout='ro', readout_index=1)\n\n backend = MyQLMBackend(number_qubits=2,\n number_measurements=5)\n\n # (bit_dict, float_dict, complex_dict) = backend.run_circuit(circuit)\n # npt.assert_equal(float_dict, dict())\n # npt.assert_equal(complex_dict, dict())\n # npt.assert_equal(bit_dict['ro'], [np.array([0., 1.])] * 5)", "def job_to_qiskit_circuit(qlm_job):\n # Check processing type\n assert_qpu(qlm_job.type == ProcessingType.SAMPLE,\n \"Only jobs having a SAMPLE processing type \"\n \"could be translated into Qiskit circuits\")\n\n # Convert\n return qlm_to_qiskit(qlm_job.circuit, qlm_job.qubits)", "def qubit_to_matrix(qubit, format='sympy'):\n return represent(qubit, format=format)", "def _create_quantum_circuit(self):\n reg_list = []\n for entry in self.regdefs:\n is_qreg = self._match_entry_type(entry, [ASTType.QREG])\n\n if is_qreg:\n reg_list.append(QuantumRegister(entry.get('qreg_num'), entry.get('qreg_name')))\n else:\n reg_list.append(ClassicalRegister(entry.get('creg_num'), entry.get('creg_name')))\n\n self.circuit = QuantumCircuit(*reg_list)\n return 
self.circuit", "def to_qobj(self): # -> \"qutip.Qobj\"\n from qutip import Qobj\n\n q_dims = [list(self.hilbert_physical.shape), list(self.hilbert_physical.shape)]\n return Qobj(np.asarray(self.to_matrix()), dims=q_dims)", "def qlm_circ_sep_meas(qiskit_circuit):\n return qiskit_to_qlm(qiskit_circuit, True)", "def DAQchannels(tree, DAQnum, CHnum):\n tree.addNode('.NI_6133.DAQ_' + str(DAQnum) + '.CHANNEL_' + str(CHnum))\n chanpath = ('.NI_6133.DAQ_' + str(DAQnum) + '.CHANNEL_' + str(CHnum)\n + '.CHAN_SETTING')\n tree.addNode(chanpath)\n AddNodeWithTag(tree, chanpath + ':ACTIVE', 'NUMERIC', 'DAQTIVE_DCARD' +\n str(DAQnum) + 'CH' + str(CHnum))\n AddNodeWithTag(tree, chanpath + ':CHANNEL_NAME', 'TEXT', 'USERNAME_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum))\n AddNumericWithUnit(tree, chanpath + ':VOLT_RANGE', 'VOLTRANGE_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum), 'V')\n AddNodeWithTag(tree, chanpath + ':NI_NAME', 'TEXT', 'NINAME_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum))", "def serialize_Q(Q: np.ndarray):\n ret = QMatrix()\n ret.q_matrix = [QMatrixRow() for i in range(64)]\n for i in range(64):\n row = []\n for j in range(9):\n row.append(Q.q_matrix[i][j])\n ret.q_matrix[i].q_matrix_row = row\n return ret", "def quat2DCM(q):\n\tDCM = quatLeftMat(q) @ quatRightMat(q).T\n\tDCM = DCM[1:, 1:]\n\treturn DCM", "def create_mini_ticker_channel(self, symbol: str) -> str:", "def quat2dcm(q):\n q0q0 = q[0] * q[0]\n q0q1 = q[0] * q[1]\n q0q2 = q[0] * q[2]\n q0q3 = q[0] * q[3]\n q1q1 = q[1] * q[1]\n q1q2 = q[1] * q[2]\n q1q3 = q[1] * q[3]\n q2q2 = q[2] * q[2]\n q2q3 = q[2] * q[3]\n q3q3 = q[3] * q[3]\n dcm = np.zeros((3, 3))\n dcm[0, 0] = q0q0 + q1q1 - q2q2 - q3q3\n dcm[0, 1] = 2.0*(q1q2 + q0q3)\n dcm[0, 2] = 2.0*(q1q3 - q0q2)\n dcm[1, 0] = 2.0*(q1q2 - q0q3)\n dcm[1, 1] = q0q0 - q1q1 + q2q2 - q3q3\n dcm[1, 2] = 2.0*(q2q3 + q0q1)\n dcm[2, 0] = 2.0*(q1q3 + q0q2)\n dcm[2, 1] = 2.0*(q2q3 - q0q1)\n dcm[2, 2] = q0q0 - q1q1 - q2q2 + q3q3\n return dcm", "def format_molecule_for_qchem_old(self, mixedbas=True):\n factor = 1.0 if self.PYunits == 'Angstrom' else psi_bohr2angstroms\n\n text = \"\"\n text += '$molecule\\n'\n text += '%d %d\\n' % (self.molecular_charge(), self.multiplicity())\n\n for i in range(self.natom()):\n [x, y, z] = self.atoms[i].compute()\n if mixedbas:\n text += '%2s ' % (self.symbol(i) if self.Z(i) else \"Gh\")\n else:\n text += '%-3s ' % (('' if self.Z(i) else '@') + self.symbol(i))\n text += '%17.12f %17.12f %17.12f\\n' % (x * factor, y * factor, z * factor)\n text += '$end\\n\\n'\n\n # prepare molecule keywords to be set as c-side keywords\n options = collections.defaultdict(lambda: collections.defaultdict(dict))\n #options['QCHEM'['QCHEM_CHARGE']['value'] = self.molecular_charge()\n #options['QCHEM'['QCHEM_MULTIPLICITY']['value'] = self.multiplicity()\n options['QCHEM']['QCHEM_INPUT_BOHR']['value'] = False\n #options['QCHEM']['QCHEM_COORDINATES']['value'] = 'CARTESIAN'\n #SYM_IGNORE equiv to no_reorient, no_com, symmetry c1\n\n options['QCHEM']['QCHEM_INPUT_BOHR']['clobber'] = True\n\n return text, options", "def __init__(self, dmm: \"Keithley_6500\", channel: int, **kwargs) -> None:\n super().__init__(dmm, f\"ch{channel}\", **kwargs)\n self.channel = channel\n self.dmm = dmm\n\n self.add_parameter('resistance',\n unit='Ohm',\n label=f'Resistance CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 'RES'))\n\n self.add_parameter('resistance_4w',\n unit='Ohm',\n label=f'Resistance (4-wire) CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 
'FRES'))\n\n self.add_parameter('voltage_dc',\n unit='V',\n label=f'DC Voltage CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 'VOLT'))\n\n self.add_parameter('current_dc',\n unit='A',\n label=f'DC current CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 'CURR'))", "def quat2mat(q):\n #leila: https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n\n sz = quat.get_size(q)\n q0 = quat.getq0(q)\n q1 = quat.getq1(q)\n q2 = quat.getq2(q)\n q3 = quat.getq3(q)\n qt = quat.get_type(q)\n\n g = np.zeros((sz, 3, 3))\n g[:, 0, 0] = np.square(q0) + np.square(q1) - np.square(q2) - np.square(q3)\n g[:, 0, 1] = 2*(q1*q2 - q0*q3)\n g[:, 0, 2] = 2*(q3*q1 + q0*q2)\n g[:, 1, 0] = 2*(q1*q2 + q0*q3)\n g[:, 1, 1] = np.square(q0) - np.square(q1) + np.square(q2) - np.square(q3)\n g[:, 1, 2] = 2*(q2*q3 - q0*q1)\n g[:, 2, 0] = 2*(q3*q1 - q0*q2)\n g[:, 2, 1] = 2*(q2*q3 + q0*q1)\n g[:, 2, 2] = np.square(q0) - np.square(q1) - np.square(q2) + np.square(q3)\n\n if sz == 1:\n g = g.reshape((3, 3))\n if qt == -1:\n g = -g\n else:\n inds1 = np.where(qt == -1)\n g[inds1, :, :] = -g[inds1, :, :]\n\n return g", "def to_qobj(self): # -> \"qutip.Qobj\"\n from qutip import Qobj\n\n q_dims = [list(self.hilbert.shape), [1 for i in range(self.hilbert.size)]]\n return Qobj(np.asarray(self.to_array()), dims=q_dims)", "def __init__(self, qubit={}, blockNum=1, readout={}):\n self.diagram = np.asarray(\n [[np.nan] * blockNum] * (len(qubit) + len(readout)),\n dtype=object\n )\n # check datatype and assign\n if isinstance(qubit, list):\n self._qubitDict = dict(zip(qubit, range(len(qubit))))\n elif isinstance(qubit, dict):\n self._qubitDict = qubit\n else:\n raise TypeError('qubitDict: Unsupported format')\n if isinstance(readout, list):\n used_idx = [val for _, val in self._qubitDict.items()]\n unused_idx = sorted([\n val for val in range(len(readout) + len(qubit))\n if val not in used_idx\n ])\n self._readoutDict = dict(zip(readout, unused_idx))\n elif isinstance(readout, dict):\n self._readoutDict = readout\n else:\n raise TypeError('readoutDict: Unsupported format')\n # index check\n for key, val in {**self._qubitDict, **self._readoutDict}.items():\n if val >= len(self.diagram[:, 0]):\n raise ValueError(\n f'QubitChannel \\'{key}\\' assignment out of bound with ' +\n f'index: {val}'\n )\n self._name = ''", "def _gen_mixnet_m(channel_multiplier=1.0, depth_multiplier=1.0, num_classes=1000, **kwargs):\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c24'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu\n # stage 2, 56x56 in\n ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def, depth_multiplier=depth_multiplier, depth_trunc='round'),\n num_classes=num_classes,\n stem_size=24,\n num_features=1536,\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n act_fn=F.relu,\n **kwargs\n )\n return model", "def construct_qcbm(circuit, n_qubits, depth):\n\n for d in range(depth):\n for i in range(n_qubits):\n 
circuit.append_gate(Gate('X', target = i, angle = np.random.random()*np.pi*2))\n circuit.append_gate(Gate('Z', target = i, angle = np.random.random()*np.pi*2))\n if n_qubits != 1:\n for i in range(n_qubits):\n circuit.append_gate(Gate('CNOT', control = i, target = (i+1)%n_qubits))\n return circuit", "def test_molecule_subclass_from_qcschema(self):\n import qcportal as ptl\n\n client = ptl.FractalClient()\n ds = client.get_collection(\n \"TorsionDriveDataset\", \"Fragment Stability Benchmark\"\n )\n entry = ds.get_entry(\n \"CC(=O)Nc1cc2c(cc1OC)nc[n:4][c:3]2[NH:2][c:1]3ccc(c(c3)Cl)F\"\n )\n # now make the molecule from the record instance with and without the geometry\n mol = MyMol.from_qcschema(entry.dict(encoding=\"json\"))\n assert isinstance(mol, MyMol)\n # Make from object, which will include geometry\n mol = MyMol.from_qcschema(entry, client)\n assert isinstance(mol, MyMol)", "def quantum_net(self, q_input_features, q_weights_flat):\n\n # Reshape weights\n q_weights = q_weights_flat.reshape(self.args.q_depth, self.args.n_qubits, 3)\n\n # Start from state |+> , unbiased w.r.t. |0> and |1>\n # Amplitude encoding\n qml.QubitStateVector(q_input_features, wires=list(range(self.args.n_qubits)))\n \n # Sequence of trainable variational layers\n for k in range(self.args.q_depth):\n self.entangling_layer(self.args.n_qubits)\n self.Rot_layer(q_weights[k])\n\n # Expectation values in the Z basis\n exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(self.args.target_class)]\n return tuple(exp_vals)", "def compileCkt(self):\n f = np.vectorize(lambda x: isinstance(x, QubitChannel))\n table = f(self.diagram)\n col_bool = np.bitwise_or.reduce(table, axis=1)\n # filter nan in 'qubit' direction\n if not np.bitwise_and.reduce(col_bool):\n raise ValueError('Found unassigned qubit')\n # filter nan in 'time' direction\n row_bool = np.bitwise_or.reduce(table, axis=0)\n diagram = self.diagram[:, row_bool]\n table = table[:, row_bool]\n # align QubitChannel objects in the table column by column\n for time_idx in range(len(table[0, :])):\n diagram[table[:, time_idx], time_idx\n ] = QubitChannel.alignQubitChannels(\n *diagram[table[:, time_idx], time_idx]\n )\n # replace nans with null QubitChannel objects\n for qubit_idx, row in enumerate(table):\n for time_idx, flag in enumerate(row):\n if flag:\n continue\n span_idx = np.where(f(diagram[:, time_idx]))[0][0]\n wire_idx = np.where(f(diagram[qubit_idx, :]))[0][0]\n diagram[qubit_idx, time_idx] = QubitChannel.null(\n diagram[span_idx, time_idx], diagram[qubit_idx, wire_idx]\n )\n try:\n self.compiled = np.sum(diagram, axis=1)\n except SystemError:\n raise ValueError('Error during wire concatenation')", "def buildQuestion():\n #example.com\n QNAME = b\"\\x07\\x65\\x78\\x61\\x6d\\x70\\x6c\\x65\\x03\\x63\\x6f\\x6d\\x00\"\n\n \"\"\"\n A two octet code which specifies the type of the query.\n The values for this field include all codes valid for a\n TYPE field, together with some more general codes which\n can match more than one type of RR.\n \"\"\" \n QTYPE = b\"\\x00\\x01\"\n\n \"\"\"\n A two octet code that specifies the class of the query.\n For example, the QCLASS field is IN for the Internet.\n \"\"\"\n QCLASS = b\"\\x00\\x01\"\n\n dnsBody = QNAME + QTYPE + QCLASS\n #print(dnsBody)\n return dnsBody", "def _create_mc_question(self, description):\n\n mc_dict = {\n 'description': description,\n 'type': models.QuestionDTO.MULTIPLE_CHOICE,\n 'choices': [\n {\n 'text': 'correct answer',\n 'score': 1.0\n },\n {\n 'text': 'incorrect answer',\n 'score': 
0.0\n }],\n 'version': '1.5'\n }\n question = models.QuestionDTO(None, mc_dict)\n qid = models.QuestionDAO.save(question)\n return models.QuestionDAO.load(qid)", "def repr_to_spectrogram(self, mdct_norm, intensity=False, channel=0, cmap=None):\n x = tf.cast(mdct_norm[:, :, :, channel:channel+1], tf.float32)\n\n def normalized_dB_scale(ampl, with_sign=True):\n normalized_dB = self.psychoacoustic.amplitude_to_dB_norm(ampl)\n if with_sign:\n # range -1..1\n return tf.sign(ampl) * normalized_dB\n else:\n # range 0..1\n return normalized_dB\n\n # convert to 0..1 range\n if intensity:\n image = normalized_dB_scale(x, with_sign=False)\n else:\n image = (normalized_dB_scale(x, with_sign=True) + 1.) / 2.\n\n image = tf.map_fn(lambda im: tf.image.rot90(im), image)\n\n # colorize with cmap\n if cmap is not None:\n # quantize\n image = image[:, :, :, 0] # remove the dummy channel direction (will be replace with rgb info from color map)\n image_index = tf.cast(tf.round(image * (cmap.N-1)), dtype=tf.int32) # indices in [0, cmap.N-1]\n\n image_index = tf.clip_by_value(image_index, clip_value_min=0, clip_value_max=cmap.N-1)\n\n # gather\n color_map = matplotlib.cm.get_cmap(cmap)(np.arange(cmap.N)) # shape=[cmap.N, 3]\n colors = tf.constant(color_map, dtype=tf.float32)\n image = tf.gather(colors, image_index) # image[b, h, w, c] = color[image_index[b, h, w], c]\n\n return image", "def create_mini_tickers_channel(self) -> str:", "def qtc2state(self, qtc):\n \n state_rep = []\n for idx, element in enumerate(qtc):\n# val_qtc = validateQtcSequences(element)\n d = element.shape[1]\n mult = 3**np.arange(d-1, -1, -1)\n state_num = np.append(\n 0,\n ((element + 1)*np.tile(mult, (element.shape[0], 1))).sum(axis=1) + 1\n )\n state_num = np.append(state_num, 82)\n state_char = ''\n for n in state_num:\n state_char += chr(int(n)+32)\n state_rep.append(state_num.tolist())\n \n return state_rep", "def _generate_qubits(self):\n return cq.LineQubit.range(4)", "def matrix_to_qubit(matrix):\n # Determine the format based on the type of the input matrix\n format = 'sympy'\n if isinstance(matrix, numpy_ndarray):\n format = 'numpy'\n if isinstance(matrix, scipy_sparse_matrix):\n format = 'scipy.sparse'\n\n # Make sure it is of correct dimensions for a Qubit-matrix representation.\n # This logic should work with sympy, numpy or scipy.sparse matrices.\n if matrix.shape[0] == 1:\n mlistlen = matrix.shape[1]\n nqubits = log(mlistlen, 2)\n ket = False\n cls = QubitBra\n elif matrix.shape[1] == 1:\n mlistlen = matrix.shape[0]\n nqubits = log(mlistlen, 2)\n ket = True\n cls = Qubit\n else:\n raise QuantumError(\n 'Matrix must be a row/column vector, got %r' % matrix\n )\n if not isinstance(nqubits, Integer):\n raise QuantumError('Matrix must be a row/column vector of size '\n '2**nqubits, got: %r' % matrix)\n # Go through each item in matrix, if element is non-zero, make it into a\n # Qubit item times the element.\n result = 0\n for i in range(mlistlen):\n if ket:\n element = matrix[i, 0]\n else:\n element = matrix[0, i]\n if format in ('numpy', 'scipy.sparse'):\n element = complex(element)\n if element != 0.0:\n # Form Qubit array; 0 in bit-locations where i is 0, 1 in\n # bit-locations where i is 1\n qubit_array = [int(i & (1 << x) != 0) for x in range(nqubits)]\n qubit_array.reverse()\n result = result + element*cls(*qubit_array)\n\n # If SymPy simplified by pulling out a constant coefficient, undo that.\n if isinstance(result, (Mul, Add, Pow)):\n result = result.expand()\n\n return result", "def 
update_svq_time_metric(ml_channel_id, ml_channel_name):\n result = []\n entry = [\"MediaLive\", \"SvqTime\", \"ChannelId\", ml_channel_id, \"Pipeline\", \"0\", {\"label\": ml_channel_name + \"-0\"}]\n result.append(entry)\n entry = [\"MediaLive\", \"SvqTime\", \"ChannelId\", ml_channel_id, \"Pipeline\", \"1\", {\"yAxis\": \"right\",\n \"label\": ml_channel_name + \"-1\"}]\n result.append(entry)\n return result", "def QCD(fp):\n lqcd = unpack('>H', fp.read(2))[0]\n sqcd = unpack('B', fp.read(1))[0]\n\n _spqcd = []\n _remaining = lqcd - 3\n bitstring = '{:>08b}'.format(sqcd)\n while _remaining > 0:\n if bitstring[3:] == '00000':\n # xxx0 0000: no quantisation\n _spqcd.append(unpack('B', fp.read(1))[0])\n _remaining -= 1\n elif bitstring[3:] == '00001':\n # xxx0 0001: scalar derived\n _spqcd.append(unpack('>H', fp.read(2))[0])\n _remaining -= 2\n elif bitstring[3:] == '00010':\n # xxx0 0010: scalar expounded\n _spqcd.append(unpack('>H', fp.read(2))[0])\n _remaining -= 2\n else:\n raise NotImplementedError('QCD invalid value')\n\n #guard_bits = int(bitstring[3:], 2)\n\n info = {\n 'Lqcd' : lqcd,\n 'Sqcd' : sqcd,\n 'SPqcd' : _spqcd\n }\n\n return info", "def convertToSpectroGram(self):", "def form_dqxx_word(dqcr, dqch):\n # Loop over all channels and form the DQXX word\n dqxx = [0 for i in range(19 * 16 * 32)]\n for lcn in range(len(dqxx)):\n dqch_word = dqch[lcn]\n # get the crate number\n crate = (lcn & 0x3e00) >> 9\n # get the card number\n card = (lcn & 0x1e0) >> 5\n dqcr_word = dqcr[crate * 16 + card]\n # get the channel number\n ch = lcn & 0x1f\n # Get the daughterboard number for this channel\n db = ch / 8\n dqxx_word = 0\n # 0 DQXX[0] = DQCR[0]\n dqxx_word |= ((not check_bit(dqcr_word, 0)) << 0)\n # 1 DQXX[1] = DQCR[8]\n dqxx_word |= ((not check_bit(dqcr_word, 8)) << 1)\n # 2 DQXX[2] = DQCR[9]\n dqxx_word |= ((not check_bit(dqcr_word, 9)) << 2)\n # 3 DQXX[3] = DQCR[10]\n dqxx_word |= ((not check_bit(dqcr_word, 10)) << 3)\n # 4 DQXX[4] = DQCR[11]\n dqxx_word |= ((not check_bit(dqcr_word, 11)) << 4)\n # 8 DQXX[8] = DQCR[1]\n dqxx_word |= ((not check_bit(dqcr_word, 1)) << 8)\n # 9 DQXX[9] = DQCR[4+db]\n dqxx_word |= ((not check_bit(dqcr_word, 4 + db)) << 9)\n # 10 DQXX[10] = DQCR[3]\n dqxx_word |= ((not check_bit(dqcr_word, 3)) << 10)\n # 16 DQXX[16] = DQCH[2]\n dqxx_word |= ((not check_bit(dqch_word, 2)) << 16)\n # 17 DQXX[17] = DQCH[3]\n dqxx_word |= ((not check_bit(dqch_word, 3)) << 17)\n # 18 DQXX[18] = DQCH[4]\n dqxx_word |= ((not check_bit(dqch_word, 4)) << 18)\n # 19 check for maxed-out threshold ( value of 255 )\n threshold = (dqch_word & 0xff0000) >> 16\n if not (threshold == 255):\n dqxx_word |= (1 << 19)\n # 20 DQXX[20] = DQCH[6]\n dqxx_word |= ((not check_bit(dqch_word, 6)) << 20)\n # 21 DQXX[21] = DQCH[7]\n dqxx_word |= ((not check_bit(dqch_word, 7)) << 21)\n # 22 DQXX[22] = DQCH[8]\n dqxx_word |= ((not check_bit(dqch_word, 8)) << 22)\n # 24 DQXX[24] = DQCR[2]\n dqxx_word |= ((not check_bit(dqcr_word, 2)) << 24)\n # 26 DQXX[26] = DQCR[12+db]\n dqxx_word |= ((not check_bit(dqcr_word, (12 + db))) << 26)\n # 27 DQXX[27] = DQCH[1]\n dqxx_word |= ((not check_bit(dqch_word, 1)) << 27)\n # 28 DQXX[28] = DQCH[0]\n dqxx_word |= ((not check_bit(dqch_word, 0)) << 28)\n # 29 DQXX[29] = DQCH[5]\n dqxx_word |= ((not check_bit(dqch_word, 5)) << 29)\n # 30 DQXX[30] = DQCH[9]\n dqxx_word |= ((not check_bit(dqch_word, 9)) << 30)\n dqxx[lcn] = dqxx_word\n return dqxx", "def _cim_quality():\n return {\n 'type' : 'class',\n 'name' : 'cim_quality',\n 'base' : None,\n 'is_abstract' : False,\n 
'is_entity' : True,\n 'doc' : 'The starting point for a quality record. It can contain any number of issues and reports. An issue is an open-ended description of some issue about a CIM instance. A record is a prescribed description of some specific quantitative measure that has been applied to a CIM instance.',\n 'properties' : [\n ('meta', 'shared.doc_meta_info', '1.1', None),\n ('reports', 'quality.report', '0.N', None),\n ],\n 'decodings' : [\n ('meta', 'self::cim:cIM_Quality'),\n ('reports', 'child::cim:report'),\n ]\n }", "def create_ticker_channel(self, symbol: str) -> str:", "def from_wram(cry):\n raise NotImplementedError", "def create_from(cls, backend):\n backend_config = backend.configuration()\n\n # TODO : Remove usage of config.defaults when backend.defaults() is updated.\n try:\n backend_default = backend.defaults()\n buffer = backend_default.buffer\n except ModelValidationError:\n try:\n buffer = backend_config.defaults.get('buffer', 0)\n except AttributeError:\n buffer = 0\n\n # system size\n n_qubits = backend_config.n_qubits\n n_registers = backend_config.n_registers\n n_uchannels = backend_config.n_uchannels\n\n # generate channels with assuming their numberings are aligned with qubits\n drives = [DriveChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n measures = [MeasureChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n controls = [ControlChannel(i, buffer=buffer) for i in range(n_uchannels)]\n\n acquires = [AcquireChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n qubits = []\n for i in range(n_qubits):\n # TODO: get qubits <-> channels relationship from backend\n qubit = Qubit(i, drives[i], measures[i], acquires[i],\n control_channels=[] if not controls else controls)\n qubits.append(qubit)\n\n registers = [RegisterSlot(i) for i in range(n_registers)]\n # TODO: get #mem_slots from backend\n mem_slots = [MemorySlot(i) for i in range(len(qubits))]\n\n return DeviceSpecification(qubits, registers, mem_slots)", "def convert_to(self, domain):\n if domain == self.domain:\n return self.copy()\n elif domain == QQ and self.domain == ZZ:\n return self._new(flint.fmpq_mat(self.rep), self.shape, domain)\n elif domain == ZZ and self.domain == QQ:\n # XXX: python-flint has no fmpz_mat.from_fmpq_mat\n return self.to_ddm().convert_to(domain).to_dfm()\n else:\n # It is the callers responsibility to convert to DDM before calling\n # this method if the domain is not supported by DFM.\n raise NotImplementedError(\"Only ZZ and QQ are supported by DFM\")", "def convert_dds_to_qiskit_quantum_circuit(\n dynamic_decoupling_sequence,\n target_qubits=None,\n gate_time=0.1,\n add_measurement=True,\n algorithm=INSTANT_UNITARY,\n quantum_registers=None,\n circuit_name=None):\n\n if dynamic_decoupling_sequence is None:\n raise ArgumentsValueError('No dynamic decoupling sequence provided.',\n {'dynamic_decoupling_sequence': dynamic_decoupling_sequence})\n\n if not isinstance(dynamic_decoupling_sequence, DynamicDecouplingSequence):\n raise ArgumentsValueError('Dynamical decoupling sequence is not recognized.'\n 'Expected DynamicDecouplingSequence instance',\n {'type(dynamic_decoupling_sequence)':\n type(dynamic_decoupling_sequence)})\n\n if target_qubits is None:\n target_qubits = [0]\n\n if gate_time <= 0:\n raise ArgumentsValueError(\n 'Time delay of identity gate must be greater than zero.',\n {'gate_time': gate_time})\n\n if np.any(target_qubits) < 0:\n raise ArgumentsValueError(\n 'Every target qubits index must be positive.',\n {'target_qubits': target_qubits})\n\n if algorithm 
not in [FIX_DURATION_UNITARY, INSTANT_UNITARY]:\n raise ArgumentsValueError('Algorithm must be one of {} or {}'.format(\n INSTANT_UNITARY, FIX_DURATION_UNITARY), {'algorithm': algorithm})\n\n if quantum_registers is not None:\n if (max(target_qubits)+1) > len(quantum_registers):\n raise ArgumentsValueError('Target qubit is not present in quantum_registers',\n {'target_qubits': target_qubits,\n 'size(quantum_registers)': len(quantum_registers)},\n extras={'max(target_qubits)': max(target_qubits)})\n quantum_registers = quantum_registers\n else:\n quantum_registers = QuantumRegister(max(target_qubits)+1)\n\n classical_registers = None\n if add_measurement:\n classical_registers = ClassicalRegister(len(target_qubits))\n quantum_circuit = QuantumCircuit(quantum_registers, classical_registers)\n else:\n quantum_circuit = QuantumCircuit(quantum_registers)\n\n if circuit_name is not None:\n quantum_circuit.name = circuit_name\n\n unitary_time = 0.\n if algorithm == FIX_DURATION_UNITARY:\n unitary_time = gate_time\n\n rabi_rotations = dynamic_decoupling_sequence.rabi_rotations\n azimuthal_angles = dynamic_decoupling_sequence.azimuthal_angles\n detuning_rotations = dynamic_decoupling_sequence.detuning_rotations\n\n if len(rabi_rotations.shape) == 1:\n rabi_rotations = rabi_rotations[np.newaxis, :]\n if len(azimuthal_angles.shape) == 1:\n azimuthal_angles = azimuthal_angles[np.newaxis, :]\n if len(detuning_rotations.shape) == 1:\n detuning_rotations = detuning_rotations[np.newaxis, :]\n\n operations = np.vstack((rabi_rotations, azimuthal_angles, detuning_rotations))\n offsets = dynamic_decoupling_sequence.offsets\n\n time_covered = 0\n for operation_idx in range(operations.shape[1]):\n\n offset_distance = offsets[operation_idx] - time_covered\n\n if np.isclose(offset_distance, 0.0):\n offset_distance = 0.0\n\n if offset_distance < 0:\n raise ArgumentsValueError(\"Offsets cannot be placed properly\",\n {'sequence_operations': operations})\n\n if offset_distance > 0:\n while (time_covered+gate_time) <= offsets[operation_idx]:\n for qubit in target_qubits:\n quantum_circuit.iden(quantum_registers[qubit]) # pylint: disable=no-member\n quantum_circuit.barrier(quantum_registers[qubit]) # pylint: disable=no-member\n time_covered += gate_time\n\n rabi_rotation = operations[0, operation_idx]\n azimuthal_angle = operations[1, operation_idx]\n x_rotation = rabi_rotation * np.cos(azimuthal_angle)\n y_rotation = rabi_rotation * np.sin(azimuthal_angle)\n z_rotation = operations[2, operation_idx]\n\n rotations = np.array([x_rotation, y_rotation, z_rotation])\n zero_pulses = np.isclose(rotations, 0.0).astype(np.int)\n nonzero_pulse_counts = 3 - np.sum(zero_pulses)\n if nonzero_pulse_counts > 1:\n raise ArgumentsValueError(\n 'Open Controls support a sequence with one '\n 'valid pulse at any offset. 
Found sequence '\n 'with multiple rotation operations at an offset.',\n {'dynamic_decoupling_sequence': str(dynamic_decoupling_sequence),\n 'offset': dynamic_decoupling_sequence.offsets[operation_idx],\n 'rabi_rotation': dynamic_decoupling_sequence.rabi_rotations[\n operation_idx],\n 'azimuthal_angle': dynamic_decoupling_sequence.azimuthal_angles[\n operation_idx],\n 'detuning_rotaion': dynamic_decoupling_sequence.detuning_rotations[\n operation_idx]}\n )\n\n for qubit in target_qubits:\n if nonzero_pulse_counts == 0:\n quantum_circuit.u3(\n 0., 0., 0., # pylint: disable=no-member\n quantum_registers[qubit])\n else:\n if not np.isclose(rotations[0], 0.0):\n quantum_circuit.u3(\n rotations[0], -pi / 2, pi / 2, # pylint: disable=no-member\n quantum_registers[qubit])\n elif not np.isclose(rotations[1], 0.0):\n quantum_circuit.u3(\n rotations[1], 0., 0., # pylint: disable=no-member\n quantum_registers[qubit])\n elif not np.isclose(rotations[2], 0.):\n quantum_circuit.u1(\n rotations[2], # pylint: disable=no-member\n quantum_registers[qubit])\n quantum_circuit.barrier(quantum_registers[qubit]) # pylint: disable=no-member\n\n if np.isclose(np.sum(rotations), 0.0):\n time_covered = offsets[operation_idx]\n else:\n time_covered = offsets[operation_idx] + unitary_time\n\n if add_measurement:\n for q_index, qubit in enumerate(target_qubits):\n quantum_circuit.measure(quantum_registers[qubit], #pylint: disable=no-member\n classical_registers[q_index])\n\n return quantum_circuit", "def set_qs_type(self: Qs, qs_type: str = \"\", rows: int = 0, columns: int = 0, copy: bool = True) -> Qs:\n\n # Checks.\n if rows and columns and rows * columns != self.dim:\n raise ValueError(\n f\"Oops, check those values again for rows:{rows} columns:{columns} dim:{self.dim}\"\n )\n\n new_q = self\n\n if copy:\n new_q = deepcopy(self)\n\n # Assign values if need be.\n if new_q.qs_type != qs_type:\n new_q.rows = 0\n\n if qs_type == \"ket\" and not new_q.rows:\n new_q.rows = new_q.dim\n new_q.columns = 1\n\n elif qs_type == \"bra\" and not new_q.rows:\n new_q.rows = 1\n new_q.columns = new_q.dim\n\n elif qs_type in [\"op\", \"operator\"] and not new_q.rows:\n # Square series\n root_dim = math.sqrt(new_q.dim)\n\n if root_dim.is_integer():\n new_q.rows = int(root_dim)\n new_q.columns = int(root_dim)\n qs_type = \"op\"\n\n elif rows * columns == new_q.dim and not new_q.qs_type:\n if new_q.dim == 1:\n qs_type = \"scalar_q\"\n elif new_q.rows == 1:\n qs_type = \"bra\"\n elif new_q.columns == 1:\n qs_type = \"ket\"\n else:\n qs_type = \"op\"\n\n if not qs_type:\n raise Exception(\n \"Oops, please set rows and columns for this quaternion series operator. 
Thanks.\"\n )\n\n if new_q.dim == 1:\n qs_type = \"scalar_q\"\n\n new_q.qs_type = qs_type\n\n return new_q", "def make_eq(self, eq):\n for bands in eq:\n eq = system.create_dsp_by_type(FMOD_DSP_TYPE_PARAMEQ)\n eq.set_param(0, bands[0]) # centre\n eq.set_param(1, 1.0) # octaves\n eq.set_param(2, bands[1]) # gain \n return eq", "def test_creation_from_choi_operator():\n # Get krauss operators from dephrasure channel\n krauss_ops = set_up_dephrasure_conditions(0.1, 0.2)\n\n # Construct choi matrix from krauss operators\n choi_matrix = sum([np.outer(np.ravel(x, order=\"F\"),\n np.conj(np.ravel(x, order=\"F\"))) for x in krauss_ops])\n numb_qubits, dim_in, dim_out = [1, 1], 2, 3\n choi_obj = ChoiQutip(choi_matrix, numb_qubits, dim_in, dim_out)\n\n # Check if the two constructed krauss operators are the same.\n assert check_two_sets_of_krauss_are_same(krauss_ops, choi_obj.kraus_operators(), numb_qubits,\n dim_in, dim_out)\n\n # Test dimensions must match the choi matrix specified.\n assert_raises(ValueError, ChoiQutip, choi_matrix, numb_qubits, 3, 3)\n assert_raises(ValueError, ChoiQutip, choi_matrix, numb_qubits, 2, 2)\n assert_raises(ValueError, ChoiQutip, choi_matrix, [1, 2], 2, 3)", "def channel(self):\n return Channel({'id': self.channel_id, 'connection': self.connection})", "def fromDCM(cls,C):\n gamma=np.trace(C)\n w2=(1+gamma)/4.\n Ckk=np.diag(C)\n q2=(1+2*Ckk-gamma)/4.\n q2=np.array([q2[0],q2[1],q2[2],w2])\n\n max_index = np.argmax(q2)\n q=np.zeros(4)\n q[max_index] = np.sqrt(q2[max_index])\n d = 4.*q[max_index]\n C11,C12,C13,C21,C22,C23,C31,C32,C33 = C.flatten()\n if max_index==3:\n q[0] = (C23-C32)/d\n q[1] = (C31-C13)/d\n q[2] = (C12-C21)/d\n elif max_index==0:\n q[3] = (C23-C32)/d\n q[1] = (C12+C21)/d\n q[2] = (C31+C13)/d\n elif max_index==1:\n q[3] = (C31-C13)/d\n q[0] = (C12+C21)/d\n q[2] = (C23+C32)/d\n elif max_index==2:\n q[3] = (C12-C21)/d\n q[0] = (C31+C13)/d\n q[1] = (C23+C32)/d\n quat= Quat(q,order=\"xyzw\")\n quat.normalize()\n return quat", "def get_channel_info(self):\n items = [('channel_number', int),\n ('range', float),\n ('sampling_rate', float),\n ('digitisation', float),\n ('offset', float),\n ]\n\n attrs = self['/UniqueGlobalKey/channel_id'].attrs\n info = {key: converter(attrs[key]) for key, converter in items}\n new_names = [('range','channel_range'),\n ('sampling_rate', 'channel_sampling_rate'),\n ('digitisation', 'channel_digitisation'),\n ('offset', 'channel_offset'),\n ]\n for old, new in new_names:\n info[new] = info[old]\n del info[old]\n return info", "def __init__(self, parent): \n self.signal_group_name = _qstring(parent.rhd)\n self.signal_group_header = _qstring(parent.rhd)\n self.signal_group_enabled = (np.int16(struct.unpack('h', parent.rhd.read(2)))[0] == 1)\n self.number_of_channels = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.number_of_amplifier_channels = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n \n self.channels = {}\n #if there are channels:\n if self.signal_group_enabled and self.number_of_channels != 0: \n for i in range(self.number_of_channels):\n c = Channel(parent)\n self.channels[c.native_channel_name] = c", "def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = 
self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item", "def get_mq(a_fm, description, quark_alias):\n quark = conventions.quark_masses\n mask = utils.bundle_mask(quark, a_fm=a_fm, description=description, alias=quark_alias)\n return utils.extract_unique(quark[mask], 'mq')", "def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. - xx2 - yy2\n\n return rmat", "def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)", "def from_float(cls, mod, qconfig=None):\n assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \\\n cls._FLOAT_MODULE.__name__\n if not qconfig:\n assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n assert mod.qconfig, 'Input float module must have a valid qconfig'\n qconfig = mod.qconfig\n conv, bn = mod[0], mod[1]\n qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,\n conv.stride, conv.padding, conv.dilation,\n conv.groups, conv.bias is not None,\n conv.padding_mode,\n bn.eps, bn.momentum,\n False,\n qconfig)\n qat_convbn.weight = conv.weight\n qat_convbn.bias = conv.bias\n qat_convbn.gamma = bn.weight\n qat_convbn.beta = bn.bias\n qat_convbn.running_mean = bn.running_mean\n qat_convbn.running_var = bn.running_var\n qat_convbn.num_batches_tracked = bn.num_batches_tracked\n return qat_convbn", "def _gen_chamnet_v1(channel_multiplier, num_classes=1000, **kwargs):\n arch_def = [\n ['ir_r1_k3_s1_e1_c24'],\n ['ir_r2_k7_s2_e4_c48'],\n ['ir_r5_k3_s2_e7_c64'],\n ['ir_r7_k5_s2_e12_c56'],\n ['ir_r5_k3_s1_e8_c88'],\n ['ir_r4_k3_s2_e7_c152'],\n ['ir_r1_k3_s1_e10_c104'],\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def),\n num_classes=num_classes,\n stem_size=32,\n num_features=1280, # no idea what this is? 
try mobile/mnasnet default?\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n **kwargs\n )\n return model", "def __init__(self, qubit, bit, circuit=None):\n super().__init__(\"measure\", [], [qubit], [bit], circuit)", "def quat2dcm(quat):\n q = asarray(quat).flatten()\n\n a, b, c, d = q[0], q[1], q[2], q[3]\n c11 = a * a + b * b - c * c - d * d\n c12 = 2 * (b * c - a * d)\n c13 = 2 * (b * d + a * c)\n c21 = 2 * (b * c + a * d)\n c22 = a * a - b * b + c * c - d * d\n\n c23 = 2 * (c * d - a * b)\n c31 = 2 * (b * d - a * c)\n c32 = 2 * (c * d + a * b)\n c33 = a * a - b * b - c * c + d * d\n\n return array([[c11, c12, c13],\n [c21, c22, c23],\n [c31, c32, c33]])", "def __init__(self, quant_arc_interface):\n\n super().__init__()\n self.args = quant_arc_interface.args\n self.q_params = nn.Parameter(self.args.q_delta * torch.randn(self.args.q_depth * quant_arc_interface.second_qubits))\n self.qai = quant_arc_interface", "def from_q(self, q: np.ndarray) -> np.ndarray:\n return self.from_quaternion(self, q)", "def construct_circuit(self, x, qr=None, inverse=False):\n if not isinstance(x, np.ndarray):\n raise TypeError(\"x must be numpy array.\")\n if x.ndim != 1:\n raise ValueError(\"x must be 1-D array.\")\n if x.shape[0] != self._num_qubits:\n raise ValueError(\"number of qubits and data dimension must be the same.\")\n if qr is None:\n qr = QuantumRegister(self._num_qubits, name='q')\n qc = self._constructor_function(x, qr, inverse, *self._feature_param)\n #qc.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/custom.png')\n return qc", "def __init__(self, channel_id=None, name=None, owner_id=None, comparison_key=None, has_password=None, motd=None, allowed=None, operators=None, blocked=None, muted=None): # noqa: E501 # noqa: E501\n\n self._channel_id = None\n self._name = None\n self._owner_id = None\n self._comparison_key = None\n self._has_password = None\n self._motd = None\n self._allowed = None\n self._operators = None\n self._blocked = None\n self._muted = None\n self.discriminator = None\n\n self.channel_id = channel_id\n self.name = name\n self.owner_id = owner_id\n self.comparison_key = comparison_key\n self.has_password = has_password\n self.motd = motd\n self.allowed = allowed\n self.operators = operators\n self.blocked = blocked\n self.muted = muted", "def __init__(self, quant_arc_interface):\n\n super().__init__()\n self.args = quant_arc_interface.args \n self.q_params = nn.Parameter(self.args.q_delta * torch.randn(self.args.q_depth * self.args.n_qubits))\n self.qai = quant_arc_interface", "def __init__(self, qubit, circ=None):\n super().__init__(\"s\", [], [qubit], circ)", "def test_to_qcschema(self):\n # the molecule has no coordinates so this should fail\n ethanol = Molecule.from_smiles(\"CCO\")\n with pytest.raises(InvalidConformerError):\n qcschema = ethanol.to_qcschema()\n\n # now remake the molecule from the sdf\n ethanol = Molecule.from_file(get_data_file_path(\"molecules/ethanol.sdf\"))\n # make sure that requests to missing conformers are caught\n with pytest.raises(InvalidConformerError):\n qcschema = ethanol.to_qcschema(conformer=1)\n # now make a valid qcschema and check its properties\n qcschema = ethanol.to_qcschema(extras={\"test_tag\": \"test\"})\n # make sure the properties match\n charge = 0\n connectivity = [\n (0, 1, 1.0),\n (0, 4, 1.0),\n (0, 5, 1.0),\n (0, 6, 1.0),\n (1, 2, 1.0),\n (1, 7, 1.0),\n (1, 8, 1.0),\n (2, 3, 1.0),\n ]\n symbols = [\"C\", \"C\", \"O\", \"H\", \"H\", \"H\", \"H\", \"H\", \"H\"]\n\n def 
assert_check():\n assert charge == qcschema.molecular_charge\n assert connectivity == qcschema.connectivity\n assert symbols == qcschema.symbols.tolist()\n assert (\n qcschema.geometry.all() == ethanol.conformers[0].m_as(unit.bohr).all()\n )\n\n assert_check()\n assert qcschema.extras[\"test_tag\"] == \"test\"\n assert qcschema.extras[\n \"canonical_isomeric_explicit_hydrogen_mapped_smiles\"\n ] == ethanol.to_smiles(mapped=True)\n # # now run again with no extras passed, only cmiles entry will be present with fix-720\n qcschema = ethanol.to_qcschema()\n assert_check()\n assert qcschema.extras[\n \"canonical_isomeric_explicit_hydrogen_mapped_smiles\"\n ] == ethanol.to_smiles(mapped=True)", "def mobilenetv2_q(pretrained=False, **kwargs):\n model = MobileNetV2Q(**kwargs)\n if pretrained:\n load_fake_quantized_state_dict(model, model_zoo.load_url(model_urls['mobilenetv2'], map_location='cpu'),\n 'mobilenetv2_q_map.json')\n return model", "def makeCaseQNs(G):\n case = G('MoleculeQnCase')\n if not case: return ''\n\n ElecStateLabel = G(\"MoleculeQNElecStateLabel\")\n elecInv = G(\"MoleculeQNelecInv\")\n elecRefl = G(\"MoleculeQNelecRefl\")\n vi = G(\"MoleculeQNvi\")\n viMode = G(\"MoleculeQNviMode\")\n vibInv = G(\"MoleculeQNvibInv\")\n vibSym = G(\"MoleculeQNvibSym\")\n vibSymGroup = G(\"MoleculeQNvibSymGroup\")\n J = G(\"MoleculeQNJ\")\n Ka = G(\"MoleculeQNKa\")\n Kc = G(\"MoleculeQNKc\")\n rotSym = G(\"MoleculeQNrotSym\")\n rotSymGroup = G(\"MoleculeQNrotSymGroup\")\n I = G(\"MoleculeQNI\")\n InuclSpin = G(\"MoleculeQNInuclSpin\")\n Fj = G(\"MoleculeQNFj\")\n Fjj = G(\"MoleculeQNFjj\")\n FjnuclSpin = G(\"MoleculeQNFjnuclSpin\")\n F = G(\"MoleculeQNF\")\n FnuclSpin = G(\"MoleculeQNFnuclSpin\")\n r = G(\"MoleculeQNr\")\n rName = G(\"MoleculeQNrName\")\n parity = G(\"MoleculeQNparity\")\n S = G(\"MoleculeQNS\")\n N = G(\"MoleculeQNN\")\n v = G(\"MoleculeQNv\")\n F1 = G(\"MoleculeQNF1\")\n F1nuclSpin = G(\"MoleculeQNF1nuclSpin\")\n asSym = G(\"MoleculeQNasSym\")\n Lambda = G(\"MoleculeQNLambda\")\n Sigma = G(\"MoleculeQNSigma\")\n Omega = G(\"MoleculeQNOmega\")\n kronigParity = G(\"MoleculeQNkronigParity\")\n SpinComponentLabel = G(\"MoleculeQNSpinComponentLabel\")\n li = G(\"MoleculeQNli\")\n liMode = G(\"MoleculeQNliMode\")\n l = G(\"MoleculeQNl\")\n vibRefl = G(\"MoleculeQNvibRefl\")\n v1 = G(\"MoleculeQNv1\")\n v2 = G(\"MoleculeQNv2\")\n v3 = G(\"MoleculeQNv3\")\n l2 = G(\"MoleculeQNl2\")\n F2 = G(\"MoleculeQNF2\")\n F2nuclSpin = G(\"MoleculeQNF2nuclSpin\")\n K = G(\"MoleculeQNK\")\n\n result = '<Case xsi:type=\"case:Case\" caseID=\"%s\" xmlns:case=\"http://vamdc.org/xml/xsams/%s/cases/%s\">' % (case, XSAMS_VERSION, case)\n result += '<case:QNs>'\n if ElecStateLabel: result += '<case:ElecStateLabel>%s</case:ElecStateLabel>'%ElecStateLabel\n if elecInv: result += '<case:elecInv>%s</case:elecInv>'%elecInv\n if elecRefl: result += '<case:elecRefl>%s</case:elecRefl>'%elecRefl\n if Lambda: result += '<case:Lambda>%s</case:Lambda>'%Lambda\n if Sigma: result += '<case:Sigma>%s</case:Sigma>'%Sigma\n if Omega: result += '<case:Omega>%s</case:Omega>'%Omega\n if S: result += '<case:S>%s</case:S>'%S\n if v: result += '<case:v>%s</case:v>'%v\n if v1: result += '<case:v1>%s</case:v1>'%v1\n if v2: result += '<case:v2>%s</case:v2>'%v2\n if l2: result += '<case:l2>%s</case:l2>'%l2\n if v3: result += '<case:v3>%s</case:v3>'%v3\n if vi:\n for val,i in enumerate(makeiter(vi)):\n result += '<case:vi mode=\"%s\">%s</case:vi>'%(makeiter(viMode)[i],val)\n if li:\n for val,i in enumerate(makeiter(li)):\n result += 
'<case:vi mode=\"%s\">%s</case:vi>'%(makeiter(liMode)[i],val)\n if l: result += '<case:l>%s</case:l>'%l\n if vibInv: result += '<case:vibInv>%s</case:vibInv>'%vibInv\n if vibRefl: result += '<case:vibRefl>%s</case:vibRefl>'%vibRefl\n if vibSym:\n if vibSymGroup: result += '<case:vibSym group=\"%s\">%s</case:vibSym>'%(vibSymGroup,vibSym)\n else: result += '<case:vibSym>%s</case:vibSym>'%vibSym\n if J: result += '<case:J>%s</case:J>'%J\n if K: result += '<case:K>%s</case:K>'%K\n if Ka: result += '<case:Ka>%s</case:Ka>'%Ka\n if Kc: result += '<case:Kc>%s</case:Kc>'%Kc\n if rotSym:\n if rotSymGroup: result += '<case:rotSym group=\"%s\">%s</case:rotSym>'%(rotSymGroup,rotSym)\n else: result += '<case:rotSym>%s</case:rotSym>'%rotSym\n if I: result += '<case:I nuclearSpinRef=\"%s\">%s</case:I>'%(InuclSpin,I)\n if Fj:\n for val,i in enumerate(makeiter(Fj)):\n result += '<case:Fj j=\"%s\" nuclearSpinRef=\"%s\">%s</case:Fj>'%(makeiter(Fjj)[i],makeiter(FjnuclSpin)[i],val)\n if N: result += '<case:N>%s</case:N>'%N\n if SpinComponentLabel: result += '<case:SpinComponentLabel>%s</case:SpinComponentLabel>'%SpinComponentLabel\n if F1: result += '<case:F1 nuclearSpinRef=\"%s\">%s</case:F1>'%(F1nuclSpin,F1)\n if F2: result += '<case:F2 nuclearSpinRef=\"%s\">%s</case:F2>'%(F2nuclSpin,F2)\n if F: result += '<case:F nuclearSpinRef=\"%s\">%s</case:F>'%(FnuclSpin,F)\n if r:\n for val,i in enumerate(makeiter(r)):\n result += '<case:r name=\"%s\">%s</case:r>'%(makeiter(rName)[i],val)\n if parity: result += '<case:parity>%s</case:parity>'%parity\n if kronigParity: result += '<case:kronigParity>%s</case:kronigParity>'%kronigParity\n if asSym: result += '<case:asSym>%s</case:asSym>'%asSym\n\n result += '</case:QNs>'\n return result+'</Case>'", "def quasar_to_dict(q: Circuit) -> Dict:\n return dict(\n instructions=quasar_to_list(q),\n qubits=list(q.qubits),\n times=list(q.times),\n times_and_qubits=list(q.times_and_qubits),\n )", "def part(self, channel):\n raise NotImplementedError", "def create_book_ticker_channel(self, symbol: str) -> str:", "def __repr__(self) -> str:\n return (\n \"<TunnellingRequest \"\n f'communication_channel_id=\"{self.communication_channel_id}\" '\n f'sequence_counter=\"{self.sequence_counter}\" '\n f'cemi=\"{self.raw_cemi.hex()}\" />'\n )", "def scopechannels(tree, scope_chan):\n scope_chnum = scope_chan + 1\n chanpath = '.TEK_2024B.TEK.CHANNEL_' + str(scope_chnum)\n datapath = chanpath + '.DATA'\n settingpath = chanpath + '.SETTINGS'\n tree.addNode(chanpath)\n AddNodeWithTag(tree, chanpath + ':STATE', 'TEXT', 'STATE_TEKCH' +\n str(scope_chnum))\n AddNodeWithTag(tree, chanpath + ':DATA', 'SIGNAL', 'DATA_TEKCH' +\n str(scope_chnum))\n tree.addNode(settingpath)\n AddNumericWithUnit(tree, datapath + ':TIME', 'TIMEVALUES_TEKCH' +\n str(scope_chnum), 's')\n AddNumericWithUnit(tree, datapath + ':VOLTAGE', 'VOLTAGEVALUES_TEKCH' +\n str(scope_chnum), 'V')\n AddNodeWithTag(tree, settingpath + ':CHANNEL_NAME', 'TEXT',\n 'CHANNEL_NAME_TEKCH' + str(scope_chnum))\n AddNodeWithTag(tree, settingpath + ':GROUND_STATE', 'TEXT',\n 'GROUND_STATE_TEKCH' + str(scope_chnum))\n AddNodeWithTag(tree, settingpath + ':N_SAMPLES', 'NUMERIC',\n 'NSAMPLES_TEKCH' + str(scope_chnum))\n AddNumericWithUnit(tree, settingpath + ':DELTA_T', 'DT_TEKCH' +\n str(scope_chnum), 's')\n AddNodeWithTag(tree, settingpath + ':N_PRE_SAMP', 'NUMERIC',\n 'NPRESAMP_TEKCH' + str(scope_chnum))\n AddNumericWithUnit(tree, settingpath + ':TIME_DIV', 'TIMEPERDIV_TEKCH' +\n str(scope_chnum), 's/div')\n AddNumericWithUnit(tree, settingpath + 
':VOLT_DIV', 'VOLTSPERDIV_TEKCH' +\n str(scope_chnum), 'V/div')\n AddNodeWithTag(tree, settingpath + ':PROBE_ATTEN', 'NUMERIC',\n 'PROBEATTENUATION_TEKCH' + str(scope_chnum))", "def quantum_geometric_tensor(self, qgt_type):\n raise NotImplementedError # pragma: no cover", "def ket(self: Qs) -> Qs:\n\n if self.qs_type == \"ket\":\n return self\n\n ket = conjs(deepcopy(self))\n ket.rows = self.dim\n ket.columns = 1\n\n ket.qs_type = \"ket\" if self.dim > 1 else \"scalar_q\"\n\n return ket", "def _gen_mixnet_s(channel_multiplier=1.0, num_classes=1000, **kwargs):\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c16'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu\n # stage 2, 56x56 in\n ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def),\n num_classes=num_classes,\n stem_size=16,\n num_features=1536,\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n act_fn=F.relu,\n **kwargs\n )\n return model", "def channel_list(self):\n return_str = self.scpi.query_channel_catalog().split(',')\n channel_dct = {}\n for i in range(int(len(return_str)/2)):\n channel_dct[int(return_str[2 * i])] = return_str[2 * i + 1]\n return channel_dct", "def _convert_q2w(self):\n q_kg = self.data['Specific_Humidity'] / 1000\n\n self.data['Mixing_Ratio'] = (q_kg / (1 - q_kg)) * 1000", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def __init__(self, in_ch=2048, out_ch=256):\n super(ChannelCompress, self).__init__()\n num_bottleneck = 1000\n add_block = []\n add_block += [nn.Linear(in_ch, num_bottleneck)]\n add_block += [nn.BatchNorm1d(num_bottleneck)]\n add_block += [nn.ReLU()]\n\n add_block += [nn.Linear(num_bottleneck, 500)]\n add_block += [nn.BatchNorm1d(500)]\n add_block += [nn.ReLU()]\n add_block += [nn.Linear(500, out_ch)]\n\n # Extra BN layer, need to be removed\n #add_block += [nn.BatchNorm1d(out_ch)]\n\n add_block = nn.Sequential(*add_block)\n add_block.apply(weights_init_kaiming)\n self.model = add_block", "def QCNN_layer_gen(self):\n pixels = self.filter_size**2\n # filter size: 2^n only for this version!\n if np.log2(pixels) % 1 != 0:\n raise NotImplementedError(\"filter size: 2^n only available\")\n cirq_qubits = cirq.GridQubit.rect(self.filter_size, self.filter_size)\n # mapping input data to circuit\n input_circuit = cirq.Circuit()\n input_params = [sympy.symbols('a%d' %i) for i in range(pixels)]\n for i, qubit in enumerate(cirq_qubits):\n input_circuit.append(cirq.rx(np.pi*input_params[i])(qubit))\n # apply learnable gate set to QCNN 
circuit\n QCNN_circuit = cirq.Circuit()\n step_size = [2**i for i in range(np.log2(pixels).astype(np.int32))]\n for step in step_size:\n for target in range(0, pixels, 2*step):\n QCNN_circuit.append(self._QConv(step, target, cirq_qubits))\n # merge the circuits\n full_circuit = cirq.Circuit()\n full_circuit.append(input_circuit)\n full_circuit.append(QCNN_circuit)\n self.circuit = full_circuit # save circuit to the QCNN layer obj.\n self.params = input_params + self.learning_params\n self.op = cirq.Z(cirq_qubits[0])", "def ADM_QCD2(nf):\n # Mixing of Q_1^(7) into Q_{5,q}^(7) and Q_2^(7) into Q_{6,q}^(7), from Hill et al. [1409.8290]. Note that we have different prefactors and signs. \n gamma_gq = -32/3\n # Mixing of Q_3^(7) into Q_{7,q}^(7) and Q_4^(7) into Q_{8,q}^(7), from Hill et al. [1409.8290]. Note that we have different prefactors and signs. \n gamma_5gq = 8\n gamma_QCD2_gq = np.array([5*[gamma_gq]])\n gamma_QCD2_5gq = np.array([5*[gamma_5gq]])\n gamma_QCD2_1 = np.zeros((34,154))\n gamma_QCD2_2 = np.hstack((np.zeros((1,38)),gamma_QCD2_gq,np.zeros((1,111))))\n gamma_QCD2_3 = np.hstack((np.zeros((1,46)),gamma_QCD2_gq,np.zeros((1,103))))\n gamma_QCD2_4 = np.hstack((np.zeros((1,54)),gamma_QCD2_5gq,np.zeros((1,95))))\n gamma_QCD2_5 = np.hstack((np.zeros((1,62)),gamma_QCD2_5gq,np.zeros((1,87))))\n gamma_QCD2_6 = np.zeros((116,154))\n gamma_QCD2 = [np.vstack((gamma_QCD2_1, gamma_QCD2_2, gamma_QCD2_3, gamma_QCD2_4, gamma_QCD2_5, gamma_QCD2_6))]\n\n if nf == 5:\n return gamma_QCD2\n elif nf == 4:\n return np.delete(np.delete(gamma_QCD2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 1)\\\n , [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 2)\n elif nf == 3:\n return np.delete(np.delete(gamma_QCD2, [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 1)\\\n , [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 2)\n else:\n raise Exception(\"nf has to be 3, 4 or 5\")", "def __init__(self, parent): \n \n self.parent = parent\n \n self.custom_channel_name = _qstring(parent.rhd)\n self.native_channel_name = _qstring(parent.rhd)\n self.native_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.custom_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.signal_type = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.channel_enabled = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.chip_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.board_stream = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_trigger_mode= np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_threshold = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_trigger_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_edge_polarity = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.electrode_impedance_magnitude = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n self.electrode_impedance_phase = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n\n if self.signal_type == 0 and self.channel_enabled:#Add name to the amplifier channel list\n parent._AMPLIFIER_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 1 and self.channel_enabled:#Add name to the aux channel list\n 
parent._AUX_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 2 and self.channel_enabled:#Supply voltage\n parent._SUPPLY_VOLTAGE_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 3 and self.channel_enabled:#usb board adc input channel\n parent._ADC_INPUT_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 4 and self.channel_enabled:#usb board digital input channel\n parent._DIGITAL_INPUT_CHANNELS.append(self.native_channel_name)", "def line_to_metric(line):\n # convert line of csv to list\n # sigh, the first field is not quoted\n (date, time, fields) = line.split(' ', 2)\n metric = {'timestamp':\" \".join((date, time))}\n fields = fields.split(',')\n fields = [field.strip() for field in fields]\n # convert list of k=v to dict\n fields = [field.split('=') for field in fields]\n metric.update(dict(fields))\n # normalize keys, deserialize values, add derived values\n # would be more rigorous to have a seprate table for derived values\n metric['callerid'] = metric.pop('CALLERID(number)')\n metric['uniqueid'] = metric.pop('UNIQUEID')\n metric['channel'] = metric.pop('CHANNEL')\n # deserialize values\n metric['timestamp'] = datetime.datetime(\n *map(int, re.split('[^\\d]', metric['timestamp'])[:-1]))\n # split ext from eg SIP/668-000002f1 SIP/callcentric-default-000002f3\n (_proto, extension) = metric['channel'].split('/')\n extension = '-'.join(extension.split('-')[:-1])\n metric['channel_extension'] = extension\n return metric", "def create_circuit(self, do_measure=True):\n circuit_name = str(uuid.uuid4())\n circuit = self.quantum_program.create_circuit(circuit_name,\n [self.quantum_register],\n [self.classical_register])\n\n for j in range(self.depth):\n if self.qubits == 1:\n op_ind = 0\n else:\n op_ind = random.randint(0, 1)\n if op_ind == 0: # U3\n qind = random.randint(0, self.qubits - 1)\n circuit.u3(random.random(), random.random(), random.random(),\n self.quantum_register[qind])\n elif op_ind == 1: # CX\n source, target = random.sample(range(self.qubits), 2)\n circuit.cx(self.quantum_register[source],\n self.quantum_register[target])\n\n if do_measure:\n nmeasure = random.randint(1, self.qubits)\n for j in range(nmeasure):\n qind = random.randint(0, self.qubits - 1)\n # doing this if here keeps the RNG from depending on\n # whether measurements are done.\n circuit.measure(self.quantum_register[qind],\n self.classical_register[qind])\n\n return circuit.qasm()", "def to_cms_rhythm(channel):\n ret = []\n for i, x in enumerate(self.pitches[channel]):\n if x > 0:\n ret.append(x)\n else:\n ret.append(-1)\n return ret", "def __init__(self, frame_type, channel_number):\n self.frame_type = frame_type\n self.channel_number = channel_number", "def create_channel_dict(self, chan_name, clim=None, first_chan=False):\n if chan_name == 'Retardance':\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else 1000.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 100.0\n elif chan_name == 'Orientation':\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else np.pi\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else np.pi\n\n elif chan_name == 'Phase3D':\n min = clim[2] if clim else -10.0\n max = clim[3] if clim else 10.0\n start = clim[0] if clim else -0.2\n end = clim[1] if clim else 0.2\n\n elif chan_name == 'BF':\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else 65535.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 5.0\n\n elif chan_name == 'S0':\n min = clim[2] if clim else 
0.0\n max = clim[3] if clim else 65535.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 1.0\n\n elif chan_name == 'S1':\n min = clim[2] if clim else 10.0\n max = clim[3] if clim else -10.0\n start = clim[0] if clim else -0.5\n end = clim[1] if clim else 0.5\n\n elif chan_name == 'S2':\n min = clim[2] if clim else -10.0\n max = clim[3] if clim else 10.0\n start = clim[0] if clim else -0.5\n end = clim[1] if clim else 0.5\n\n elif chan_name == 'S3':\n min = clim[2] if clim else -10\n max = clim[3] if clim else 10\n start = clim[0] if clim else -1.0\n end = clim[1] if clim else 1.0\n\n else:\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else 65535.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 65535.0\n\n dict_ = {'active': first_chan,\n 'coefficient': 1.0,\n 'color': 'FFFFFF',\n 'family': 'linear',\n 'inverted': False,\n 'label': chan_name,\n 'window': {'end': end, 'max': max, 'min': min, 'start': start}\n }\n\n return dict_", "def q(self, q: ComType):\n if isinstance(q, complex):\n self._pwr = q\n else:\n self._pwr = complex(0, q)", "def _gen_chamnet_v2(channel_multiplier, num_classes=1000, **kwargs):\n arch_def = [\n ['ir_r1_k3_s1_e1_c24'],\n ['ir_r4_k5_s2_e8_c32'],\n ['ir_r6_k7_s2_e5_c48'],\n ['ir_r3_k5_s2_e9_c56'],\n ['ir_r6_k3_s1_e6_c56'],\n ['ir_r6_k3_s2_e2_c152'],\n ['ir_r1_k3_s1_e6_c112'],\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def),\n num_classes=num_classes,\n stem_size=32,\n num_features=1280, # no idea what this is? try mobile/mnasnet default?\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n **kwargs\n )\n return model", "def scopechannels1(tree, scope_chan):\n scope_chnum = scope_chan + 1\n chanpath = '.TEK_2024B.TEK1.CHANNEL_' + str(scope_chnum)\n datapath = chanpath + '.DATA'\n settingpath = chanpath + '.SETTINGS'\n tree.addNode(chanpath)\n AddNodeWithTag(tree, chanpath + ':STATE', 'TEXT', 'STATE_TEK1CH' +\n str(scope_chnum))\n AddNodeWithTag(tree, chanpath + ':DATA', 'SIGNAL', 'DATA_TEK1CH' +\n str(scope_chnum))\n tree.addNode(settingpath)\n AddNumericWithUnit(tree, datapath + ':TIME', 'TIMEVALUES_TEK1CH' +\n str(scope_chnum), 's')\n AddNumericWithUnit(tree, datapath + ':VOLTAGE', 'VOLTAGEVALUES_TEK1CH' +\n str(scope_chnum), 'V')\n AddNodeWithTag(tree, settingpath + ':CHANNEL_NAME', 'TEXT',\n 'CHANNEL_NAME_TEK1CH' + str(scope_chnum))\n AddNodeWithTag(tree, settingpath + ':GROUND_STATE', 'TEXT',\n 'GROUND_STATE_TEK1CH' + str(scope_chnum))\n AddNodeWithTag(tree, settingpath + ':N_SAMPLES', 'NUMERIC',\n 'NSAMPLES_TEK1CH' + str(scope_chnum))\n AddNumericWithUnit(tree, settingpath + ':DELTA_T', 'DT_TEK1CH' +\n str(scope_chnum), 's')\n AddNodeWithTag(tree, settingpath + ':N_PRE_SAMP', 'NUMERIC',\n 'NPRESAMP_TEK1CH' + str(scope_chnum))\n AddNumericWithUnit(tree, settingpath + ':TIME_DIV', 'TIMEPERDIV_TEK1CH' +\n str(scope_chnum), 's/div')\n AddNumericWithUnit(tree, settingpath + ':VOLT_DIV', 'VOLTSPERDIV_TEK1CH' +\n str(scope_chnum), 'V/div')\n AddNodeWithTag(tree, settingpath + ':PROBE_ATTEN', 'NUMERIC',\n 'PROBEATTENUATION_TEK1CH' + str(scope_chnum))", "def makeStoichiometricMatrix(self, var, typed_token):\n # TODO: Change stoichiometric matrix to include the integers of reactions\n model = self.model\n size = self.size_of_variable(var)\n mat = np.zeros(size)\n for i, instance in enumerate(model.typed_tokens[typed_token].instances):\n for j, conversion in enumerate(model.typed_tokens[\n typed_token].conversions):\n if instance in conversion['reactants']:\n mat[i, j] = -1.\n elif instance 
in conversion['products']:\n mat[i, j] = 1.\n if typed_token == var.index_structures[1]: # Check sequence\n return np.transpose(mat)\n return mat", "def test_QFT(self):\n op = qml.QFT(wires=range(3))\n res = op.matrix()\n exp = QFT\n assert np.allclose(res, exp)", "def _qiskit_to_qlm_param(prog, variables, param):\n if isinstance(param, Parameter):\n name = param.name\n for var in variables:\n if var.name == name:\n return var\n var = prog.new_var(float, name)\n variables.append(var)\n elif isinstance(param, ParameterExpression):\n expression = param._symbol_expr\n return _sympy_arg_to_arith_expr(prog, variables, param, expression)\n return var", "def data_from_ucr_query(self):\n raise NotImplementedError", "def qobj_to_circuits(qobj):\n if qobj.experiments:\n circuits = []\n for x in qobj.experiments:\n if hasattr(x.header, 'compiled_circuit_qasm'):\n circuits.append(\n load_qasm_string(x.header.compiled_circuit_qasm))\n return circuits\n # TODO(mtreinish): add support for converting a qobj if the qasm isn't\n # embedded in the header\n return None", "def from_matrix(matrix: types.Matrix) -> \"MatrixLieGroup\":", "def from_mypackage(mycosmo):\n # Cosmology provides a nice method \"mapping\", so all that needs to\n # be done here is create a dictionary of the parameters\n mapping = {}\n mapping[\"H0\"] = mycosmo.hubble_parameter\n mapping[\"Om0\"] = mycosmo.Omega_matter_initial\n ... # keep building mapping\n\n return Cosmology.from_format(\n mapping, format=\"mapping\", move_to_meta=True\n ) # extra info -> meta", "def getUIQM(x):\n x = x.astype(np.float32)\n ### from https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7300447\n # c1 = 0.4680; c2 = 0.2745; c3 = 0.2576\n ### from https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7300447\n c1 = 0.0282\n c2 = 0.2953\n c3 = 3.5753\n\n uicm = _uicm(x)\n uism = _uism(x)\n uiconm = _uiconm(x, 8)\n uiqm = (c1 * uicm) + (c2 * uism) + (c3 * uiconm)\n return uiqm", "def from_cirq(cls, circuit:cirq.Circuit):\n qubits = quple.get_circuit_qubits(circuit)\n symbols = quple.get_circuit_symbols(circuit)\n cq = cls(qubits)\n cq.append(circuit)\n return cq", "def convert_to_q(self):\n if self.measure == 'Q':\n warnings.warn('Parameters are already converted to Q!')\n else:\n kappa_sp = self.kappa_s\n kappa_yp = self.kappa_y\n self.kappa_s = self.kappa_s - self.lmbd_s * self.eta_s\n self.kappa_y = self.kappa_y - self.lmbd_y * self.eta_y\n self.scale = kappa_sp / self.kappa_s\n self.mean_v *= (kappa_yp / self.kappa_y * self.scale)\n self.lmbd = 0\n self.eta_y *= (self.scale**.5)\n self.measure = 'Q'\n self.update_ajd()", "def cmNew(self, cs, ta, qa):\n\t return self.ciNew(cs, ta, qa)", "def initialize_new_questionnaire(questionnaire, option_type, uuid):\r\n q = {}\r\n if (type(questionnaire) == dict):\r\n for key, val in questionnaire.items():\r\n if key != 'index':\r\n\r\n q[key] = [val] if type(val) != list else val\r\n questionnaire = pd.DataFrame(q)\r\n\r\n\r\n if \"_questionnaire\" not in option_type:\r\n option_type = option_type + \"_questionnaire\"\r\n\r\n option_type = option_type.lower()\r\n if 'option_type' not in questionnaire:\r\n questionnaire['option_type'] = [option_type]\r\n questionnaire['uuid'] = [uuid]\r\n questionnaire['timestamp'] = [datetime.datetime.utcnow()]\r\n print(\"this is questionaire: \", questionnaire)\r\n\r\n questionnaire=questionnaire.set_index('uuid')\r\n print(\"this is questionaire: \", questionnaire)\r\n questionnaire.to_sql(option_type, con=Database.DATABASE.engine, if_exists=\"append\", index=True)" ]
[ "0.7162368", "0.63052386", "0.62322587", "0.6150985", "0.54366237", "0.5430608", "0.5400934", "0.5215017", "0.51926947", "0.5181921", "0.512755", "0.4988747", "0.49690673", "0.4939683", "0.49243224", "0.49112472", "0.48836026", "0.48823464", "0.48380566", "0.48145837", "0.47952867", "0.47652432", "0.47649658", "0.4747722", "0.47333646", "0.47258216", "0.47243524", "0.47145712", "0.47036743", "0.47036535", "0.4699065", "0.46985772", "0.46826252", "0.46816325", "0.4677915", "0.46769702", "0.46751314", "0.4673023", "0.467194", "0.46670058", "0.46624282", "0.46566248", "0.4655585", "0.46552005", "0.46379018", "0.4634235", "0.46237665", "0.4615934", "0.4615199", "0.46091625", "0.46032324", "0.4591363", "0.45577943", "0.45562044", "0.45520556", "0.45507756", "0.45476687", "0.45331505", "0.45303336", "0.45264685", "0.45045078", "0.44910365", "0.4481688", "0.4479749", "0.4476117", "0.44753096", "0.44699126", "0.4467631", "0.44675633", "0.44643328", "0.4458166", "0.44556636", "0.44492638", "0.44468057", "0.44443285", "0.44436625", "0.4436292", "0.4435118", "0.44336942", "0.44276372", "0.44228733", "0.4421224", "0.44181964", "0.4410322", "0.44083562", "0.44057012", "0.44021603", "0.44008726", "0.43987778", "0.43980727", "0.43974805", "0.43972296", "0.43965116", "0.43921492", "0.4392143", "0.43915564", "0.43910018", "0.43872318", "0.43866447", "0.4386301" ]
0.7086064
1
Create a Qiskit representation of a quantum channel from a myQLM representation of a quantum channel.
def qchannel_to_qiskit(representation): rep = representation.representation # Find what representation it is. # Then create the corresponding matrix and shape it like qiskit is expecting it. # Finally, create the qiskit representation from that matrix. if rep in (RepresentationType.PTM, RepresentationType.CHOI): matri = representation.matrix data_re = [] data_im = [] for i in range(matri.nRows): for j in range(matri.nCols): data_re.append(matri.data[i * matri.nRows + j].re + 0.j) data_im.append(matri.data[i * matri.nRows + j].im) data = np.array(data_re) data.imag = np.array(data_im) data = data.reshape((matri.nRows, matri.nCols)) return PTM(data) if (rep == RepresentationType.PTM) else Choi(data) if rep in (RepresentationType.CHI, RepresentationType.SUPEROP): final_data = [] for matri in representation.basis: data_re = [] data_im = [] for i in range(matri.nRows): for j in range(matri.nCols): data_re.append(matri.data[i * matri.nRows + j].re + 0.j) data_im.append(matri.data[i * matri.nRows + j].im) data = np.array(data_re) data.imag = np.array(data_im) data = data.reshape((matri.nRows, matri.nCols)) final_data.append(data) if rep == RepresentationType.CHI: return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0]) return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0]) if rep == RepresentationType.KRAUS: final_data = [] for matri in representation.kraus_ops: data_re = [] data_im = [] for i in range(matri.nRows): for j in range(matri.nCols): data_re.append(matri.data[i * matri.nRows + j].re + 0.j) data_im.append(matri.data[i * matri.nRows + j].im) data = np.array(data_re) data.imag = np.array(data_im) data = data.reshape((matri.nRows, matri.nCols)) final_data.append(data) return Kraus(final_data) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qiskit_to_qchannel(representation):\n\n qchannel = None\n qiskit_data = representation.data\n # Find what representation it is.\n # Then create the corresponding matrix (kraus_ops|basis|matrix)from the data\n # of the representation.\n # Finally, create the QuantumChannel with the RepresentationType, the arity\n # (got from the qiskit representation) and the matrix.\n if isinstance(representation, Kraus):\n kraus_ops = []\n for arr in qiskit_data:\n kraus_ops.append(array_to_matrix(arr))\n qchannel = QuantumChannel(\n representation=RepresentationType.KRAUS,\n arity=representation.num_qubits,\n kraus_ops=kraus_ops)\n elif isinstance(representation, Chi):\n basis = []\n basis.append(array_to_matrix(qiskit_data))\n qchannel = QuantumChannel(\n representation=RepresentationType.CHI,\n arity=representation.num_qubits,\n basis=basis)\n elif isinstance(representation, SuperOp):\n basis = []\n basis.append(array_to_matrix(qiskit_data))\n qchannel = QuantumChannel(\n representation=RepresentationType.SUPEROP,\n arity=representation.num_qubits,\n basis=basis)\n elif isinstance(representation, PTM):\n matri = array_to_matrix(qiskit_data)\n qchannel = QuantumChannel(\n representation=RepresentationType.PTM,\n arity=representation.num_qubits,\n matrix=matri)\n elif isinstance(representation, Choi):\n matri = array_to_matrix(qiskit_data)\n qchannel = QuantumChannel(\n representation=RepresentationType.CHOI,\n arity=representation.num_qubits,\n matrix=matri)\n return qchannel", "def qlm_to_qiskit(qlm_circuit, qubits=None):\n # Init measured qubits\n if qubits is None:\n qubits = list(range(qlm_circuit.nbqbits))\n\n qreg = QuantumRegister(qlm_circuit.nbqbits)\n creg = None\n param_list = []\n if qlm_circuit.nbcbits > 0:\n creg = ClassicalRegister(max(qlm_circuit.nbcbits, len(qubits)))\n q_circ = QuantumCircuit(qreg, creg)\n else:\n q_circ = QuantumCircuit(qreg)\n dic = _gen_qiskit_gateset(q_circ)\n for gate_op in qlm_circuit:\n if gate_op.type == OpType.GATETYPE:\n name, params = extract_syntax(\n qlm_circuit.gateDic[gate_op.gate], qlm_circuit.gateDic,\n var_dic=qlm_circuit.var_dic)\n nbctrls = name.count('C-')\n # changes variables and expressions to format used by Qiskit\n for index, param in enumerate(params):\n if isinstance(param, Variable):\n params[index] = _variable_to_parameter(\n param_list, variable=param)\n elif isinstance(param, ArithExpression):\n arith_expr_list = param.to_thrift().split()\n params[index] = _arith_expr_list_to_parameter_expression(\n param_list, arith_expr_list, param)\n try:\n if name == \"MS\":\n q_circ.ms(params[0], [qreg[i] for i in gate_op.qbits])\n else:\n if (nbctrls > 0 and name not in SUPPORTED_CTRLS):\n tmp = name\n count = 0\n gate = None\n while True:\n last = tmp\n tmp = tmp.replace(\"C-\", \"\", 1)\n if last == tmp:\n raise ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n else:\n count += 1\n gate = _get_qiskit_gate_from_name(tmp)\n if gate != None:\n gate = gate(*params).control(count)\n break\n if gate != None:\n q_circ.append(gate, [qreg[i] for i in gate_op.qbits])\n else:\n dic[name](* params + [qreg[i] for i in gate_op.qbits])\n except KeyError:\n raise ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n elif gate_op.type == OpType.MEASURE:\n for index in range(len(gate_op.qbits)):\n q_circ.measure(gate_op.qbits[index], gate_op.cbits[index])\n\n # Adding measures to unify the interface\n for qbit_index, cbit in zip(qubits, creg):\n q_circ.measure(qreg[qbit_index], cbit)\n return q_circ", "def 
test_myqlm_backend():\n circuit = Circuit()\n circuit += ops.DefinitionBit(name='ro', length=2, is_output=True)\n circuit += ops.RotateZ(qubit=0, theta=0)\n circuit += ops.PauliX(qubit=1)\n circuit += ops.MeasureQubit(qubit=0, readout='ro', readout_index=0)\n circuit += ops.MeasureQubit(qubit=1, readout='ro', readout_index=1)\n\n backend = MyQLMBackend(number_qubits=2,\n number_measurements=5)\n\n # (bit_dict, float_dict, complex_dict) = backend.run_circuit(circuit)\n # npt.assert_equal(float_dict, dict())\n # npt.assert_equal(complex_dict, dict())\n # npt.assert_equal(bit_dict['ro'], [np.array([0., 1.])] * 5)", "def qiskit_to_qlm(qiskit_circuit, sep_measures=False, **kwargs):\n prog = Program()\n qbits_num = 0\n to_measure = []\n for reg in qiskit_circuit.qregs:\n qbits_num = qbits_num + reg.size\n qbits = prog.qalloc(qbits_num)\n\n cbits_num = 0\n for reg in qiskit_circuit.cregs:\n cbits_num = cbits_num + reg.size\n cbits = prog.calloc(cbits_num)\n variables = []\n for gate_op in qiskit_circuit.data:\n if gate_op[0].name == \"barrier\" or gate_op[0].name == \"opaque\":\n continue\n qbit_args = []\n cbit_args = []\n prms = [] # gate parameters\n # Get qbit arguments\n for qarg in gate_op[1]:\n qbit_args.append(\n _get_qindex(qiskit_circuit, qarg.register.name, qarg.index))\n\n # Get cbit arguments\n for carg in gate_op[2]:\n cbit_args.append(\n _get_cindex(qiskit_circuit, carg.register.name, carg.index))\n\n # Get parameters\n for param in gate_op[0]._params:\n if isinstance(param, (Parameter, ParameterExpression)):\n prms.append(_qiskit_to_qlm_param(prog, variables, param))\n else:\n prms.append(float(param))\n # Apply measure #\n if gate_op[0].name == \"measure\":\n if sep_measures:\n to_measure.extend(qbit_args)\n else:\n prog.measure([qbits[i] for i in qbit_args],\n [cbits[i] for i in cbit_args])\n elif gate_op[0].name == \"reset\":\n prog.reset([qbits[i] for i in qbit_args],\n [cbits[i] for i in cbit_args])\n else:\n if gate_op[0].name == \"ms\":\n # In this case, the process function needs the number of qubits\n prms.append(len(qbit_args))\n # Apply gates #\n num_ctrl_qubits = None\n try:\n num_ctrl_qubits = gate_op[0].num_ctrl_qubits\n except:\n None\n gate = get_gate(gate_op[0].name, prms, num_ctrl_qubits)\n prog.apply(gate, *[qbits[i] for i in qbit_args][:gate.arity])\n if sep_measures:\n return prog.to_circ(**kwargs), list(set(to_measure))\n\n return prog.to_circ(**kwargs)", "def job_to_qiskit_circuit(qlm_job):\n # Check processing type\n assert_qpu(qlm_job.type == ProcessingType.SAMPLE,\n \"Only jobs having a SAMPLE processing type \"\n \"could be translated into Qiskit circuits\")\n\n # Convert\n return qlm_to_qiskit(qlm_job.circuit, qlm_job.qubits)", "def _create_quantum_circuit(self):\n reg_list = []\n for entry in self.regdefs:\n is_qreg = self._match_entry_type(entry, [ASTType.QREG])\n\n if is_qreg:\n reg_list.append(QuantumRegister(entry.get('qreg_num'), entry.get('qreg_name')))\n else:\n reg_list.append(ClassicalRegister(entry.get('creg_num'), entry.get('creg_name')))\n\n self.circuit = QuantumCircuit(*reg_list)\n return self.circuit", "def qubit_to_matrix(qubit, format='sympy'):\n return represent(qubit, format=format)", "def to_qobj(self): # -> \"qutip.Qobj\"\n from qutip import Qobj\n\n q_dims = [list(self.hilbert_physical.shape), list(self.hilbert_physical.shape)]\n return Qobj(np.asarray(self.to_matrix()), dims=q_dims)", "def qlm_circ_sep_meas(qiskit_circuit):\n return qiskit_to_qlm(qiskit_circuit, True)", "def quat2dcm(q):\n q0q0 = q[0] * q[0]\n q0q1 = q[0] * 
q[1]\n q0q2 = q[0] * q[2]\n q0q3 = q[0] * q[3]\n q1q1 = q[1] * q[1]\n q1q2 = q[1] * q[2]\n q1q3 = q[1] * q[3]\n q2q2 = q[2] * q[2]\n q2q3 = q[2] * q[3]\n q3q3 = q[3] * q[3]\n dcm = np.zeros((3, 3))\n dcm[0, 0] = q0q0 + q1q1 - q2q2 - q3q3\n dcm[0, 1] = 2.0*(q1q2 + q0q3)\n dcm[0, 2] = 2.0*(q1q3 - q0q2)\n dcm[1, 0] = 2.0*(q1q2 - q0q3)\n dcm[1, 1] = q0q0 - q1q1 + q2q2 - q3q3\n dcm[1, 2] = 2.0*(q2q3 + q0q1)\n dcm[2, 0] = 2.0*(q1q3 + q0q2)\n dcm[2, 1] = 2.0*(q2q3 - q0q1)\n dcm[2, 2] = q0q0 - q1q1 - q2q2 + q3q3\n return dcm", "def serialize_Q(Q: np.ndarray):\n ret = QMatrix()\n ret.q_matrix = [QMatrixRow() for i in range(64)]\n for i in range(64):\n row = []\n for j in range(9):\n row.append(Q.q_matrix[i][j])\n ret.q_matrix[i].q_matrix_row = row\n return ret", "def format_molecule_for_qchem_old(self, mixedbas=True):\n factor = 1.0 if self.PYunits == 'Angstrom' else psi_bohr2angstroms\n\n text = \"\"\n text += '$molecule\\n'\n text += '%d %d\\n' % (self.molecular_charge(), self.multiplicity())\n\n for i in range(self.natom()):\n [x, y, z] = self.atoms[i].compute()\n if mixedbas:\n text += '%2s ' % (self.symbol(i) if self.Z(i) else \"Gh\")\n else:\n text += '%-3s ' % (('' if self.Z(i) else '@') + self.symbol(i))\n text += '%17.12f %17.12f %17.12f\\n' % (x * factor, y * factor, z * factor)\n text += '$end\\n\\n'\n\n # prepare molecule keywords to be set as c-side keywords\n options = collections.defaultdict(lambda: collections.defaultdict(dict))\n #options['QCHEM'['QCHEM_CHARGE']['value'] = self.molecular_charge()\n #options['QCHEM'['QCHEM_MULTIPLICITY']['value'] = self.multiplicity()\n options['QCHEM']['QCHEM_INPUT_BOHR']['value'] = False\n #options['QCHEM']['QCHEM_COORDINATES']['value'] = 'CARTESIAN'\n #SYM_IGNORE equiv to no_reorient, no_com, symmetry c1\n\n options['QCHEM']['QCHEM_INPUT_BOHR']['clobber'] = True\n\n return text, options", "def DAQchannels(tree, DAQnum, CHnum):\n tree.addNode('.NI_6133.DAQ_' + str(DAQnum) + '.CHANNEL_' + str(CHnum))\n chanpath = ('.NI_6133.DAQ_' + str(DAQnum) + '.CHANNEL_' + str(CHnum)\n + '.CHAN_SETTING')\n tree.addNode(chanpath)\n AddNodeWithTag(tree, chanpath + ':ACTIVE', 'NUMERIC', 'DAQTIVE_DCARD' +\n str(DAQnum) + 'CH' + str(CHnum))\n AddNodeWithTag(tree, chanpath + ':CHANNEL_NAME', 'TEXT', 'USERNAME_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum))\n AddNumericWithUnit(tree, chanpath + ':VOLT_RANGE', 'VOLTRANGE_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum), 'V')\n AddNodeWithTag(tree, chanpath + ':NI_NAME', 'TEXT', 'NINAME_DCARD' \n + str(DAQnum) + 'CH' + str(CHnum))", "def quat2DCM(q):\n\tDCM = quatLeftMat(q) @ quatRightMat(q).T\n\tDCM = DCM[1:, 1:]\n\treturn DCM", "def create_mini_ticker_channel(self, symbol: str) -> str:", "def __init__(self, dmm: \"Keithley_6500\", channel: int, **kwargs) -> None:\n super().__init__(dmm, f\"ch{channel}\", **kwargs)\n self.channel = channel\n self.dmm = dmm\n\n self.add_parameter('resistance',\n unit='Ohm',\n label=f'Resistance CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 'RES'))\n\n self.add_parameter('resistance_4w',\n unit='Ohm',\n label=f'Resistance (4-wire) CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 'FRES'))\n\n self.add_parameter('voltage_dc',\n unit='V',\n label=f'DC Voltage CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 'VOLT'))\n\n self.add_parameter('current_dc',\n unit='A',\n label=f'DC current CH{self.channel}',\n get_parser=float,\n get_cmd=partial(self._measure, 'CURR'))", "def quat2mat(q):\n #leila: 
https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n\n sz = quat.get_size(q)\n q0 = quat.getq0(q)\n q1 = quat.getq1(q)\n q2 = quat.getq2(q)\n q3 = quat.getq3(q)\n qt = quat.get_type(q)\n\n g = np.zeros((sz, 3, 3))\n g[:, 0, 0] = np.square(q0) + np.square(q1) - np.square(q2) - np.square(q3)\n g[:, 0, 1] = 2*(q1*q2 - q0*q3)\n g[:, 0, 2] = 2*(q3*q1 + q0*q2)\n g[:, 1, 0] = 2*(q1*q2 + q0*q3)\n g[:, 1, 1] = np.square(q0) - np.square(q1) + np.square(q2) - np.square(q3)\n g[:, 1, 2] = 2*(q2*q3 - q0*q1)\n g[:, 2, 0] = 2*(q3*q1 - q0*q2)\n g[:, 2, 1] = 2*(q2*q3 + q0*q1)\n g[:, 2, 2] = np.square(q0) - np.square(q1) - np.square(q2) + np.square(q3)\n\n if sz == 1:\n g = g.reshape((3, 3))\n if qt == -1:\n g = -g\n else:\n inds1 = np.where(qt == -1)\n g[inds1, :, :] = -g[inds1, :, :]\n\n return g", "def test_molecule_subclass_from_qcschema(self):\n import qcportal as ptl\n\n client = ptl.FractalClient()\n ds = client.get_collection(\n \"TorsionDriveDataset\", \"Fragment Stability Benchmark\"\n )\n entry = ds.get_entry(\n \"CC(=O)Nc1cc2c(cc1OC)nc[n:4][c:3]2[NH:2][c:1]3ccc(c(c3)Cl)F\"\n )\n # now make the molecule from the record instance with and without the geometry\n mol = MyMol.from_qcschema(entry.dict(encoding=\"json\"))\n assert isinstance(mol, MyMol)\n # Make from object, which will include geometry\n mol = MyMol.from_qcschema(entry, client)\n assert isinstance(mol, MyMol)", "def _create_mc_question(self, description):\n\n mc_dict = {\n 'description': description,\n 'type': models.QuestionDTO.MULTIPLE_CHOICE,\n 'choices': [\n {\n 'text': 'correct answer',\n 'score': 1.0\n },\n {\n 'text': 'incorrect answer',\n 'score': 0.0\n }],\n 'version': '1.5'\n }\n question = models.QuestionDTO(None, mc_dict)\n qid = models.QuestionDAO.save(question)\n return models.QuestionDAO.load(qid)", "def to_qobj(self): # -> \"qutip.Qobj\"\n from qutip import Qobj\n\n q_dims = [list(self.hilbert.shape), [1 for i in range(self.hilbert.size)]]\n return Qobj(np.asarray(self.to_array()), dims=q_dims)", "def convert_to(self, domain):\n if domain == self.domain:\n return self.copy()\n elif domain == QQ and self.domain == ZZ:\n return self._new(flint.fmpq_mat(self.rep), self.shape, domain)\n elif domain == ZZ and self.domain == QQ:\n # XXX: python-flint has no fmpz_mat.from_fmpq_mat\n return self.to_ddm().convert_to(domain).to_dfm()\n else:\n # It is the callers responsibility to convert to DDM before calling\n # this method if the domain is not supported by DFM.\n raise NotImplementedError(\"Only ZZ and QQ are supported by DFM\")", "def from_float(cls, mod, qconfig=None):\n assert type(mod) == cls._FLOAT_MODULE, 'qat.' 
+ cls.__name__ + '.from_float only works for ' + \\\n cls._FLOAT_MODULE.__name__\n if not qconfig:\n assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n assert mod.qconfig, 'Input float module must have a valid qconfig'\n qconfig = mod.qconfig\n conv, bn = mod[0], mod[1]\n qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,\n conv.stride, conv.padding, conv.dilation,\n conv.groups, conv.bias is not None,\n conv.padding_mode,\n bn.eps, bn.momentum,\n False,\n qconfig)\n qat_convbn.weight = conv.weight\n qat_convbn.bias = conv.bias\n qat_convbn.gamma = bn.weight\n qat_convbn.beta = bn.bias\n qat_convbn.running_mean = bn.running_mean\n qat_convbn.running_var = bn.running_var\n qat_convbn.num_batches_tracked = bn.num_batches_tracked\n return qat_convbn", "def buildQuestion():\n #example.com\n QNAME = b\"\\x07\\x65\\x78\\x61\\x6d\\x70\\x6c\\x65\\x03\\x63\\x6f\\x6d\\x00\"\n\n \"\"\"\n A two octet code which specifies the type of the query.\n The values for this field include all codes valid for a\n TYPE field, together with some more general codes which\n can match more than one type of RR.\n \"\"\" \n QTYPE = b\"\\x00\\x01\"\n\n \"\"\"\n A two octet code that specifies the class of the query.\n For example, the QCLASS field is IN for the Internet.\n \"\"\"\n QCLASS = b\"\\x00\\x01\"\n\n dnsBody = QNAME + QTYPE + QCLASS\n #print(dnsBody)\n return dnsBody", "def convert_dds_to_qiskit_quantum_circuit(\n dynamic_decoupling_sequence,\n target_qubits=None,\n gate_time=0.1,\n add_measurement=True,\n algorithm=INSTANT_UNITARY,\n quantum_registers=None,\n circuit_name=None):\n\n if dynamic_decoupling_sequence is None:\n raise ArgumentsValueError('No dynamic decoupling sequence provided.',\n {'dynamic_decoupling_sequence': dynamic_decoupling_sequence})\n\n if not isinstance(dynamic_decoupling_sequence, DynamicDecouplingSequence):\n raise ArgumentsValueError('Dynamical decoupling sequence is not recognized.'\n 'Expected DynamicDecouplingSequence instance',\n {'type(dynamic_decoupling_sequence)':\n type(dynamic_decoupling_sequence)})\n\n if target_qubits is None:\n target_qubits = [0]\n\n if gate_time <= 0:\n raise ArgumentsValueError(\n 'Time delay of identity gate must be greater than zero.',\n {'gate_time': gate_time})\n\n if np.any(target_qubits) < 0:\n raise ArgumentsValueError(\n 'Every target qubits index must be positive.',\n {'target_qubits': target_qubits})\n\n if algorithm not in [FIX_DURATION_UNITARY, INSTANT_UNITARY]:\n raise ArgumentsValueError('Algorithm must be one of {} or {}'.format(\n INSTANT_UNITARY, FIX_DURATION_UNITARY), {'algorithm': algorithm})\n\n if quantum_registers is not None:\n if (max(target_qubits)+1) > len(quantum_registers):\n raise ArgumentsValueError('Target qubit is not present in quantum_registers',\n {'target_qubits': target_qubits,\n 'size(quantum_registers)': len(quantum_registers)},\n extras={'max(target_qubits)': max(target_qubits)})\n quantum_registers = quantum_registers\n else:\n quantum_registers = QuantumRegister(max(target_qubits)+1)\n\n classical_registers = None\n if add_measurement:\n classical_registers = ClassicalRegister(len(target_qubits))\n quantum_circuit = QuantumCircuit(quantum_registers, classical_registers)\n else:\n quantum_circuit = QuantumCircuit(quantum_registers)\n\n if circuit_name is not None:\n quantum_circuit.name = circuit_name\n\n unitary_time = 0.\n if algorithm == FIX_DURATION_UNITARY:\n unitary_time = gate_time\n\n rabi_rotations = dynamic_decoupling_sequence.rabi_rotations\n 
azimuthal_angles = dynamic_decoupling_sequence.azimuthal_angles\n detuning_rotations = dynamic_decoupling_sequence.detuning_rotations\n\n if len(rabi_rotations.shape) == 1:\n rabi_rotations = rabi_rotations[np.newaxis, :]\n if len(azimuthal_angles.shape) == 1:\n azimuthal_angles = azimuthal_angles[np.newaxis, :]\n if len(detuning_rotations.shape) == 1:\n detuning_rotations = detuning_rotations[np.newaxis, :]\n\n operations = np.vstack((rabi_rotations, azimuthal_angles, detuning_rotations))\n offsets = dynamic_decoupling_sequence.offsets\n\n time_covered = 0\n for operation_idx in range(operations.shape[1]):\n\n offset_distance = offsets[operation_idx] - time_covered\n\n if np.isclose(offset_distance, 0.0):\n offset_distance = 0.0\n\n if offset_distance < 0:\n raise ArgumentsValueError(\"Offsets cannot be placed properly\",\n {'sequence_operations': operations})\n\n if offset_distance > 0:\n while (time_covered+gate_time) <= offsets[operation_idx]:\n for qubit in target_qubits:\n quantum_circuit.iden(quantum_registers[qubit]) # pylint: disable=no-member\n quantum_circuit.barrier(quantum_registers[qubit]) # pylint: disable=no-member\n time_covered += gate_time\n\n rabi_rotation = operations[0, operation_idx]\n azimuthal_angle = operations[1, operation_idx]\n x_rotation = rabi_rotation * np.cos(azimuthal_angle)\n y_rotation = rabi_rotation * np.sin(azimuthal_angle)\n z_rotation = operations[2, operation_idx]\n\n rotations = np.array([x_rotation, y_rotation, z_rotation])\n zero_pulses = np.isclose(rotations, 0.0).astype(np.int)\n nonzero_pulse_counts = 3 - np.sum(zero_pulses)\n if nonzero_pulse_counts > 1:\n raise ArgumentsValueError(\n 'Open Controls support a sequence with one '\n 'valid pulse at any offset. Found sequence '\n 'with multiple rotation operations at an offset.',\n {'dynamic_decoupling_sequence': str(dynamic_decoupling_sequence),\n 'offset': dynamic_decoupling_sequence.offsets[operation_idx],\n 'rabi_rotation': dynamic_decoupling_sequence.rabi_rotations[\n operation_idx],\n 'azimuthal_angle': dynamic_decoupling_sequence.azimuthal_angles[\n operation_idx],\n 'detuning_rotaion': dynamic_decoupling_sequence.detuning_rotations[\n operation_idx]}\n )\n\n for qubit in target_qubits:\n if nonzero_pulse_counts == 0:\n quantum_circuit.u3(\n 0., 0., 0., # pylint: disable=no-member\n quantum_registers[qubit])\n else:\n if not np.isclose(rotations[0], 0.0):\n quantum_circuit.u3(\n rotations[0], -pi / 2, pi / 2, # pylint: disable=no-member\n quantum_registers[qubit])\n elif not np.isclose(rotations[1], 0.0):\n quantum_circuit.u3(\n rotations[1], 0., 0., # pylint: disable=no-member\n quantum_registers[qubit])\n elif not np.isclose(rotations[2], 0.):\n quantum_circuit.u1(\n rotations[2], # pylint: disable=no-member\n quantum_registers[qubit])\n quantum_circuit.barrier(quantum_registers[qubit]) # pylint: disable=no-member\n\n if np.isclose(np.sum(rotations), 0.0):\n time_covered = offsets[operation_idx]\n else:\n time_covered = offsets[operation_idx] + unitary_time\n\n if add_measurement:\n for q_index, qubit in enumerate(target_qubits):\n quantum_circuit.measure(quantum_registers[qubit], #pylint: disable=no-member\n classical_registers[q_index])\n\n return quantum_circuit", "def _gen_mixnet_m(channel_multiplier=1.0, depth_multiplier=1.0, num_classes=1000, **kwargs):\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c24'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu\n # stage 2, 56x56 in\n 
['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def, depth_multiplier=depth_multiplier, depth_trunc='round'),\n num_classes=num_classes,\n stem_size=24,\n num_features=1536,\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n act_fn=F.relu,\n **kwargs\n )\n return model", "def QCD(fp):\n lqcd = unpack('>H', fp.read(2))[0]\n sqcd = unpack('B', fp.read(1))[0]\n\n _spqcd = []\n _remaining = lqcd - 3\n bitstring = '{:>08b}'.format(sqcd)\n while _remaining > 0:\n if bitstring[3:] == '00000':\n # xxx0 0000: no quantisation\n _spqcd.append(unpack('B', fp.read(1))[0])\n _remaining -= 1\n elif bitstring[3:] == '00001':\n # xxx0 0001: scalar derived\n _spqcd.append(unpack('>H', fp.read(2))[0])\n _remaining -= 2\n elif bitstring[3:] == '00010':\n # xxx0 0010: scalar expounded\n _spqcd.append(unpack('>H', fp.read(2))[0])\n _remaining -= 2\n else:\n raise NotImplementedError('QCD invalid value')\n\n #guard_bits = int(bitstring[3:], 2)\n\n info = {\n 'Lqcd' : lqcd,\n 'Sqcd' : sqcd,\n 'SPqcd' : _spqcd\n }\n\n return info", "def create_mini_tickers_channel(self) -> str:", "def from_mypackage(mycosmo):\n # Cosmology provides a nice method \"mapping\", so all that needs to\n # be done here is create a dictionary of the parameters\n mapping = {}\n mapping[\"H0\"] = mycosmo.hubble_parameter\n mapping[\"Om0\"] = mycosmo.Omega_matter_initial\n ... 
# keep building mapping\n\n return Cosmology.from_format(\n mapping, format=\"mapping\", move_to_meta=True\n ) # extra info -> meta", "def create_from(cls, backend):\n backend_config = backend.configuration()\n\n # TODO : Remove usage of config.defaults when backend.defaults() is updated.\n try:\n backend_default = backend.defaults()\n buffer = backend_default.buffer\n except ModelValidationError:\n try:\n buffer = backend_config.defaults.get('buffer', 0)\n except AttributeError:\n buffer = 0\n\n # system size\n n_qubits = backend_config.n_qubits\n n_registers = backend_config.n_registers\n n_uchannels = backend_config.n_uchannels\n\n # generate channels with assuming their numberings are aligned with qubits\n drives = [DriveChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n measures = [MeasureChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n controls = [ControlChannel(i, buffer=buffer) for i in range(n_uchannels)]\n\n acquires = [AcquireChannel(i, buffer=buffer) for i in range(n_qubits)]\n\n qubits = []\n for i in range(n_qubits):\n # TODO: get qubits <-> channels relationship from backend\n qubit = Qubit(i, drives[i], measures[i], acquires[i],\n control_channels=[] if not controls else controls)\n qubits.append(qubit)\n\n registers = [RegisterSlot(i) for i in range(n_registers)]\n # TODO: get #mem_slots from backend\n mem_slots = [MemorySlot(i) for i in range(len(qubits))]\n\n return DeviceSpecification(qubits, registers, mem_slots)", "def construct_qcbm(circuit, n_qubits, depth):\n\n for d in range(depth):\n for i in range(n_qubits):\n circuit.append_gate(Gate('X', target = i, angle = np.random.random()*np.pi*2))\n circuit.append_gate(Gate('Z', target = i, angle = np.random.random()*np.pi*2))\n if n_qubits != 1:\n for i in range(n_qubits):\n circuit.append_gate(Gate('CNOT', control = i, target = (i+1)%n_qubits))\n return circuit", "def quantum_net(self, q_input_features, q_weights_flat):\n\n # Reshape weights\n q_weights = q_weights_flat.reshape(self.args.q_depth, self.args.n_qubits, 3)\n\n # Start from state |+> , unbiased w.r.t. 
|0> and |1>\n # Amplitude encoding\n qml.QubitStateVector(q_input_features, wires=list(range(self.args.n_qubits)))\n \n # Sequence of trainable variational layers\n for k in range(self.args.q_depth):\n self.entangling_layer(self.args.n_qubits)\n self.Rot_layer(q_weights[k])\n\n # Expectation values in the Z basis\n exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(self.args.target_class)]\n return tuple(exp_vals)", "def mobilenetv2_q(pretrained=False, **kwargs):\n model = MobileNetV2Q(**kwargs)\n if pretrained:\n load_fake_quantized_state_dict(model, model_zoo.load_url(model_urls['mobilenetv2'], map_location='cpu'),\n 'mobilenetv2_q_map.json')\n return model", "def matrix_to_qubit(matrix):\n # Determine the format based on the type of the input matrix\n format = 'sympy'\n if isinstance(matrix, numpy_ndarray):\n format = 'numpy'\n if isinstance(matrix, scipy_sparse_matrix):\n format = 'scipy.sparse'\n\n # Make sure it is of correct dimensions for a Qubit-matrix representation.\n # This logic should work with sympy, numpy or scipy.sparse matrices.\n if matrix.shape[0] == 1:\n mlistlen = matrix.shape[1]\n nqubits = log(mlistlen, 2)\n ket = False\n cls = QubitBra\n elif matrix.shape[1] == 1:\n mlistlen = matrix.shape[0]\n nqubits = log(mlistlen, 2)\n ket = True\n cls = Qubit\n else:\n raise QuantumError(\n 'Matrix must be a row/column vector, got %r' % matrix\n )\n if not isinstance(nqubits, Integer):\n raise QuantumError('Matrix must be a row/column vector of size '\n '2**nqubits, got: %r' % matrix)\n # Go through each item in matrix, if element is non-zero, make it into a\n # Qubit item times the element.\n result = 0\n for i in range(mlistlen):\n if ket:\n element = matrix[i, 0]\n else:\n element = matrix[0, i]\n if format in ('numpy', 'scipy.sparse'):\n element = complex(element)\n if element != 0.0:\n # Form Qubit array; 0 in bit-locations where i is 0, 1 in\n # bit-locations where i is 1\n qubit_array = [int(i & (1 << x) != 0) for x in range(nqubits)]\n qubit_array.reverse()\n result = result + element*cls(*qubit_array)\n\n # If SymPy simplified by pulling out a constant coefficient, undo that.\n if isinstance(result, (Mul, Add, Pow)):\n result = result.expand()\n\n return result", "def __init__(self, qubit={}, blockNum=1, readout={}):\n self.diagram = np.asarray(\n [[np.nan] * blockNum] * (len(qubit) + len(readout)),\n dtype=object\n )\n # check datatype and assign\n if isinstance(qubit, list):\n self._qubitDict = dict(zip(qubit, range(len(qubit))))\n elif isinstance(qubit, dict):\n self._qubitDict = qubit\n else:\n raise TypeError('qubitDict: Unsupported format')\n if isinstance(readout, list):\n used_idx = [val for _, val in self._qubitDict.items()]\n unused_idx = sorted([\n val for val in range(len(readout) + len(qubit))\n if val not in used_idx\n ])\n self._readoutDict = dict(zip(readout, unused_idx))\n elif isinstance(readout, dict):\n self._readoutDict = readout\n else:\n raise TypeError('readoutDict: Unsupported format')\n # index check\n for key, val in {**self._qubitDict, **self._readoutDict}.items():\n if val >= len(self.diagram[:, 0]):\n raise ValueError(\n f'QubitChannel \\'{key}\\' assignment out of bound with ' +\n f'index: {val}'\n )\n self._name = ''", "def _qiskit_to_qlm_param(prog, variables, param):\n if isinstance(param, Parameter):\n name = param.name\n for var in variables:\n if var.name == name:\n return var\n var = prog.new_var(float, name)\n variables.append(var)\n elif isinstance(param, ParameterExpression):\n expression = param._symbol_expr\n 
return _sympy_arg_to_arith_expr(prog, variables, param, expression)\n return var", "def get_mq(a_fm, description, quark_alias):\n quark = conventions.quark_masses\n mask = utils.bundle_mask(quark, a_fm=a_fm, description=description, alias=quark_alias)\n return utils.extract_unique(quark[mask], 'mq')", "def from_qasm(cls, qasm, **quantum_circuit_opts):\n info = parse_qasm(qasm)\n qc = cls(info['n'], **quantum_circuit_opts)\n qc.apply_gates(info['gates'])\n return qc", "def circuit_from_qasm_string(qasm_string, name=None,\n basis_gates=\"id,u0,u1,u2,u3,x,y,z,h,s,sdg,t,tdg,\"\n \"rx,ry,rz,cx,cy,cz,ch,crz,cu1,cu3,swap,ccx,\"\n \"cswap\"):\n\n node_circuit = Qasm(data=qasm_string).parse()\n unrolled_circuit = Unroller(\n node_circuit, CircuitBackend(basis_gates.split(\",\")))\n circuit_unrolled = unrolled_circuit.execute()\n if name:\n circuit_unrolled.name = name\n return circuit_unrolled", "def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item", "def test_creation_from_choi_operator():\n # Get krauss operators from dephrasure channel\n krauss_ops = set_up_dephrasure_conditions(0.1, 0.2)\n\n # Construct choi matrix from krauss operators\n choi_matrix = sum([np.outer(np.ravel(x, order=\"F\"),\n np.conj(np.ravel(x, order=\"F\"))) for x in krauss_ops])\n numb_qubits, dim_in, dim_out = [1, 1], 2, 3\n choi_obj = ChoiQutip(choi_matrix, numb_qubits, dim_in, dim_out)\n\n # Check if the two constructed krauss operators are the same.\n assert check_two_sets_of_krauss_are_same(krauss_ops, choi_obj.kraus_operators(), numb_qubits,\n dim_in, dim_out)\n\n # Test dimensions must match the choi matrix specified.\n assert_raises(ValueError, ChoiQutip, choi_matrix, numb_qubits, 3, 3)\n assert_raises(ValueError, ChoiQutip, choi_matrix, numb_qubits, 2, 2)\n assert_raises(ValueError, ChoiQutip, choi_matrix, [1, 2], 2, 3)", "def set_qs_type(self: Qs, qs_type: str = \"\", rows: int = 0, columns: int = 0, copy: bool = True) -> Qs:\n\n # Checks.\n if rows and columns and rows * columns != self.dim:\n raise ValueError(\n f\"Oops, check those values again for rows:{rows} columns:{columns} dim:{self.dim}\"\n )\n\n new_q = self\n\n if copy:\n new_q = deepcopy(self)\n\n # Assign values if need be.\n if new_q.qs_type != qs_type:\n new_q.rows = 0\n\n if qs_type == \"ket\" and not new_q.rows:\n new_q.rows = new_q.dim\n new_q.columns = 1\n\n elif qs_type == \"bra\" and not new_q.rows:\n new_q.rows = 1\n new_q.columns = new_q.dim\n\n elif qs_type in [\"op\", \"operator\"] and 
not new_q.rows:\n # Square series\n root_dim = math.sqrt(new_q.dim)\n\n if root_dim.is_integer():\n new_q.rows = int(root_dim)\n new_q.columns = int(root_dim)\n qs_type = \"op\"\n\n elif rows * columns == new_q.dim and not new_q.qs_type:\n if new_q.dim == 1:\n qs_type = \"scalar_q\"\n elif new_q.rows == 1:\n qs_type = \"bra\"\n elif new_q.columns == 1:\n qs_type = \"ket\"\n else:\n qs_type = \"op\"\n\n if not qs_type:\n raise Exception(\n \"Oops, please set rows and columns for this quaternion series operator. Thanks.\"\n )\n\n if new_q.dim == 1:\n qs_type = \"scalar_q\"\n\n new_q.qs_type = qs_type\n\n return new_q", "def create_ticker_channel(self, symbol: str) -> str:", "def fromDCM(cls,C):\n gamma=np.trace(C)\n w2=(1+gamma)/4.\n Ckk=np.diag(C)\n q2=(1+2*Ckk-gamma)/4.\n q2=np.array([q2[0],q2[1],q2[2],w2])\n\n max_index = np.argmax(q2)\n q=np.zeros(4)\n q[max_index] = np.sqrt(q2[max_index])\n d = 4.*q[max_index]\n C11,C12,C13,C21,C22,C23,C31,C32,C33 = C.flatten()\n if max_index==3:\n q[0] = (C23-C32)/d\n q[1] = (C31-C13)/d\n q[2] = (C12-C21)/d\n elif max_index==0:\n q[3] = (C23-C32)/d\n q[1] = (C12+C21)/d\n q[2] = (C31+C13)/d\n elif max_index==1:\n q[3] = (C31-C13)/d\n q[0] = (C12+C21)/d\n q[2] = (C23+C32)/d\n elif max_index==2:\n q[3] = (C12-C21)/d\n q[0] = (C31+C13)/d\n q[1] = (C23+C32)/d\n quat= Quat(q,order=\"xyzw\")\n quat.normalize()\n return quat", "def from_wram(cry):\n raise NotImplementedError", "def from_q(self, q: np.ndarray) -> np.ndarray:\n return self.from_quaternion(self, q)", "def _generate_qubits(self):\n return cq.LineQubit.range(4)", "def repr_to_spectrogram(self, mdct_norm, intensity=False, channel=0, cmap=None):\n x = tf.cast(mdct_norm[:, :, :, channel:channel+1], tf.float32)\n\n def normalized_dB_scale(ampl, with_sign=True):\n normalized_dB = self.psychoacoustic.amplitude_to_dB_norm(ampl)\n if with_sign:\n # range -1..1\n return tf.sign(ampl) * normalized_dB\n else:\n # range 0..1\n return normalized_dB\n\n # convert to 0..1 range\n if intensity:\n image = normalized_dB_scale(x, with_sign=False)\n else:\n image = (normalized_dB_scale(x, with_sign=True) + 1.) 
/ 2.\n\n image = tf.map_fn(lambda im: tf.image.rot90(im), image)\n\n # colorize with cmap\n if cmap is not None:\n # quantize\n image = image[:, :, :, 0] # remove the dummy channel direction (will be replace with rgb info from color map)\n image_index = tf.cast(tf.round(image * (cmap.N-1)), dtype=tf.int32) # indices in [0, cmap.N-1]\n\n image_index = tf.clip_by_value(image_index, clip_value_min=0, clip_value_max=cmap.N-1)\n\n # gather\n color_map = matplotlib.cm.get_cmap(cmap)(np.arange(cmap.N)) # shape=[cmap.N, 3]\n colors = tf.constant(color_map, dtype=tf.float32)\n image = tf.gather(colors, image_index) # image[b, h, w, c] = color[image_index[b, h, w], c]\n\n return image", "def __init__(self, channel_id=None, name=None, owner_id=None, comparison_key=None, has_password=None, motd=None, allowed=None, operators=None, blocked=None, muted=None): # noqa: E501 # noqa: E501\n\n self._channel_id = None\n self._name = None\n self._owner_id = None\n self._comparison_key = None\n self._has_password = None\n self._motd = None\n self._allowed = None\n self._operators = None\n self._blocked = None\n self._muted = None\n self.discriminator = None\n\n self.channel_id = channel_id\n self.name = name\n self.owner_id = owner_id\n self.comparison_key = comparison_key\n self.has_password = has_password\n self.motd = motd\n self.allowed = allowed\n self.operators = operators\n self.blocked = blocked\n self.muted = muted", "def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)", "def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. - xx2 - yy2\n\n return rmat", "def _gen_chamnet_v1(channel_multiplier, num_classes=1000, **kwargs):\n arch_def = [\n ['ir_r1_k3_s1_e1_c24'],\n ['ir_r2_k7_s2_e4_c48'],\n ['ir_r5_k3_s2_e7_c64'],\n ['ir_r7_k5_s2_e12_c56'],\n ['ir_r5_k3_s1_e8_c88'],\n ['ir_r4_k3_s2_e7_c152'],\n ['ir_r1_k3_s1_e10_c104'],\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def),\n num_classes=num_classes,\n stem_size=32,\n num_features=1280, # no idea what this is? 
try mobile/mnasnet default?\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n **kwargs\n )\n return model", "def update_svq_time_metric(ml_channel_id, ml_channel_name):\n result = []\n entry = [\"MediaLive\", \"SvqTime\", \"ChannelId\", ml_channel_id, \"Pipeline\", \"0\", {\"label\": ml_channel_name + \"-0\"}]\n result.append(entry)\n entry = [\"MediaLive\", \"SvqTime\", \"ChannelId\", ml_channel_id, \"Pipeline\", \"1\", {\"yAxis\": \"right\",\n \"label\": ml_channel_name + \"-1\"}]\n result.append(entry)\n return result", "def create_circuit(self, do_measure=True):\n circuit_name = str(uuid.uuid4())\n circuit = self.quantum_program.create_circuit(circuit_name,\n [self.quantum_register],\n [self.classical_register])\n\n for j in range(self.depth):\n if self.qubits == 1:\n op_ind = 0\n else:\n op_ind = random.randint(0, 1)\n if op_ind == 0: # U3\n qind = random.randint(0, self.qubits - 1)\n circuit.u3(random.random(), random.random(), random.random(),\n self.quantum_register[qind])\n elif op_ind == 1: # CX\n source, target = random.sample(range(self.qubits), 2)\n circuit.cx(self.quantum_register[source],\n self.quantum_register[target])\n\n if do_measure:\n nmeasure = random.randint(1, self.qubits)\n for j in range(nmeasure):\n qind = random.randint(0, self.qubits - 1)\n # doing this if here keeps the RNG from depending on\n # whether measurements are done.\n circuit.measure(self.quantum_register[qind],\n self.classical_register[qind])\n\n return circuit.qasm()", "def quat2dcm(quat):\n q = asarray(quat).flatten()\n\n a, b, c, d = q[0], q[1], q[2], q[3]\n c11 = a * a + b * b - c * c - d * d\n c12 = 2 * (b * c - a * d)\n c13 = 2 * (b * d + a * c)\n c21 = 2 * (b * c + a * d)\n c22 = a * a - b * b + c * c - d * d\n\n c23 = 2 * (c * d - a * b)\n c31 = 2 * (b * d - a * c)\n c32 = 2 * (c * d + a * b)\n c33 = a * a - b * b - c * c + d * d\n\n return array([[c11, c12, c13],\n [c21, c22, c23],\n [c31, c32, c33]])", "def make_eq(self, eq):\n for bands in eq:\n eq = system.create_dsp_by_type(FMOD_DSP_TYPE_PARAMEQ)\n eq.set_param(0, bands[0]) # centre\n eq.set_param(1, 1.0) # octaves\n eq.set_param(2, bands[1]) # gain \n return eq", "def from_qreq_(cls, qreq_, cm_list, autoinit=False):\n # raise NotImplementedError('do not use')\n aids = ut.unique(ut.flatten([qreq_.qaids, qreq_.daids]))\n nids = qreq_.get_qreq_annot_nids(aids)\n ibs = qreq_.ibs\n infr = cls(ibs, aids, nids, verbose=False, autoinit=autoinit)\n infr.cm_list = cm_list\n infr.qreq_ = qreq_\n return infr", "def qobj_to_circuits(qobj):\n if qobj.experiments:\n circuits = []\n for x in qobj.experiments:\n if hasattr(x.header, 'compiled_circuit_qasm'):\n circuits.append(\n load_qasm_string(x.header.compiled_circuit_qasm))\n return circuits\n # TODO(mtreinish): add support for converting a qobj if the qasm isn't\n # embedded in the header\n return None", "def test_convert_to_q(self):\n\n riskfree = .01\n lmbd = .01\n lmbd_s = .5\n lmbd_y = .5\n mean_v = .5\n kappa_s = 1.5\n kappa_y = .5\n eta_s = .1\n eta_y = .01\n rho = -.5\n\n theta = [riskfree, mean_v, kappa_s, kappa_y, eta_s, eta_y,\n rho, lmbd, lmbd_s, lmbd_y]\n param = CentTendParam.from_theta(theta)\n param.convert_to_q()\n\n kappa_sq = kappa_s - lmbd_s * eta_s\n kappa_yq = kappa_y - lmbd_y * eta_y\n scale = kappa_s / kappa_sq\n\n self.assertEqual(param.measure, 'Q')\n self.assertEqual(param.riskfree, riskfree)\n self.assertEqual(param.lmbd, 0)\n self.assertEqual(param.lmbd_s, lmbd_s)\n self.assertEqual(param.lmbd_y, lmbd_y)\n self.assertEqual(param.mean_v, 
mean_v * kappa_y / kappa_yq * scale)\n self.assertEqual(param.kappa_s, kappa_sq)\n self.assertEqual(param.kappa_y, kappa_yq)\n self.assertEqual(param.eta_s, eta_s)\n self.assertEqual(param.eta_y, eta_y * scale**.5)\n self.assertEqual(param.rho, rho)\n self.assertTrue(param.is_valid())", "def qtc2state(self, qtc):\n \n state_rep = []\n for idx, element in enumerate(qtc):\n# val_qtc = validateQtcSequences(element)\n d = element.shape[1]\n mult = 3**np.arange(d-1, -1, -1)\n state_num = np.append(\n 0,\n ((element + 1)*np.tile(mult, (element.shape[0], 1))).sum(axis=1) + 1\n )\n state_num = np.append(state_num, 82)\n state_char = ''\n for n in state_num:\n state_char += chr(int(n)+32)\n state_rep.append(state_num.tolist())\n \n return state_rep", "def std2message(query):\n message = query['query_message']\n message['question_graph'] = message.pop('query_graph')\n for node in message['question_graph']['nodes']:\n node['id'] = node.pop('node_id')\n for edge in message['question_graph']['edges']:\n edge['id'] = edge.pop('edge_id')\n return message", "def compileCkt(self):\n f = np.vectorize(lambda x: isinstance(x, QubitChannel))\n table = f(self.diagram)\n col_bool = np.bitwise_or.reduce(table, axis=1)\n # filter nan in 'qubit' direction\n if not np.bitwise_and.reduce(col_bool):\n raise ValueError('Found unassigned qubit')\n # filter nan in 'time' direction\n row_bool = np.bitwise_or.reduce(table, axis=0)\n diagram = self.diagram[:, row_bool]\n table = table[:, row_bool]\n # align QubitChannel objects in the table column by column\n for time_idx in range(len(table[0, :])):\n diagram[table[:, time_idx], time_idx\n ] = QubitChannel.alignQubitChannels(\n *diagram[table[:, time_idx], time_idx]\n )\n # replace nans with null QubitChannel objects\n for qubit_idx, row in enumerate(table):\n for time_idx, flag in enumerate(row):\n if flag:\n continue\n span_idx = np.where(f(diagram[:, time_idx]))[0][0]\n wire_idx = np.where(f(diagram[qubit_idx, :]))[0][0]\n diagram[qubit_idx, time_idx] = QubitChannel.null(\n diagram[span_idx, time_idx], diagram[qubit_idx, wire_idx]\n )\n try:\n self.compiled = np.sum(diagram, axis=1)\n except SystemError:\n raise ValueError('Error during wire concatenation')", "def convert_to_q(self):\n if self.measure == 'Q':\n warnings.warn('Parameters are already converted to Q!')\n else:\n kappa_sp = self.kappa_s\n kappa_yp = self.kappa_y\n self.kappa_s = self.kappa_s - self.lmbd_s * self.eta_s\n self.kappa_y = self.kappa_y - self.lmbd_y * self.eta_y\n self.scale = kappa_sp / self.kappa_s\n self.mean_v *= (kappa_yp / self.kappa_y * self.scale)\n self.lmbd = 0\n self.eta_y *= (self.scale**.5)\n self.measure = 'Q'\n self.update_ajd()", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def from_QQ_gmpy(K1, a, K0=None):\n if a.denominator == 1:\n return K1.from_ZZ_gmpy(a.numerator)", "def __init__(self, quant_arc_interface):\n\n super().__init__()\n self.args = quant_arc_interface.args\n self.q_params = nn.Parameter(self.args.q_delta * torch.randn(self.args.q_depth * 
quant_arc_interface.second_qubits))\n self.qai = quant_arc_interface", "def convertToSpectroGram(self):", "def from_QQ_gmpy(K1, a, K0):\n return None", "def makeQuery(specs):\n size = len(specs)\n req = specs[0]\n\n # Create the header\n message = struct.pack('!HHHHHH', 1, 0, 1, 0, 0, 0)\n\n num = req.count('.')\n mylist = req.split('.')\n\n # Create the question and add it to the header\n for string in mylist:\n query = struct.pack(\"!b\"+str(len(string))+\"s\", len(string), string.encode('utf-8'))\n message = message + query\n\n # If the query is Type-A otherwise it is Type-MX\n if size == 1:\n message = message + struct.pack(\"!bHH\", 0,1,1)\n\n elif size == 2:\n message = message + struct.pack(\"!bHH\", 0, 15, 1)\n\n return message", "def convert_channel(name, want_id=True, channel_mapping=channel_mapping):\n from builtins import int\n if isinstance(name, int):\n if name not in channel_mapping:\n raise RuntimeError(\"ChannelID %r is not in channel_mapping\" % name)\n return name if want_id else channel_mapping[name]\n \n elif isinstance(name, str):\n if name not in channel_mapping:\n raise RuntimeError(\"ChannelID %r is not in channel_mapping\" % name)\n return name if not want_id else channel_mapping[name]\n \n raise RuntimeError(\"I don't know how to convert %r into \"\n \"a ChannelSelection\" % name)", "def channel(self):\n return Channel({'id': self.channel_id, 'connection': self.connection})", "def _cim_quality():\n return {\n 'type' : 'class',\n 'name' : 'cim_quality',\n 'base' : None,\n 'is_abstract' : False,\n 'is_entity' : True,\n 'doc' : 'The starting point for a quality record. It can contain any number of issues and reports. An issue is an open-ended description of some issue about a CIM instance. A record is a prescribed description of some specific quantitative measure that has been applied to a CIM instance.',\n 'properties' : [\n ('meta', 'shared.doc_meta_info', '1.1', None),\n ('reports', 'quality.report', '0.N', None),\n ],\n 'decodings' : [\n ('meta', 'self::cim:cIM_Quality'),\n ('reports', 'child::cim:report'),\n ]\n }", "def form_dqxx_word(dqcr, dqch):\n # Loop over all channels and form the DQXX word\n dqxx = [0 for i in range(19 * 16 * 32)]\n for lcn in range(len(dqxx)):\n dqch_word = dqch[lcn]\n # get the crate number\n crate = (lcn & 0x3e00) >> 9\n # get the card number\n card = (lcn & 0x1e0) >> 5\n dqcr_word = dqcr[crate * 16 + card]\n # get the channel number\n ch = lcn & 0x1f\n # Get the daughterboard number for this channel\n db = ch / 8\n dqxx_word = 0\n # 0 DQXX[0] = DQCR[0]\n dqxx_word |= ((not check_bit(dqcr_word, 0)) << 0)\n # 1 DQXX[1] = DQCR[8]\n dqxx_word |= ((not check_bit(dqcr_word, 8)) << 1)\n # 2 DQXX[2] = DQCR[9]\n dqxx_word |= ((not check_bit(dqcr_word, 9)) << 2)\n # 3 DQXX[3] = DQCR[10]\n dqxx_word |= ((not check_bit(dqcr_word, 10)) << 3)\n # 4 DQXX[4] = DQCR[11]\n dqxx_word |= ((not check_bit(dqcr_word, 11)) << 4)\n # 8 DQXX[8] = DQCR[1]\n dqxx_word |= ((not check_bit(dqcr_word, 1)) << 8)\n # 9 DQXX[9] = DQCR[4+db]\n dqxx_word |= ((not check_bit(dqcr_word, 4 + db)) << 9)\n # 10 DQXX[10] = DQCR[3]\n dqxx_word |= ((not check_bit(dqcr_word, 3)) << 10)\n # 16 DQXX[16] = DQCH[2]\n dqxx_word |= ((not check_bit(dqch_word, 2)) << 16)\n # 17 DQXX[17] = DQCH[3]\n dqxx_word |= ((not check_bit(dqch_word, 3)) << 17)\n # 18 DQXX[18] = DQCH[4]\n dqxx_word |= ((not check_bit(dqch_word, 4)) << 18)\n # 19 check for maxed-out threshold ( value of 255 )\n threshold = (dqch_word & 0xff0000) >> 16\n if not (threshold == 255):\n dqxx_word |= (1 << 19)\n # 20 
DQXX[20] = DQCH[6]\n dqxx_word |= ((not check_bit(dqch_word, 6)) << 20)\n # 21 DQXX[21] = DQCH[7]\n dqxx_word |= ((not check_bit(dqch_word, 7)) << 21)\n # 22 DQXX[22] = DQCH[8]\n dqxx_word |= ((not check_bit(dqch_word, 8)) << 22)\n # 24 DQXX[24] = DQCR[2]\n dqxx_word |= ((not check_bit(dqcr_word, 2)) << 24)\n # 26 DQXX[26] = DQCR[12+db]\n dqxx_word |= ((not check_bit(dqcr_word, (12 + db))) << 26)\n # 27 DQXX[27] = DQCH[1]\n dqxx_word |= ((not check_bit(dqch_word, 1)) << 27)\n # 28 DQXX[28] = DQCH[0]\n dqxx_word |= ((not check_bit(dqch_word, 0)) << 28)\n # 29 DQXX[29] = DQCH[5]\n dqxx_word |= ((not check_bit(dqch_word, 5)) << 29)\n # 30 DQXX[30] = DQCH[9]\n dqxx_word |= ((not check_bit(dqch_word, 9)) << 30)\n dqxx[lcn] = dqxx_word\n return dqxx", "def quasar_to_dict(q: Circuit) -> Dict:\n return dict(\n instructions=quasar_to_list(q),\n qubits=list(q.qubits),\n times=list(q.times),\n times_and_qubits=list(q.times_and_qubits),\n )", "def test_decode_qdc(self):\n self.assertEqual(td.qdc(), decoder.decode_qdc(BytesIO(td.qdc(True))))", "def test_to_qcschema(self):\n # the molecule has no coordinates so this should fail\n ethanol = Molecule.from_smiles(\"CCO\")\n with pytest.raises(InvalidConformerError):\n qcschema = ethanol.to_qcschema()\n\n # now remake the molecule from the sdf\n ethanol = Molecule.from_file(get_data_file_path(\"molecules/ethanol.sdf\"))\n # make sure that requests to missing conformers are caught\n with pytest.raises(InvalidConformerError):\n qcschema = ethanol.to_qcschema(conformer=1)\n # now make a valid qcschema and check its properties\n qcschema = ethanol.to_qcschema(extras={\"test_tag\": \"test\"})\n # make sure the properties match\n charge = 0\n connectivity = [\n (0, 1, 1.0),\n (0, 4, 1.0),\n (0, 5, 1.0),\n (0, 6, 1.0),\n (1, 2, 1.0),\n (1, 7, 1.0),\n (1, 8, 1.0),\n (2, 3, 1.0),\n ]\n symbols = [\"C\", \"C\", \"O\", \"H\", \"H\", \"H\", \"H\", \"H\", \"H\"]\n\n def assert_check():\n assert charge == qcschema.molecular_charge\n assert connectivity == qcschema.connectivity\n assert symbols == qcschema.symbols.tolist()\n assert (\n qcschema.geometry.all() == ethanol.conformers[0].m_as(unit.bohr).all()\n )\n\n assert_check()\n assert qcschema.extras[\"test_tag\"] == \"test\"\n assert qcschema.extras[\n \"canonical_isomeric_explicit_hydrogen_mapped_smiles\"\n ] == ethanol.to_smiles(mapped=True)\n # # now run again with no extras passed, only cmiles entry will be present with fix-720\n qcschema = ethanol.to_qcschema()\n assert_check()\n assert qcschema.extras[\n \"canonical_isomeric_explicit_hydrogen_mapped_smiles\"\n ] == ethanol.to_smiles(mapped=True)", "def __init__(self, quant_arc_interface):\n\n super().__init__()\n self.args = quant_arc_interface.args \n self.q_params = nn.Parameter(self.args.q_delta * torch.randn(self.args.q_depth * self.args.n_qubits))\n self.qai = quant_arc_interface", "def _gen_chamnet_v2(channel_multiplier, num_classes=1000, **kwargs):\n arch_def = [\n ['ir_r1_k3_s1_e1_c24'],\n ['ir_r4_k5_s2_e8_c32'],\n ['ir_r6_k7_s2_e5_c48'],\n ['ir_r3_k5_s2_e9_c56'],\n ['ir_r6_k3_s1_e6_c56'],\n ['ir_r6_k3_s2_e2_c152'],\n ['ir_r1_k3_s1_e6_c112'],\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def),\n num_classes=num_classes,\n stem_size=32,\n num_features=1280, # no idea what this is? 
try mobile/mnasnet default?\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n **kwargs\n )\n return model", "def __init__(self, qc_mol, br_mol, pc_mol):\n assert isinstance(qc_mol, CMolecule)\n assert isinstance(br_mol, CMolecule)\n assert isinstance(pc_mol, CMolecule)\n\n self.qc_mol = qc_mol\n self.br_mol = br_mol\n self.pc_mol = pc_mol", "def __init__(self, qubit, circ=None):\n super().__init__(\"s\", [], [qubit], circ)", "def quantum_geometric_tensor(self, qgt_type):\n raise NotImplementedError # pragma: no cover", "def create_book_ticker_channel(self, symbol: str) -> str:", "def construct_circuit(self, x, qr=None, inverse=False):\n if not isinstance(x, np.ndarray):\n raise TypeError(\"x must be numpy array.\")\n if x.ndim != 1:\n raise ValueError(\"x must be 1-D array.\")\n if x.shape[0] != self._num_qubits:\n raise ValueError(\"number of qubits and data dimension must be the same.\")\n if qr is None:\n qr = QuantumRegister(self._num_qubits, name='q')\n qc = self._constructor_function(x, qr, inverse, *self._feature_param)\n #qc.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/custom.png')\n return qc", "def test_qcschema_molecule_record_round_trip_from_to_from(self):\n\n # get a molecule qcschema\n import qcportal as ptl\n\n client = ptl.FractalClient()\n\n record = client.query_molecules(molecular_formula=\"C16H20N3O5\")[0]\n\n # now make the molecule from the record instance with the geometry\n mol_qca_record = Molecule.from_qcschema(record, client)\n off_qcschema = mol_qca_record.to_qcschema()\n mol_using_from_off_qcschema = Molecule.from_qcschema(off_qcschema)\n\n assert_molecule_is_equal(\n mol_qca_record,\n mol_using_from_off_qcschema,\n \"Molecule roundtrip to/from_qcschema failed\",\n )", "def crp_to_dcm(q):\n s = q @ q\n return (1/(1 + s))*((1 - s)*np.identity(3) + 2*np.outer(q, q) - 2*ut.cross_product_operator(q))", "def get_channel_info(self):\n items = [('channel_number', int),\n ('range', float),\n ('sampling_rate', float),\n ('digitisation', float),\n ('offset', float),\n ]\n\n attrs = self['/UniqueGlobalKey/channel_id'].attrs\n info = {key: converter(attrs[key]) for key, converter in items}\n new_names = [('range','channel_range'),\n ('sampling_rate', 'channel_sampling_rate'),\n ('digitisation', 'channel_digitisation'),\n ('offset', 'channel_offset'),\n ]\n for old, new in new_names:\n info[new] = info[old]\n del info[old]\n return info", "def tensor_to_qobj(tensor, output_labels, input_labels):\n\n output_dims = []\n input_dims = []\n t = tensor.copy()\n\n if not isinstance(output_labels, list):\n output_labels=[output_labels]\n if not isinstance(input_labels, list):\n input_labels=[input_labels]\n # order the indices according to output_labels and input_labels\n for i, label in enumerate(output_labels+input_labels):\n if label is None:\n label = 'dummy'+str(i)\n t.add_dummy_index(label, i)\n t.move_index(label, i)\n if i < len(output_labels):\n output_dims.append(t.shape[i])\n else:\n input_dims.append(t.shape[i])\n\n output_labels_new = [l if l is not None else 'dummy'+str(i)\n for i,l in enumerate(output_labels)]\n\n data = tn.tensor_to_matrix(t, output_labels_new)\n dims = [output_dims, input_dims]\n return qt.Qobj(data, dims=dims)", "def encode(msg: Message) -> bytes:\n msg = cast(MlTradeMessage, msg)\n message_pb = ProtobufMessage()\n dialogue_message_pb = DialogueMessage()\n ml_trade_msg = ml_trade_pb2.MlTradeMessage()\n\n dialogue_message_pb.message_id = msg.message_id\n dialogue_reference = msg.dialogue_reference\n 
dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]\n dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]\n dialogue_message_pb.target = msg.target\n\n performative_id = msg.performative\n if performative_id == MlTradeMessage.Performative.CFP:\n performative = ml_trade_pb2.MlTradeMessage.Cfp_Performative() # type: ignore\n query = msg.query\n Query.encode(performative.query, query)\n ml_trade_msg.cfp.CopyFrom(performative)\n elif performative_id == MlTradeMessage.Performative.TERMS:\n performative = ml_trade_pb2.MlTradeMessage.Terms_Performative() # type: ignore\n terms = msg.terms\n Description.encode(performative.terms, terms)\n ml_trade_msg.terms.CopyFrom(performative)\n elif performative_id == MlTradeMessage.Performative.ACCEPT:\n performative = ml_trade_pb2.MlTradeMessage.Accept_Performative() # type: ignore\n terms = msg.terms\n Description.encode(performative.terms, terms)\n tx_digest = msg.tx_digest\n performative.tx_digest = tx_digest\n ml_trade_msg.accept.CopyFrom(performative)\n elif performative_id == MlTradeMessage.Performative.DATA:\n performative = ml_trade_pb2.MlTradeMessage.Data_Performative() # type: ignore\n terms = msg.terms\n Description.encode(performative.terms, terms)\n payload = msg.payload\n performative.payload = payload\n ml_trade_msg.data.CopyFrom(performative)\n else:\n raise ValueError(\"Performative not valid: {}\".format(performative_id))\n\n dialogue_message_pb.content = ml_trade_msg.SerializeToString()\n\n message_pb.dialogue_message.CopyFrom(dialogue_message_pb)\n message_bytes = message_pb.SerializeToString()\n return message_bytes", "def _gen_mixnet_s(channel_multiplier=1.0, num_classes=1000, **kwargs):\n arch_def = [\n # stage 0, 112x112 in\n ['ds_r1_k3_s1_e1_c16'], # relu\n # stage 1, 112x112 in\n ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu\n # stage 2, 56x56 in\n ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish\n # stage 3, 28x28 in\n ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish\n # stage 4, 14x14in\n ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish\n # stage 5, 14x14in\n ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish\n # 7x7\n ]\n model = GenEfficientNet(\n _decode_arch_def(arch_def),\n num_classes=num_classes,\n stem_size=16,\n num_features=1536,\n channel_multiplier=channel_multiplier,\n bn_args=_resolve_bn_args(kwargs),\n act_fn=F.relu,\n **kwargs\n )\n return model", "def Q2Mat(q0,q1,q2,q3):\n m=np.matrix([[1-2*q2**2-2*q3**2,2*q1*q2+2*q0*q3,2*q1*q3-2*q0*q2],\n [2*q1*q2-2*q0*q3,1-2*q1**2-2*q3**2,2*q2*q3+2*q0*q1],\n [2*q1*q3+2*q0*q2,2*q2*q3-2*q0*q1,1-2*q1**2-2*q2**2]])\n return m", "def QCNN_layer_gen(self):\n pixels = self.filter_size**2\n # filter size: 2^n only for this version!\n if np.log2(pixels) % 1 != 0:\n raise NotImplementedError(\"filter size: 2^n only available\")\n cirq_qubits = cirq.GridQubit.rect(self.filter_size, self.filter_size)\n # mapping input data to circuit\n input_circuit = cirq.Circuit()\n input_params = [sympy.symbols('a%d' %i) for i in range(pixels)]\n for i, qubit in enumerate(cirq_qubits):\n input_circuit.append(cirq.rx(np.pi*input_params[i])(qubit))\n # apply learnable gate set to QCNN circuit\n QCNN_circuit = cirq.Circuit()\n step_size = [2**i for i in range(np.log2(pixels).astype(np.int32))]\n for step in step_size:\n for target in range(0, pixels, 
2*step):\n QCNN_circuit.append(self._QConv(step, target, cirq_qubits))\n # merge the circuits\n full_circuit = cirq.Circuit()\n full_circuit.append(input_circuit)\n full_circuit.append(QCNN_circuit)\n self.circuit = full_circuit # save circuit to the QCNN layer obj.\n self.params = input_params + self.learning_params\n self.op = cirq.Z(cirq_qubits[0])", "def test_molecule_subclass_from_inchi(self):\n mol = MyMol.from_inchi(\"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3\")\n assert isinstance(mol, MyMol)", "def create_channel_dict(self, chan_name, clim=None, first_chan=False):\n if chan_name == 'Retardance':\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else 1000.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 100.0\n elif chan_name == 'Orientation':\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else np.pi\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else np.pi\n\n elif chan_name == 'Phase3D':\n min = clim[2] if clim else -10.0\n max = clim[3] if clim else 10.0\n start = clim[0] if clim else -0.2\n end = clim[1] if clim else 0.2\n\n elif chan_name == 'BF':\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else 65535.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 5.0\n\n elif chan_name == 'S0':\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else 65535.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 1.0\n\n elif chan_name == 'S1':\n min = clim[2] if clim else 10.0\n max = clim[3] if clim else -10.0\n start = clim[0] if clim else -0.5\n end = clim[1] if clim else 0.5\n\n elif chan_name == 'S2':\n min = clim[2] if clim else -10.0\n max = clim[3] if clim else 10.0\n start = clim[0] if clim else -0.5\n end = clim[1] if clim else 0.5\n\n elif chan_name == 'S3':\n min = clim[2] if clim else -10\n max = clim[3] if clim else 10\n start = clim[0] if clim else -1.0\n end = clim[1] if clim else 1.0\n\n else:\n min = clim[2] if clim else 0.0\n max = clim[3] if clim else 65535.0\n start = clim[0] if clim else 0.0\n end = clim[1] if clim else 65535.0\n\n dict_ = {'active': first_chan,\n 'coefficient': 1.0,\n 'color': 'FFFFFF',\n 'family': 'linear',\n 'inverted': False,\n 'label': chan_name,\n 'window': {'end': end, 'max': max, 'min': min, 'start': start}\n }\n\n return dict_", "def test_motohawk_encode_decode(self):\n\n db = cantools.db.File()\n filename = os.path.join('tests', 'files', 'motohawk.dbc')\n db.add_dbc_file(filename)\n\n example_message_frame_id = 496\n\n # Encode with non-enumerated values.\n data = {\n 'Temperature': 250.55,\n 'AverageRadius': 3.2,\n 'Enable': 1\n }\n\n encoded = db.encode_message(example_message_frame_id, data)\n self.assertEqual(encoded, b'\\xc1\\x1b\\x00\\x00\\x00\\x00\\x00\\x00')\n\n # Encode with enumerated values.\n data = {\n 'Temperature': 250.55,\n 'AverageRadius': 3.2,\n 'Enable': 'Enabled'\n }\n\n encoded = db.encode_message(example_message_frame_id, data)\n self.assertEqual(encoded, b'\\xc1\\x1b\\x00\\x00\\x00\\x00\\x00\\x00')\n\n decoded = db.decode_message(example_message_frame_id, encoded)\n self.assertEqual(decoded, data)", "def _create_qat_model(self, dataset):\n if not self._optimized_model:\n logger.error('Should call `optimize_model()` before `_create_qat_model`.')\n self._qat_model, self._layer_metadata = self._quantizer.create_quantize_model(\n self._optimized_model,\n candidate_layers=self._candidate_layers,\n layer_metadata=self._layer_metadata,\n quantize_strategy=self._quantize_strategy,\n mode='QAT',\n target=self._target,\n dataset=dataset)", "def 
fromMol(cls, m):\n t = cls()\n ### MINIMUM ATTRIBUTE SETUP ###\n ###TBI: all relevant non-method attributes retrieved ###\n t.set_natoms(m.natoms)\n t.set_xyz(m.xyz)\n t.set_conn(m.conn)\n t.set_elems(m.elems)\n t.set_atypes(m.atypes)\n t.set_cell(m.cell)\n t.add_pconn()\n return t", "def circuit_from_qasm_file(qasm_file, name=None,\n basis_gates=\"id,u0,u1,u2,u3,x,y,z,h,s,sdg,t,tdg,rx,\"\n \"ry,rz,cx,cy,cz,ch,crz,cu1,cu3,swap,ccx,\"\n \"cswap\"):\n if not os.path.exists(qasm_file):\n raise QISKitError('qasm file \"{0}\" not found'.format(qasm_file))\n if not name:\n name = os.path.splitext(os.path.basename(qasm_file))[0]\n\n with open(qasm_file) as file:\n qasm_data = file.read()\n\n return circuit_from_qasm_string(\n qasm_data, name=name, basis_gates=basis_gates)", "def conj(q_1: Q, conj_type: int = 0) -> Q:\n\n end_q_type = f\"{q_1.q_type}*\"\n c_t, c_x, c_y, c_z = q_1.t, q_1.x, q_1.y, q_1.z\n cq = Q()\n\n if conj_type % 4 == 0:\n cq.t = c_t\n if c_x != 0:\n cq.x = -1 * c_x\n if c_y != 0:\n cq.y = -1 * c_y\n if c_z != 0:\n cq.z = -1 * c_z\n\n elif conj_type % 4 == 1:\n if c_t != 0:\n cq.t = -1 * c_t\n cq.x = c_x\n if c_y != 0:\n cq.y = -1 * c_y\n if c_z != 0:\n cq.z = -1 * c_z\n end_q_type += \"1\"\n\n elif conj_type % 4 == 2:\n if c_t != 0:\n cq.t = -1 * c_t\n if c_x != 0:\n cq.x = -1 * c_x\n cq.y = c_y\n if c_z != 0:\n cq.z = -1 * c_z\n end_q_type += \"2\"\n\n elif conj_type % 4 == 3:\n if c_t != 0:\n cq.t = -1 * c_t\n if c_x != 0:\n cq.x = -1 * c_x\n if c_y != 0:\n cq.y = -1 * c_y\n cq.z = c_z\n end_q_type += \"3\"\n\n cq.q_type = end_q_type\n cq.representation = q_1.representation\n\n return cq", "def scalar_q(q_1: Q) -> Q:\n\n end_q_type = f\"scalar_q({q_1.q_type})\"\n s = Q([q_1.t, 0, 0, 0], q_type=end_q_type, representation=q_1.representation)\n return s", "def itkMeshSourceMD2Q_cast(*args):\n return _itkMeshSourcePython.itkMeshSourceMD2Q_cast(*args)", "def __init__(self,l,options):\n #### Setup options\n self.options = options\n # For execution\n self.shots = 1000 if options.get('shots') == None\\\n else options.get('shots')\n self.seed = options.get('seed')\n if self.seed != None:\n from qiskit.aqua import aqua_globals\n aqua_globals.random_seed = self.seed\n self.prnt = options.get('print')\n self.ancilla_measure = options.get('ancilla') if options.get('ancilla') != None else False\n\n self.ibmq = False\n if options.get('ibmq') == True:\n print('Running on real quantum computer')\n self.ibmq = True\n self.backend = options['backend']\n from qiskit.tools.monitor import job_monitor\n self.monitor = job_monitor\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n None,\n self.shots)\n \n else:\n # For Backend\n if options.get('backend') == None:\n self.options['backend'] = 'qasm_simulator' \n self.backend = qk.Aer.get_backend(options['backend'])\n # For noise model, coupling map and basis gates\n self.noise_model, self.coupling_map, self.basis_gates = None,None,None\n self.meas_fitter = None\n if options.get('device') != None:\n device = QuantumComputer(options.get('device'))\n if options.get('noise_model') != None:\n self.noise_model = device.noise_model\n # Create error mitigation fitter\n if options.get('meas_fit') in [None,True]:\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n device,\n self.shots)\n if options.get('coupling_map') != None:\n self.coupling_map = device.coupling_map\n if options.get('basis_gates') != None:\n self.basis_gates = 
device.basis_gates\n # Qubit layout, virtual to physical\n self.layout = options.get('layout')\n # Optimization level\n self.optimization_level= 1 if options.get('optimization_level')==None else options['optimization_level']\n\n # GPU accelerated\n if options.get('gpu'):\n from qiskit_qcgpu_provider import QCGPUProvider\n Provider = QCGPUProvider()\n self.backend = Provider.get_backend(options['backend'])" ]
[ "0.68651897", "0.6369858", "0.6249039", "0.6095439", "0.5532425", "0.5279432", "0.5224014", "0.5138072", "0.5133675", "0.5082203", "0.5018223", "0.49832156", "0.49755138", "0.49718896", "0.48944896", "0.48510703", "0.48189393", "0.48128456", "0.47943678", "0.478117", "0.47810757", "0.47771052", "0.47602123", "0.4755534", "0.47376066", "0.4717486", "0.4708179", "0.47061953", "0.46948394", "0.468719", "0.4669291", "0.46679318", "0.4657026", "0.4652889", "0.4638156", "0.46289974", "0.46227428", "0.46041524", "0.4595236", "0.45936942", "0.4593039", "0.45815694", "0.4578965", "0.45768273", "0.4568929", "0.45627683", "0.4560154", "0.4548416", "0.45461574", "0.45425776", "0.45422003", "0.4529647", "0.45239154", "0.45169726", "0.44914353", "0.44802153", "0.4479823", "0.44792795", "0.44755897", "0.44646037", "0.44618747", "0.44602415", "0.44522616", "0.4452248", "0.44497278", "0.44390112", "0.44362563", "0.44353357", "0.44270262", "0.44269386", "0.44230285", "0.44215104", "0.44177994", "0.4412989", "0.44065276", "0.4402448", "0.44002602", "0.43978003", "0.43914932", "0.4386254", "0.4383504", "0.4370744", "0.43692207", "0.43685547", "0.4367578", "0.4366994", "0.43653032", "0.43615136", "0.4358569", "0.4355304", "0.4354866", "0.43536425", "0.43506137", "0.43470272", "0.43458894", "0.4344405", "0.43382898", "0.4337748", "0.43366542", "0.4331236" ]
0.6979349
0
This method returns a sentence with the style of the furniture. >>> furniture1 = Furniture("empire", "bedroom") >>> assert(str(furniture1) == "")
def __str__(self): return "<furniture style is " + str(self.style) + ">"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_furniture():", "def __str__(self):\n return \"<This armchair furniture style is \" + str(self.style) + \">\"", "def new_fountain():\n fountain = Composite()\n set_dungeon_feature_components(fountain)\n fountain.set_child(Description(\"Fountain\",\n (\"A Fountain full of clean water\",\n \"surely you will become more\",\n \"healthy by drinking this.\")))\n fountain.set_child(GraphicChar(None, colors.CYAN, icon.FOUNTAIN_FULL))\n fountain.set_child(DrinkFromFountainAction())\n return fountain", "def fusionne(self, new):\n if new == self:\n raise ValueError(\"un titre ne peut être fusionné avec lui même\")\n self.alters_data = True\n if not isinstance(new, type(self)):\n raise TypeError(\"pas la même classe d'objet\")\n if self.type != new.type:\n raise TypeError(\"pas le même type de titre\")\n for cours in self.cours_set.all():\n try:\n if new.cours_set.get(date=cours.date).valeur != cours.valeur:\n raise Gsb_exc(\n \"attention les titre %s et %s ne peuvent etre fusionné à cause histo de cours\" % (self, new))\n except Cours.DoesNotExist:\n new.cours_set.create(date=cours.date, valeur=cours.valeur)\n nb_change = 0\n nb_change += Ope_titre.objects.filter(titre=self).update(titre=new)\n # on doit aussi reaffecter le tiers associe\n self.tiers.fusionne(new.tiers, ok_titre=True)\n self.delete()\n return nb_change", "def fix_cf_31(self, file, error_message):\n\n error_info = \"{} - not CF compliant units\".format(error_message)\n assert file.split('_')[0] == 'sos'\n self.ncatt._run_ncatted('units', 'sos', 'o', 'c', '1.e-3', file)\n dt_string = datetime.datetime.now().strftime('%Y-%m%d %H:%M:%S')\n methods_history_update_comment = \"\\n{}: CP4CDS project changed units from PSU to 1.e-3 to be CF compliant\".format(\n dt_string)\n self.ncatt._run_ncatted('history', 'sos', 'a', 'c', methods_history_update_comment, file, noHistory=True)\n return error_info", "def button_fac_cob_ent(self):\n invoice = self._fac_ent()\n\n # pagar la factura\n # hacer configuracion para modificar esto\n receipt_obj = self.env['account.voucher.receiptbook']\n receipt = receipt_obj.search([('name', 'like', 'Recibos')], limit=1)\n\n journal = self.journal_id\n res = invoice.invoice_pay_customer()\n context = res['context']\n\n account_voucher_obj = self.env['account.voucher']\n voucher = account_voucher_obj.create({\n 'partner_id': context['default_partner_id'],\n 'journal_id': journal.id,\n 'account_id': journal.default_debit_account_id.id,\n 'type': context['type'],\n 'amount': context['default_amount'],\n 'net_amount': context['default_amount'],\n 'receiptbook_id': receipt.id,\n 'company_id': self.env.user.company_id.id\n })\n voucher.signal_workflow('proforma_voucher')\n\n account_move_line_obj = self.env['account.move.line']\n\n # obtener un recordser vacio\n lines2rec = account_move_line_obj.browse()\n\n # obtener las lineas a conciliar de facturas\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', invoice.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n # obtener las lineas a conciliar de pagos\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', voucher.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n period_obj = self.env['account.period']\n period = period_obj.find()\n\n # reconciliar las lineas de factura con pagos\n lines2rec.reconcile('manual',\n journal.default_debit_account_id.id, # writeoff_acc_id\n period.id, # writeoff_period_id,\n 
journal.id) # writeoff_journal_id)\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }", "def done_solidarity(self, cr, uid, ids,context=None):\n record = self.browse(cr, uid, ids, context=context)[0]\n if record.type == 'solidarity':\n employee = record.employee_id.id\n amount = record.amount\n category_obj = self.pool.get('enrich.category')\n\n category = record.enrich_category\n max_amount = category.amount\n max_times = category.times\n\n rest_money = category_obj.search(cr, uid, [('type', '=', 'sol_special')], context=context)\n rest_money = category_obj.browse(cr, uid, rest_money, context=context)\n \n if not rest_money:\n raise osv.except_osv(_('Constraint Error'), _(\"The No Residual Money!\"))\n\n if category.operation_type == 'deposit':\n #increese the rest_money\n rest_money[0].deposit(amount)\n #rest_money[0].write({'amount':rest_money[0].amount + amount})\n if category.operation_type == 'withdrawal': \n #get the amount of the first record in the configuration\n rest_money_amount = rest_money[0].amount\n\n\n if amount > max_amount:\n raise osv.except_osv(_('Constraint Error'), _(\"The the amount is greater than the amount in the category!\"))\n \n #times when this employee get money from this category\n old_times = self.search(cr, uid, [('employee_id', '=', employee), ('type', '=', 'solidarity'), ('enrich_category', '=', category.id), ('state', '=', 'done')])\n times = 1\n if old_times:\n times += len(old_times)\n if times > max_times:\n raise osv.except_osv(_('Constraint Error'), _(\"This employee get all chances of this category!\"))\n\n #rest_money[0].write({'amount':rest_money[0].amount - amount})\n rest_money[0].withdraw(amount)\n return self.write(cr, uid, ids, {'state':'done',}, context=context)", "def calculDeFraisPortuaire():\n TARIFMENSUEL1 = 100\n TARIFMENSUEL2 = 200\n TARIFMENSUEL3 = 400\n TARIFMENSUEL4 = 600\n TAXESPECIALEANNUELLEVOILIERCATEGORIE1 = 100\n TAXESPECIALEANNUELLEVOILIERCATEGORIE2 = 150\n TAXESPECIALEANNUELLEVOILIERCATEGORIE3 = 250\n \n coutMensuel = 0\n coutAnnuel = 0\n taxeSpecialeAnnuelle = 0\n nomDuVoilier = input(\"ENTREZ le nom du voilier: \")\n longueur = float(input(\"Entrez la longueur du voilier: \"))\n categorie = int(input(\"Entrez la categorie du voilier 1 2 ou 3 : \"))\n \n if(longueur<5):\n coutMensuel = TARIFMENSUEL1\n elif(longueur<=10):\n coutMensuel = TARIFMENSUEL2\n elif(longueur<=12):\n coutMensuel = TARIFMENSUEL3\n else:\n coutMensuel = TARIFMENSUEL4\n \n if(categorie==1):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE1\n elif(categorie==2):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE2\n elif(categorie==3):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE3\n \n coutAnnuel = taxeSpecialeAnnuelle+coutMensuel*12\n \n return \"le coût annuel d’une place au port pour le voilier \"+nomDuVoilier+\" est de \"+ str(coutAnnuel)+\" euros\"", "def close(self):\n try:\n return self.cleaning('Cierre')\n except:\n return self.cleaning('Último')", "def clean_firstrade(df):\n # set column names\n df.columns = df.iloc[0]\n # drop unnecessary rows\n df = df[1:-3]\n # select columns we need\n df = df.loc[:, [\n 'Symbol', 'Quantity', 'Date Acquired', 'Date Sold', 'Sales Proceeds',\n 'Cost'\n ]]\n # clean gain/loss in string, and convert them to float\n df['Sales'] = df['Sales Proceeds'].apply(clean_dollar_to_float).astype(\n 
'float')\n df = df.drop(['Sales Proceeds'], axis=1)\n df['Cost'] = df['Cost'].apply(clean_dollar_to_float).astype('float')\n # clean datetime format\n df['Date Acquired'] = df['Date Acquired'].apply(clean_date_firstrade)\n df['Date Sold'] = df['Date Sold'].apply(clean_date_firstrade)\n return df", "def add_furniture(itemcode, description, marketprice, rentalprice):\n\n material = input(\"Enter item material: \")\n size = input(\"Enter item size (S,M,L,XL): \")\n newitem = Furniture(itemcode, description,\n marketprice, rentalprice\n , material, size)\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def __str__(self) -> str:\n return (\n f\"offre de {self.beneficiaire} sur {self.proposition} \"\n \"(cagnotte {self.proposition.cagnotte})\"\n )", "def afficher(self):\n bordRect = (self.pos[0]-5, self.pos[1]-5, self.dim[0]+5, self.dim[1]+5)\n Fond = pygame.draw.rect(self.ecran.surface, self.ecran.couleur, bordRect, 0) # Efface le precedant text\n\n rang = 0\n verif = \"\"\n compteur = 0\n self.lignes = []\n if self.txt == \"\": self.txt = \" \"\n \n while verif != self.txt:\n verif =\"\"\n rang += self.correction(self.txt[rang:], compteur)\n compteur += 1\n for k in self.lignes:\n verif += k.txt\n\n for compteur in range(len(self.lignes)):\n self.lignes[compteur].afficher()\n\n self.dim = (self.dim[0], self.hLigne*(compteur+1)) # +1 -> Boucle for\n \n pygame.display.flip()", "def __str__(self):\n return f'{self._name} has {self._calories} calories, {self._carbohydrates}' +\\\n f'g. carbohydrates, {self._fat}g. of fat and {self._proteins}g. of proteins'", "def factura(num_factura):\n cursor_factura = connectiondb.cursor()\n sql_factura = \"\"\"SELECT * FROM Factura\n WHERE num_factura = '%s'\"\"\" % num_factura\n cursor_factura.execute(sql_factura)\n factura = cursor_factura.fetchone()\n cursor_factura.close()\n\n cursor_items_factura = connectiondb.cursor()\n sql_items_factura = \"\"\"SELECT * FROM ItemsFactura\n WHERE num_factura = '%s'\"\"\" % num_factura\n cursor_items_factura.execute(sql_items_factura)\n items_factura = cursor_items_factura.fetchall()\n cursor_items_factura.close()\n\n cursor_parametros = connectiondb.cursor()\n sql_parametros = \"\"\"SELECT * FROM ParametrosFactura\"\"\"\n cursor_parametros.execute(sql_parametros)\n parametros = cursor_parametros.fetchone()\n cursor_parametros.close()\n\n # Formatea el numero de factura para la resolución\n cero = '0'\n param_num_fac_ini = str(parametros['num_fac_ini'])\n param_num_fac_fin = str(parametros['num_fac_fin'])\n while len(param_num_fac_ini) < len(param_num_fac_fin):\n param_num_fac_ini = cero[:] + param_num_fac_ini\n\n # Formatea el numero de factura\n numero_factura = str(factura['num_factura'])\n ultm_consecutivo = str(parametros['num_fac_fin'])\n\n while len(numero_factura) < len(ultm_consecutivo):\n numero_factura = cero[:] + numero_factura\n\n # Datos del cliente\n cursor_datos_cliente = connectiondb.cursor()\n sql_datos_cliente = \"\"\"SELECT Factura.num_factura, Cliente.nombre_cliente,\n Cliente.identificacion_cliente, Cliente.direccion,\n Cliente.ciudad, Cliente.telefono\n FROM Factura\n INNER JOIN Cliente\n ON Factura.identificacion_cliente = Cliente.identificacion_cliente\n WHERE Factura.num_factura = '{}'\n ORDER BY Factura.num_factura DESC;\"\"\".format(factura['num_factura'])\n cursor_datos_cliente.execute(sql_datos_cliente)\n datos_cliente = cursor_datos_cliente.fetchone()\n cursor_datos_cliente.close()\n\n return render_template('factura.html',\n 
parametros=parametros,\n param_num_fac_ini=param_num_fac_ini,\n datos_cliente=datos_cliente,\n factura=factura,\n numero_factura=numero_factura,\n items_factura=items_factura,\n name=current_user.username)", "def fusionne(self, new):\n if new == self:\n raise ValueError(\"un exercice ne peut etre fusionné avec lui même\")\n self.alters_data = True\n if not isinstance(new, type(self)):\n raise TypeError(\"pas la même classe d'objet\")\n nb_change = Echeance.objects.filter(exercice=self).update(exercice=new)\n nb_change += Ope.objects.filter(exercice=self).update(exercice=new)\n if self.date_debut != new.date_debut:\n new.date_debut = min(new.date_debut, self.date_debut)\n if self.date_fin != new.date_fin:\n new.date_fin = max(new.date_fin, self.date_fin)\n new.save()\n self.delete()\n return nb_change", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")", "def limpiarentry(fila):\n for i in range(len(fila)):\n fila[i].set_text('')", "def limpiarentry(fila):\n for i in range(len(fila)):\n fila[i].set_text('')", "def limpiarentry(fila):\n for i in range(len(fila)):\n fila[i].set_text('')", "def __init__(self, name, cash=1000000):\n self.name = name\n self.cash = cash\n self.term = 7\n self.stock_money = 0.40 * cash\n self.margin_money = 0.40 * cash\n self.fee = 30\n self.portfolios = list()\n self.maintenance_threshold = 0.25", "def clean_data(gross):\n\n # modify data type\n gross['week_ending'] = pd.to_datetime(gross['week_ending']).dt.strftime('%Y-%m-%d')\n gross['label'] = [1 if x >= 0 else 0 for x in gross['diff_in_dollars']]\n # ain't too proud has weird special characters\n gross['show'] = [x.split('¡')[0] for x in gross['show']]\n # beautiful carol king has different names\n gross['show'] = [re.sub(r'\\s+', ' ', x.translate(\n str.maketrans(string.punctuation, ' ' * len(string.punctuation))).lower().strip()) for x in gross['show']]\n gross.loc[gross['show'] == 'beautiful the carole king musical', 'show'] = 'beautiful'\n\n return gross", "def new_blood_fountain():\n fountain = Composite()\n set_dungeon_feature_components(fountain)\n fountain.set_child(Description(\"Fountain Of Sacrifice\",\n (\"The fountain is filled thick red liquid.\",\n \"You have a feeling that it calls out for you.\")))\n fountain.set_child(GraphicChar(None, colors.RED, icon.FOUNTAIN_FULL))\n fountain.set_child(SacrificeFountainAction())\n return fountain", "def draw(self):\r\n #if the UFO has only 1 life left, turn it red\r\n if(self.life <= 1):\r\n TARGET_UFO_COLOR = arcade.color.RED\r\n #If UFO has more than 1 life left, keep it silver\r\n else:\r\n TARGET_UFO_COLOR = arcade.color.SILVER\r\n arcade.draw_circle_outline(self.center.x, self.center.y, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 3)\r\n arcade.draw_ellipse_filled(self.center.x, self.center.y, TARGET_UFO_WIDTH, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 15)", "def processa_compra(self, compra):\n\n print(\"Boleto criado!\\n\" + compra.nota_fiscal)", "def __init__(self, marqueur, allele, hauteur, concordance_mere_foetus, informatif, num_foetus, contamination, taux):\n\n super().__init__(marqueur, allele, hauteur, informatif)\n self.num_foetus = num_foetus\n self.contamination = contamination\n self.taux = taux\n self.concordance_mere_foetus = concordance_mere_foetus", "def 
_reset(self):\n\t\tself._style = TextStyle()", "def imprime(nota_fiscal):\n\n print(\"Imprimindo nota fiscal %s\" % nota_fiscal.cnpj)", "def test_celcius_is_converted_to_farenheit(self):\n\t\tactual = convert_celcius_to_farenheit(10)\n\t\texpected = 50\n\t\tself.assertEqual(actual, expected, 'celcius should convert to correct farenheit')\n\t\tself.assertEqual(convert_celcius_to_farenheit(20), 68, 'celcius should convert to correct farenheit')", "def setUp(self):\n self.item = Furniture('11', 'sofa', '4', '5', 'suede', 'xl')", "def afficher(self):\n self.text = Texte(self.ecran, self.txt, self.police, self.taille, self.pos, self.couleurText) # Actualise\n self.textRect = self.text.textRend.get_rect() # Actualise\n self.dim = (self.textRect.w + 5, self.textRect.h + 5) # Actualise\n self.bordRect = (self.pos[0]-5, self.pos[1]-5, self.dim[0]+5, self.dim[1]+5) # -5 pour le faire partir de plus loin /// +5 pour le faire aller plus loin\n Fond=pygame.draw.rect(self.ecran.surface, self.couleurFond, self.bordRect, 0) # fond\n Bords=pygame.draw.rect(self.ecran.surface, (0,0,0), self.bordRect, 1) # bord\n self.text.afficher() # Affiche le texte", "def cria_carro_fiat(self):\n\n self.total_de_carros_fiat += 1\n print(\"Carro Fiat #\", self.total_de_carros_fiat, \" criado\")", "def IC(stock):\n return Invested_Book_Capital(stock)", "def fusionne(self, new):\n if new == self:\n raise ValueError(\"une catégorie ne peut être fusionnée avec elle même\")\n self.alters_data = True\n if not isinstance(new, type(self)):\n raise TypeError(\"pas la même classe d'objet\")\n if self.type != new.type:\n raise TypeError(\"pas le même type de catégorie, %s est %s alors que %s est %s\" % (\n self.nom, self.type, new.nom, new.type))\n nb_change = Echeance.objects.filter(cat=self).update(cat=new)\n nb_change += Ope.objects.filter(cat=self).update(cat=new)\n self.delete()\n return nb_change", "def field_style(field_name, bushfire=None):\r\n if bushfire:\r\n try:\r\n value = getattr(bushfire, field_name)\r\n if field_name == \"dfes_incident_no\":\r\n return \"\" if value else \"color:red;\"\r\n else:\r\n return \"\"\r\n except:\r\n return \"\"\r\n else:\r\n return \"\"", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.food = cuisine_type", "def opening(self):\n return self.cleaning('Apertura')", "def fayyaz(type,noOfSugarSpoon):\n print(\"Preparing \"+type +\" with \"+str(noOfSugarSpoon) +\" spoon of Sugar\")\n print(\"cleaning cups\")\n print(\"filling cups\")\n return type+\" Hazir Hai\"", "def reset_spreadsheet(worksheet, theme_location, fooditem_range):\n\n\t# Clear dinner theme\n\tworksheet.update_acell(theme_location, '')\n\n\t# Clear dinner items\n\trange_of_cells = worksheet.range(fooditem_range)\n\tfor cell in range_of_cells:\n\t cell.value = ''\n\tworksheet.update_cells(range_of_cells)", "def make_cup(drink_name, order_ingredients):\n for item in order_ingredients:\n resources[item] -= order_ingredients[item]\n print(f\"Here is your {drink_name} ☕️Enjoy!\")", "def _str_make(self):\n return self._name if self._fact is None else f\"{self._fact} × {self._name}\"", "def __repr__(self):\n return f\"Fact-Sheet: '{self.title}'\"", "def make_spondaic(self, scansion: str) -> str:\n mark_list = string_utils.mark_list(scansion)\n vals = list(scansion.replace(\" \", \"\"))\n new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]\n corrected = \"\".join(new_vals)\n new_line = list(\" \" * len(scansion))\n for idx, car in enumerate(corrected):\n new_line[mark_list[idx]] = car\n return 
\"\".join(new_line)", "def scraper_notizie(self, contenuto_articoli: list):\n tot_menzioni = []\n for articolo in contenuto_articoli:\n # estraggo qualsisasi frase che menziona il giocatore\n sel_regex = f\"[\\w ,;()'’-]+{self.name}[\\w ,;()'’-]+\"\n results = re.findall(sel_regex, articolo)\n\n for res in results:\n # rimuovo il caso in cui sia solo in un elenco, come ad inizio articoli su ATTACCO\n if not re.search(f\", {self.name},\", res):\n tot_menzioni.append(res)\n if len(tot_menzioni) > 0:\n self.news = \"• \" + \"<br>•\".join(tot_menzioni)", "def cliquer_sur_unité(self):", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name.title()\n\t\tself.cuisine_type = cuisine_type", "def substitute_display(self, category_id, food_id):\n ref = category_id, food_id\n self.cursor.execute(\"\"\" SELECT food.name, store.name,\n food.link_openffacts,\n food.nutriscore, food.description, food.id\n FROM food\n INNER JOIN store_food\n ON food.id = store_food.food_id\n INNER JOIN store\n ON store_food.store_id = store.id\n WHERE food.id IN (SELECT category_food.food_id\n FROM category_food\n WHERE category_food.category_id = %s)\n AND food.id != %s\n ORDER BY food.nutriscore\n LIMIT 1 OFFSET 0\"\"\", ref)\n row = self.cursor.fetchone()\n print(\"Voici un subistitut de votre choix initial : \")\n print(\"Nom du produit : \" + row[0])\n print(\"Grade nutriscore : \" + row[3])\n print(\"Lien OpenFoodFacts : \" + row[2])\n print(\"Magasin(s) : \" + row[1])\n print(\"Description du produit : \" + row[4])\n return row[5]", "def free_cash_flow(df_cashflow):\n\n # Calculate FCF.\n # Because CAPEX is defined as the Disposition *minus* Acquisition of\n # Fixed Assets and Intangibles, we have to add CAPEX to NET_CASH_OPS in\n # order to subtract the Acquisition-part and calculate the Free Cash Flow.\n # This is discussed in more detail in Tutorial 04.\n df_result = df_cashflow[NET_CASH_OPS].fillna(0) \\\n + df_cashflow[CAPEX].fillna(0)\n\n # Rename the result to FCF.\n df_result.rename(FCF, inplace=True)\n\n return df_result", "def __init__(self, nome, tipo):\n self.restaurant_name = nome\n self.cuisine_type = tipo", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_colour='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour= map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def __init__(self, nome, sobrenome, cpf):\n self._nome = nome\n self._sobrenome = sobrenome\n self._cpf = cpf", "def SetStyle(self, start=None, end=None, style=None):\n # Global default styles for all languages\n self.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"face:%(helv)s,size:%(size)d\" % faces)\n self.StyleClearAll() # Reset all to be like the default\n\n # Global default styles for all languages\n self.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"face:%(helv)s,size:%(size)d\" % faces)\n self.StyleSetSpec(stc.STC_STYLE_LINENUMBER, \"back:#C0C0C0,face:%(helv)s,size:%(size2)d\" % faces)\n self.StyleSetSpec(stc.STC_STYLE_CONTROLCHAR, 
\"face:%(other)s\" % faces)\n self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, \"fore:#FFFFFF,back:#0000FF,bold\")\n self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, \"fore:#000000,back:#FF0000,bold\")\n\n # Python styles\n # Default\n self.StyleSetSpec(stc.STC_P_DEFAULT, \"fore:#000000,face:%(helv)s,size:%(size)d\" % faces)\n # Comments\n self.StyleSetSpec(stc.STC_P_COMMENTLINE, \"fore:#007F00,face:%(other)s,size:%(size)d\" % faces)\n # Number\n self.StyleSetSpec(stc.STC_P_NUMBER, \"fore:#007F7F,size:%(size)d\" % faces)\n # String\n self.StyleSetSpec(stc.STC_P_STRING, \"fore:#7F007F\")\n # Single quoted string\n self.StyleSetSpec(stc.STC_P_CHARACTER, \"fore:#7F007F,face:%(helv)s,size:%(size)d\" % faces)\n # Keyword\n self.StyleSetSpec(stc.STC_P_WORD, \"fore:#00007F,bold,size:%(size)d\" % faces)\n # Triple quotes\n self.StyleSetSpec(stc.STC_P_TRIPLE, \"fore:#7F0000,size:%(size)d\" % faces)\n # Triple double quotes\n self.StyleSetSpec(stc.STC_P_TRIPLEDOUBLE, \"fore:#7F0000,size:%(size)d\" % faces)\n # Class name definition\n self.StyleSetSpec(stc.STC_P_CLASSNAME, \"fore:#0000FF,bold,size:%(size)d\" % faces)\n # Function or method name definition\n self.StyleSetSpec(stc.STC_P_DEFNAME, \"fore:#007F7F,bold,size:%(size)d\" % faces)\n # Operators\n self.StyleSetSpec(stc.STC_P_OPERATOR, \"bold,size:%(size)d\" % faces)\n # Identifiers\n self.StyleSetSpec(stc.STC_P_IDENTIFIER, \"fore:#000000,face:%(helv)s,size:%(size)d\" % faces)\n # Comment-blocks\n self.StyleSetSpec(stc.STC_P_COMMENTBLOCK, \"fore:#7F7F7F,size:%(size)d\" % faces)\n # End of line where string is not closed\n self.StyleSetSpec(stc.STC_P_STRINGEOL, \"fore:#000000,face:%(mono)s,back:#E0C0E0,eol,size:%(size)d\" % faces)\n\n self.SetCaretForeground(\"BLUE\")", "def finaliza_nfce_com_xml(self):\r\n mensagem = self.mensagem_promocional()\r\n status = self.finaliza_nfce(mensagem)\r\n info_ultima_nfce = self.informacoes_ultima_nfce() # informacoes ultima nfce emitida\r\n info_ultima_nfce[\"CONTINGENCIA\"] = status # 1- Autorizada, 2- Ctg.Offline, 3- Ctg.OnLine\r\n\r\n\tif status in (1, 2, 3):\r\n # Atualiza serie/nnf no gne.ini para reconfiguracao caso necessario\r\n self.set_param_section_config_ini(args.file, args.loja, 'NNF', info_ultima_nfce['nrnota'])\r\n self.set_param_section_config_ini(args.file, args.loja, 'SERIE', info_ultima_nfce['nrserie'])\r\n\r\n return info_ultima_nfce", "def clue(self):\n if self.item == \"receipt\":\n print(\"The receipt reads that Jay bought 'diltiazem' medication 4 days ago.\")\n print(\"Diltiazem: medication for high blood pressure, when \"\n \"consumed by an individual in large quantities without high blood\"\n \"pressure, can cause heart failure.\")\n else:\n print(\"That is the wrong item!\")", "def template_nfce(self):\r\n #self.verifica_comunicacao() # particionando\r\n self.cancela_nfce_andamento() # Verifica comunicacao 1\r\n self.abre_nfce_com_xml()\r\n self.item_nfce_com_xml()\r\n self.totaliza_nfce_com_xml()\r\n self.pagamento_nfce_com_xml()\r\n #self.informar_valor_imposto()\r\n #self.informar_mensagem_do_imposto()\r\n return self.finaliza_nfce_com_xml()", "def make_coffee(drink_type):\r\n for resource in MENU[drink_type][\"ingredients\"]:\r\n resources[resource] -= MENU[drink_type][\"ingredients\"][resource]\r\n print(f\"Here is your {drink_type} ☕. 
Enjoy!\")", "def new_cent(self, newcent):\n self.r, self.c = newcent\n self.center = (self.r, self.c)\n self.updatedata()", "def graf_F(self):\n vert_funktion(self, typ='D', titel='$Empirische\\; Verteilungsfunktion$' + '\\n ')", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_colour='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.1cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour=map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def makecoffee(drink_name, ingredients):\n for item in ingredients:\n resources[item] -= ingredients[item]\n print(f\"Please enjoy your {drink_name}\")", "def test_add_furniture(self):\n\n add_furniture('invoice.csv', 'Elisa Miles', 'LR04', 'Leather Sofa', 25)\n add_furniture('invoice.csv', 'Edward Data', 'KT78', 'Kitchen Table', 10)\n add_furniture('invoice.csv', 'Alex Gonzales', 'BR02', 'Queen Mattress', 17)\n\n # Generate list of rentals\n with open('invoice.csv', 'r') as csvfile:\n rentals = []\n for row in csvfile:\n rentals.append(row)\n\n print(rentals)\n\n # Assert statements\n self.assertEqual(rentals[0], ('Elisa Miles,LR04,Leather Sofa,25\\n'))\n self.assertEqual(rentals[1], ('Edward Data,KT78,Kitchen Table,10\\n'))\n self.assertEqual(rentals[2], ('Alex Gonzales,BR02,Queen Mattress,17\\n'))", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja", "def final_info_printing(self, title_string, amount_to_display):\n self.__string_to_print = f\"{title_string} \" \\\n f\"{'.' * (40 - len(title_string))} \" \\\n f\"$ {'.' 
* (11 - len('{:0,.2f}'.format(amount_to_display)))}\" \\\n f\"{amount_to_display:0,.2f}\"\n return self.__string_to_print", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_color='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.1cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color=_map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def __repr__(self):\n return f'<RestaurantCourier restaurant: {self.restaurant_id} courier: {self.courier_id}>'", "def _apply_style(self):\n for actor in self.clean_actors:\n if settings.SHADER_STYLE != \"cartoon\":\n style = settings.SHADER_STYLE\n else:\n if self.backend: # notebook backend\n print(\n 'Shader style \"cartoon\" cannot be used in a notebook'\n )\n style = \"off\"\n\n try:\n actor.mesh.reverse() # flip normals\n actor.mesh.lighting(style=style)\n\n actor._mesh.reverse()\n actor._mesh.lighting(style=style)\n except AttributeError:\n pass", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_color='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color= _map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def report(self):\n print(f\"Money: {self.CURRENCY}{self.profit}\")", "def buscarFactura(self):\n\n if not self.lineNumero.isEnabled() and self.facturaSeleccionada != None:\n QtGui.QMessageBox.information(self,\"Aviso\",\"Ya se ha seleccionado una factura\")\n elif not self.lineNumero.isEnabled():\n self.lineNumero.setEnabled(True)\n self.lineNumero.clear()\n self.limpiarTabla(self.tableFactura)\n else:\n self.numeroFacturaActual=str(self.lineNumero.text())\n if len(self.numeroFacturaActual)==0:\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"No se ha ingresado número de factura\"))\n else:\n self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)\n if self.facturaSeleccionada==None:\n QtGui.QMessageBox.warning(self,\"Aviso\",\"La factura seleccionada no existe\")\n elif self.facturaSeleccionada.getNC()!=None:\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"La factura ya ha posee una Nota de Crédito\"))\n self.facturaSeleccionada = None\n elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"El tiempo permitido para la devolución ha expirado\"))\n elif self.facturaSeleccionada.estaLiquidada(self.sesion):\n print 
self.facturaSeleccionada.estaLiquidada(self.sesion)\n QtGui.QMessageBox.information(self,\"Aviso\",\"La factura se encuentra liquidada a la Obra Social\")\n else:\n self.lineNumero.setEnabled(False)\n self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.sesion),\n [\"nro_linea\",\"producto\",\"cantidad\",\"importe\"])", "def __str__(self):\n return f'{self.product.title} - Color {self.product.color}'", "def calc_class(stock):\n if 'M' in stock['Cap']:\n val = float(stock['Cap'].replace('M', \"\"))\n if val >= 250:\n stock['Class'] = 'Small'\n return\n if 'B' in stock['Cap']:\n val = float(stock['Cap'].replace('B', \"\"))\n if val >= 10:\n stock['Class'] = 'Large'\n elif val >= 1:\n stock['Class'] = 'Mid'", "def restock(self):\n self.money = 9999", "def __init__(self, centre):\n super().__init__()\n self.centre = centre", "def __init__(self, restaurant_name,cuisine_type):\r\n self.restaurant = restaurant_name\r\n self.cuisine = cuisine_type", "def pristine_wc(f):\n @functools.wraps(f)\n def revert_wc(self, *args, **kwargs):\n self.wc.revert()\n return f(self, *args, **kwargs)\n return revert_wc", "def add_furniture():\n print(\"Attempting to seed the furniture collection.....\")\n print()\n\n chair_path = Path(\"chair.png\")\n\n couch = FurnitureItem(\n \"Comfy couch\",\n \"Well loved, but still in pretty good condition\",\n 60.00,\n 40,\n \"swiessle@stevens.edu\",\n \"Couch\",\n \"beige\",\n [50, 20, 10],\n )\n couch.set_image_filepath(chair_path)\n Database.add_item(couch)\n print(\"couch has been successfully added\")\n\n table = FurnitureItem(\n \"Dining room table\",\n \"Wooden dining room table. Has a few scuffs, but not bad!\",\n 30.00,\n 15,\n \"gracem730@gmail.com\",\n \"Table\",\n \"wood\",\n [40, 20, 40],\n )\n table.set_image_filepath(chair_path)\n Database.add_item(table)\n print(\"table has been successfully added\")\n\n bed = FurnitureItem(\n \"Bed Frame\",\n \"Just selling the bed frame, you'll have \\\n to get your own mattress\",\n 55.00,\n 50,\n \"erotside@stevens.edu\",\n \"Bed\",\n \"white\",\n [10, 20, 10],\n )\n bed.set_image_filepath(chair_path)\n Database.add_item(bed)\n print(\"bed has been successfully added\")\n\n desk = FurnitureItem(\n \"Ikea desk, no longer need it\",\n \"In great condition, this is truly a steal\",\n 60.00,\n 35,\n \"jlora@stevens.edu\",\n \"Ikea Desk\",\n \"navy\",\n [20, 20, 30],\n )\n desk.set_image_filepath(chair_path)\n Database.add_item(desk)\n print(\"desk has been successfully added\")\n\n shelf = FurnitureItem(\n \"Book shelf, never used\",\n \"Brand new\",\n 110.00,\n 25,\n \"dcarpent@stevens.edu\",\n \"Book Shelf\",\n \"black\",\n [10, 20, 100],\n )\n shelf.set_image_filepath(chair_path)\n Database.add_item(shelf)\n print(\"shelf has been successfully added\")\n\n print()\n print(\"Done seeding the furniture collection!\")\n print(\"----------------------------------------------\")", "def correction(self, text, compteur):\n borneInf = 0\n borneSup = len(text)\n pos = (self.pos[0], self.pos[1] + compteur*self.hLigne)\n\n ligne = Ligne(self.ecran, text, self.police, self.taille, pos) # pos -> seulement pour le test ==> Ne pas peut etre (0,0) il test le depassement de l ecran ...\n while ligne.depassementTest():\n borneSup -= 1\n ligne = Ligne(self.ecran, text[borneInf:borneSup], self.police, self.taille, pos)\n self.lignes.append(Ligne(self.ecran, text[borneInf:borneSup], self.police, self.taille, pos, self.couleurText, self.couleurFond))\n return borneSup", "def __init__(self, name, cuisine):\n self.name = 
name\n self.cuisine = cuisine", "def __init__(self, name, cuisine):\n self.name = name\n self.cuisine = cuisine", "def storefront_fixer():\n\n\t\n\tprint \"CREATING REPORT...PLEASE WAIT...\"\n\tPrintBreakLine()\n\tcurrentView = uidoc.ActiveView\n\t#levelName = currentView.GenLevel.Name\n\t#levelElevation = currentView.GenLevel.Elevation\n\n\toneByWidth = 1.75/12\n\ttol = 0.001\n\n\tstorefrontConfig = storefront_options()\n\t\n\tsystemName = None\n\n\n\tmullionDict = GetMullionTypeDict()\n\tpanelTypeDict = GetWindowTypeDict()\n\tdoorDict = storefrontConfig.doorDict\n\n\n\tfrom rpw.ui.forms import Label, ComboBox, Separator, Button, FlexForm\n\t# Select the report type\n\tcomponents = [Label('Specify System'),\n\t\t\t\t\tComboBox(\"combobox1\", {\"Elite\": \"Elite\", \"MODE\": \"MODE\", \"Extravega\": \"Extravega\"}),\n\t\t\t\t\tSeparator(),\n\t\t\t\t\tButton('Go')]\n\n\tform = FlexForm(\"Storefront Report\", components)\n\tform.show()\n\n\tif not form.values:\n\t\tsys.exit()\n\telse:\n\t\tsystemName = form.values[\"combobox1\"]\n\n\n\t\tif not systemName.lower() in storefrontConfig.currentConfig[\"currentSystem\"].lower():\n\t\t\tstorefrontConfig.storefront_set_config()\n\t\t\tsystemName = storefrontConfig.currentConfig[\"currentSystem\"]\n\t\t\tstorefrontConfig.storefront_save_config()\n\n\n\tallStorefrontWalls = rpw.db.Collector(of_class='Wall', \n\t\t\t\t\t\t\t\t\t\t\tview=currentView, \n\t\t\t\t\t\t\t\t\t\t\twhere=lambda x: (str(x.WallType.Kind) == \"Curtain\") and (systemName.lower() in x.Name.lower()))\n\n\tallStorefrontPanels = []\n\tallStorefrontMullions = []\n\n\tfor sfwall in allStorefrontWalls:\n\n\t\tfor sfMullionsId in sfwall.CurtainGrid.GetMullionIds():\n\t\t\tallStorefrontMullions.append(doc.GetElement(sfMullionsId))\n\n\t\tfor panelId in sfwall.CurtainGrid.GetPanelIds():\n\t\t\tallStorefrontPanels.append(doc.GetElement(panelId))\n\n\n\twith rpw.db.Transaction(\"Storfront Fixer\"):\n\n\t\tfor sfmullion in allStorefrontMullions:\n\n\t\t\tmullionLength = sfmullion.get_Parameter(BuiltInParameter.CURVE_ELEM_LENGTH).AsDouble()\n\n\t\t\tif mullionLength > 0 and sfmullion.LocationCurve:\n\n\t\t\t\tmullionName = sfmullion.Name.lower()\n\t\t\t\tmullionRoom = sfmullion.get_Parameter(BuiltInParameter.ALL_MODEL_INSTANCE_COMMENTS).AsString()\n\t\t\t\tmullionPoint = sfmullion.Location.Point\n\t\t\t\tmullionPoint = XYZ(mullionPoint.X,mullionPoint.Y, 0)\n\n\t\t\t\tmullionCurve = sfmullion.LocationCurve\n\t\t\t\tmullionCenter = mullionCurve.Evaluate(0.5, True)\n\n\t\t\t\tif \"post\" in mullionName or \"wallstart\" in mullionName:\n\t\t\t\t\t\n\t\t\t\t\t#Intermediate posts\"\n\t\t\t\t\t#if not storefrontConfig.currentConfig[\"mullionContinuousVerticalIntermediateTop\"]:\n\t\t\t\t\tsfmullion.JoinMullion()\n\t\t\t\t\tdoc.Regenerate()\n\t\t\t\t\tmullionLengthAfter = sfmullion.get_Parameter(BuiltInParameter.CURVE_ELEM_LENGTH).AsDouble()\n\t\t\t\t\tif mullionLengthAfter > mullionLength:\n\t\t\t\t\t\toutput = script.get_output()\n\t\t\t\t\t\tclickable_link = output.linkify(sfmullion.Id)\n\t\t\t\t\t\tprint \"fixed: \" + mullionName + \" // \" + str(mullionLength) + \" to \" + str(mullionLengthAfter) + \" // -->\" + clickable_link\n\n\t\t\t\t\t# print out all errors\n\t\t\t\n\t\t\t\tif \"door\" in mullionName:\n\t\t\t\t\tsfmullion.JoinMullion()\n\n\t\t\t\tif \"intermediate\" in mullionName:\n\t\t\t\t\tsfmullion.BreakMullion()\n\n\n\t\tfor sfwall in allStorefrontWalls:\n\n\t\t\tsfGrid = sfwall.CurtainGrid\n\n\t\t\tfor panelId in sfGrid.GetPanelIds():\n\n\t\t\t\tpanel = 
doc.GetElement(panelId)\n\n\n\t\t\t\tpanelWidth = panel.get_Parameter(BuiltInParameter.WINDOW_WIDTH).AsDouble()\n\t\t\t\tpanelHeight = panel.get_Parameter(BuiltInParameter.WINDOW_HEIGHT).AsDouble()\n\n\t\t\t\tif (panelWidth > 0) and (panelHeight > 0):\n\n\t\t\t\t\tcondition = None\n\t\t\t\t\tvarient01 = None\n\t\t\t\t\tvarient02 = None\n\n\t\t\t\t\tpanelType = panel.get_Parameter(BuiltInParameter.ELEM_TYPE_PARAM).AsValueString()\n\t\t\t\t\tpanelSizeName = str(panelWidth) + \" x \" + str(panelHeight)\n\n\t\t\t\t\t#Get panel point and flatten\n\t\t\t\t\tpanelPoint = panel.GetTransform().Origin\n\t\t\t\t\tpanelPoint = XYZ(panelPoint.X, panelPoint.Y, 0)\n\n\t\t\t\t\t#Join or break the head mullions based on the system config\n\t\t\t\t\tif \"empty\" in panelType.lower():\n\t\t\t\t\t\tdoorHeads = GetHorizontalMullionsAtPoint(sfGrid, panelPoint, nameFilter= \"head\")\n\n\t\t\t\t\t\tif storefrontConfig.currentConfig[\"mullionContinuousHorizontalHeadAtDoor\"]:\n\t\t\t\t\t\t\tif doorHeads:\n\t\t\t\t\t\t\t\tfor mull in doorHeads:\n\t\t\t\t\t\t\t\t\tmull.JoinMullion()\n\t\t\t\t\t\t\t\t\tprint \"fixed: \" + mull.Name\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif doorHeads:\n\t\t\t\t\t\t\t\tfor mull in doorHeads:\n\t\t\t\t\t\t\t\t\tmull.BreakMullion()\n\t\t\t\t\t\t\t\t\tprint \"fixed: \" + mull.Name", "def Calcification(self):\n s = self.calcification\n assert s in range(1,7), \"Calcification score out of bounds.\"\n if s == 1: return 'Popcorn'\n elif s == 2: return 'Laminated'\n elif s == 3: return 'Solid'\n elif s == 4: return 'Non-central'\n elif s == 5: return 'Central'\n elif s == 6: return 'Absent'", "def __init__(self, restaurant_name, cuisine_type):\r\n\t\tself.restaurant_name = restaurant_name\r\n\t\tself.cuisine_type = cuisine_type", "def __init__(self, ecran, text, police, taille, pos = (0,0), couleurText = (0,0,0), couleurFond = (255,255,255)):\n super().__init__(ecran, text, police, taille, pos, couleurText, couleurFond)", "def test_CRit(self):\n\n manual_calculated = list(self._manual_calculate_formal(self.currency))\n\n self.portfolio._generate_currency()\n test_column = self._convert_df_to_list(self.portfolio._df_currency)\n self.assertAlmostEqual(self._clear_column(manual_calculated),\n self._clear_column(test_column))", "def set_up_wireframe_freestyle(self):\n scene = self.set_as_active()\n \n # sets up renderlayer(s) (depending on 'Composited wireframing' checkbox) and freestyle wireframing\n # also saves freestyle linestyle name\n self.set_up_rlayer('wireframe', rlname_other='other')\n scene.wirebomb.data_freestyle_linestyle = self.add_wireframe_freestyle().name\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clear_materials and w_var.is_any_affected:\n self.clear_materials()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n if w_var.cb_clay:\n self.set_up_clay()\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao and not w_var.cb_composited:\n self.set_up_all_ao()\n\n elif w_var.cb_composited:\n\n # sets up composition for wireframe and sets up ambient occlusion lighting if used\n self.comp_add_wireframe_freestyle()\n \n if scene.render.engine == 'CYCLES':\n scene.cycles.film_transparent = True\n\n else:\n scene.render.alpha_mode = 'TRANSPARENT'\n\n if w_var.cb_ao:\n self.set_up_world_ao()\n\n # deselects all objects as a last thing to clean up\n self.select('DESELECT', objects={'ALL'})", "def nueva_factura():\n num_fac = 
str(generar_numero_factura())\n\n return render_template('facturar.html',\n numero_factura=num_fac,\n name=current_user.username)", "def make_coffee(self, drink):\n for ingredient in drink.ingredients:\n self.resources[ingredient] -= drink.ingredients[ingredient]\n self.profit += drink.cost", "def setFactura(self, caja): \n self.caja = caja", "def replace_contractions(self):\n self.text = contractions.fix(self.text)\n return self", "def fama (self , diccionario):\n\n decoracion_list = []\n for key , value in diccionario.items():\n a=[]\n a.append(key)\n a.append(value)\n decoracion_list.append (a)\n\n paredes_list = decoracion_list [0:3]\n suelo_list = decoracion_list [3:6]\n reforma_list = decoracion_list [6:]\n\n paredes = 1\n suelo = 1\n reforma = 1\n\n for i in range (len(paredes_list)):\n if paredes_list [i][1] == 1 :\n paredes = i+2 \n\n for i in range (len(suelo_list)):\n if suelo_list [i][1] == 1 :\n suelo = i+2\n\n for i in range (len(reforma_list)):\n if reforma_list [i][1] == 1 :\n reforma = i+2\n\n modificador_fama = 0\n\n if paredes >= 4 and suelo >= 4 and reforma >= 4 :\n modificador_fama = 45\n\n elif paredes >= 3 and suelo >= 3 and reforma >= 3 :\n modificador_fama = 33 \n\n elif paredes >= 2 and suelo >= 2 and reforma >= 2 :\n modificador_fama = 12\n\n fama = (10*paredes)+(10*suelo)+(10*reforma) + modificador_fama + kasino.modificador_fama\n\n \"\"\" FORMULA FAMA : Con esta formula se calcula la fama, que dependera de la decoracion e influira en los visitantes \n Se puede usar modificador_fama para calibrar el juego o añadir niveles de dificulad \"\"\"\n \n return fama , paredes , suelo , reforma", "def set_style(self):", "def ben_s(self) -> str:\n return str(self.beneficiaires or \"∞\")", "def incumbant_firm(self, wage):\n \n \n \n # a. demand for capital (capital policy function)\n pol_k = (self.alpha /(self.ret *(1+self.tau_capital)))**((1-self.gamma)/(1-self.gamma-self.alpha)) \\\n * (self.gamma /(wage * (1+self.tau_labor)))**(self.gamma/(1-self.gamma-self.alpha)) \\\n * (self.grid_s_matrix*(1-self.tau_output))**(1/(1-self.alpha-self.gamma))\n \n # b. demand of labor (labor policy function)\n pol_n = (1+self.tau_capital) * self.ret * self.gamma / ((1+self.tau_labor) * wage * self.alpha) * pol_k\n #pol_n = ((smatrix*(1-self.tau_output) * gamma) / wage)**(1/(1-gamma)) * pol_k**(alpha/(1-gamma))\n \n # c. incumbant profit\n pi=(1-self.tau_output) * self.grid_s_matrix * pol_k**self.alpha * pol_n**self.gamma \\\n - (1+self.tau_labor)* wage * pol_n - (1+self.tau_capital) * self.ret * pol_k - self.cf\n \n # d. discounted present value of an incumbent establishment, W(s,pol_k(s,theta))\n W = pi / (1-self.rho)\n \n return pol_k, pol_n, pi, W", "def __init__(self, fen): \n self.d_engine = Stockfish()\n self.setFen(fen)", "def make_coffee(drink, ingredients):\n for item in ingredients:\n resources[item] -= ingredients[item]\n print(f\"Here is your {drink}. 
Enjoy!\")", "def make_struc(alat,atom,clat):\r\n if atom == 'Cu' or atom == 'Au':\r\n fcccell = bulk(atom, 'fcc', a=alat)\r\n write('fcc.cif', fcccell)\r\n print(fcccell, fcccell.get_atomic_numbers())\r\n structure = Struc(ase2struc(fcccell))\r\n elif atom == 'CuAu':\r\n lattice = alat * numpy.identity(3)\r\n lattice[2][2] = clat\r\n symbols = ['Cu','Au']\r\n sc_pos = [[0,0,0],[0.5,0.5,0.5]]\r\n bctcell = Atoms(symbols=symbols, scaled_positions=sc_pos, cell=lattice)\r\n write('bct.cif', bctcell)\r\n print(bctcell, bctcell.get_atomic_numbers())\r\n structure = Struc(ase2struc(bctcell))\r\n # check how your cell looks like\r\n print(structure.species)\r\n return structure", "def apply_web_cros_common_fixes(font, unhinted, family_name):\n subfamily_name = font_data.get_name_records(font)[2].encode('ASCII')\n assert(subfamily_name in\n ['Thin', 'Thin Italic',\n 'Light', 'Light Italic',\n 'Regular', 'Italic',\n 'Medium', 'Medium Italic',\n 'Bold', 'Bold Italic',\n 'Black', 'Black Italic'])\n\n if 'Condensed' in font_data.get_name_records(font)[1]:\n family_name += ' Condensed'\n full_name = family_name\n if subfamily_name != 'Regular':\n full_name += ' ' + subfamily_name\n\n # Family, subfamily names\n font_data.set_name_record(font, 16, family_name)\n style_map = ['Regular', 'Bold', 'Italic', 'Bold Italic']\n if subfamily_name in style_map:\n font_data.set_name_record(font, 1, family_name)\n else:\n weight = subfamily_name.split()[0]\n new_family_name = family_name\n if weight != 'Regular':\n new_family_name += ' ' + weight\n font_data.set_name_record(font, 1, new_family_name)\n\n # all weights outside regular and bold should only have subfamily\n # \"Regular\" or \"Italic\"\n italic = subfamily_name.endswith('Italic')\n font_data.set_name_record(font, 2, style_map[italic << 1])\n\n # Unique identifier and full name\n font_data.set_name_record(font, 3, full_name)\n font_data.set_name_record(font, 4, full_name)\n font_data.set_name_record(font, 18, None)\n\n # PostScript name\n font_data.set_name_record(\n font, 6, (family_name+'-'+subfamily_name).replace(' ', ''))\n\n # Copyright message\n font_data.set_name_record(\n font, 0, 'Copyright 2011 Google Inc. All Rights Reserved.')\n\n # hotpatch glyphs by swapping\n # https://github.com/google/roboto/issues/18\n glyf = font['glyf']\n tmp = glyf['chi']\n glyf['chi'] = glyf['chi.alt']\n glyf['chi.alt'] = tmp\n\n # make glyph orders consistent for feature copying\n # https://github.com/google/roboto/issues/71\n glyph_order = font.getGlyphOrder()\n for i, glyph_name in enumerate(glyph_order):\n if glyph_name.endswith('.lnum'):\n new_name = glyph_name.replace('.lnum', '.pnum')\n glyph_order[i] = new_name\n font['glyf'][new_name] = font['glyf'][glyph_name]\n\n # append old name to glyph order so del succeeds\n glyph_order.append(glyph_name)\n del font['glyf'][glyph_name]\n\n # copy features from unhinted\n # https://github.com/google/roboto/pull/163\n for table in ['GDEF', 'GPOS', 'GSUB']:\n font[table] = unhinted[table]", "def convertFarenheitToCelsius(F):\n if isinstance(F, str) == True:\n raise ValueError(\"Farenheit cannot be a string value\")\n if isinstance(F,complex) == True:\n raise ValueError(\"Farenheit cannot be a complex value\")\n if isinstance(F,int) == True:\n raise ValueError(\"Farenheit should be a float value, example: 120.50\")\n \n C = (F-32)/1.8\n return C" ]
[ "0.5750459", "0.5182642", "0.49739367", "0.48258784", "0.4760474", "0.47537947", "0.47426933", "0.47203162", "0.46918836", "0.46697134", "0.46030477", "0.45801994", "0.45153394", "0.45117316", "0.44764778", "0.4470842", "0.44334066", "0.44276795", "0.44269252", "0.44269252", "0.44269252", "0.44216144", "0.4405317", "0.43971524", "0.43887183", "0.43804574", "0.436501", "0.43596882", "0.4357722", "0.43458667", "0.43442473", "0.43372753", "0.43364957", "0.43286687", "0.43111002", "0.43037957", "0.430239", "0.42984653", "0.4287957", "0.4270127", "0.4267988", "0.42553976", "0.42551264", "0.4246429", "0.42416", "0.4240328", "0.4234012", "0.423209", "0.4225055", "0.4222364", "0.42216083", "0.42215434", "0.42200574", "0.42185992", "0.4218386", "0.42174768", "0.42107338", "0.42107144", "0.42068577", "0.41892943", "0.41866368", "0.41813052", "0.41806996", "0.41806996", "0.4176932", "0.4171676", "0.4171634", "0.4169906", "0.41682804", "0.41632667", "0.41610065", "0.41591495", "0.41555497", "0.41524705", "0.4150838", "0.41505554", "0.41405565", "0.41324317", "0.41322982", "0.41319102", "0.41319102", "0.41272002", "0.41249612", "0.412298", "0.41225225", "0.4120954", "0.41157705", "0.41157687", "0.41126427", "0.4106587", "0.4099136", "0.40980121", "0.40979177", "0.40978345", "0.40968522", "0.4092171", "0.4091585", "0.408831", "0.4088143", "0.40770558" ]
0.59496
0
This method returns a sentence with the whole information given about the chair. >>> chair1 = Chair("empire", "bedroom", "armchair") >>> assert(str(chair1) == "")
def __str__(self): return "<This armchair furniture style is " + str(self.style) + ">"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_chairs(self):\n self.chairs = self.p_constants[\"NPHILOSOPHERS\"] * [0]\n for i in xrange(self.p_constants[\"NPHILOSOPHERS\"]):\n x, y, angle = self._get_chair_coord(i, 5.0)\n self.chairs[i] = self._load_model(\n \"chair1\", scale=[7, 7, 7], pos=[x, y - 1, 0], H=rad2deg(angle) + 15)\n self.chairs[i].setTexture(self.chair_tex)\n self.chairs[i].setTexScale(\n TextureStage.getDefault(), 0.005, 0.005)", "def updateHairColor(self, hairColor): \n self.avatarConfiguration[\"hairColor\"] = str(hairColor)\n self.paintHair()", "def __init__(self, car):\n super(self.__class__, self).__init__(car)\n self.coordinator_name = car.get_faulty_coordinator_name()\n self.second_at_charge_name = car.get_second_at_charge_name()\n self.coordinated_car_name = car.get_corrected_coordinated_car_name()\n self.following_car_name = car.get_corrected_following_car_name()", "def cp(wair,pres,entr=None,temp=None,airf=None,dhum=None,chkvals=False,\n chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,chkbnd=False,\n mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_s = temp\n h_ss = iceair_h(0,2,0,wair,pres,temp=temp,airf=airf,dhum=dhum)\n cp = h_s/h_ss\n return cp", "def holy_guard(self):\n self.name = \"Holy Guard's Armor\"\n self.rarity = \"Legendary\"\n self.pdef_value = 100\n self.mdef_value = 200\n self.increase_crit = 5\n self.desc = \"Armor of the Holy Guard, made of unknown materials.\"", "def assign(self, circuit:'cirq.Circuit'):\n self._moments = circuit._moments", "def move(self, new_home):\n #checked#\n ###your code here###\n if self.home!=None:\n self.home.occupant=None\n new_home.occupant=self\n self.home=new_home", "def __init__(self, restaurant_name,cuisine_type):\r\n self.restaurant = restaurant_name\r\n self.cuisine = cuisine_type", "def __init__(self, car):\n super(self.__class__, self).__init__(car)\n self.cars_at_intersection = car.get_cars_at_intersection()\n self.new_supervisor_name = car.get_new_supervisor_name()\n self.transmitter_receiver_dict = car.get_transmitter_receiver_dict()\n self.faulty_cars_names = car.faulty_cars_names", "def rencontrer(self, joueur):\n print(\"Vous rencontrez le mythique \" + self._nom + \"!! 
Il détient peut-être un objet rare.\")\n input()", "def hk_armor(self):\n self.name = \"Holy Knight's Armor\"\n self.rarity = \"Common\"\n self.pdef_value = 40\n self.mdef_value = 10\n self.increase_crit = 0\n self.desc = \"Armor of the Holy Guard, you feel the light flowing.\"", "def __init__(self, strength, constitution, dexterity,\r\n intelligence, wisdom, charisma):\r\n super().__init__(strength, constitution, dexterity,\r\n intelligence, wisdom, charisma)\r\n if self.constitution < 13:\r\n self.constitution = 13\r\n self.hp = (12 + self.conMod)", "def space_chrom(self,chrom):\n\n #actual value/number of actegory\n for spot in range(GAConfig[\"num_categories\"]):\n while(True):\n if(chrom.amount_of_genes(spot) < int(GAConfig[\"category_restriction_count\"])):\n go = True\n #grabs a category that can be take from.\n while(go):\n random_cat= self.get_rand_cat()\n if(chrom.can_move(random_cat,int(GAConfig[\"category_restriction_count\"]))):\n go = False\n genes = chrom.get_genes()\n remove_gene = genes[random_cat].pop() #just takes the back value\n chrom.remove_gene(remove_gene)\n chrom.insert_into_category(spot,remove_gene)\n else:\n break\n\n\n return chrom", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name.title()\n\t\tself.cuisine_type = cuisine_type", "def create_from_ui(self, chair):\n chair_id = chair.pop('id', False)\n if chair_id:\n self.browse(chair_id).write(chair)\n else:\n chair_id = self.create(chair).id\n return chair_id", "def modifier_classement_joueur(self, championnat, rapport):\r\n rapport.affichage_classement_championnat(championnat)\r\n championnat = sorted(championnat, key=lambda x: x.classement) # tri joueurs du championnat par classement\r\n print(\"Veuillez indiquer le numéro du joueur à modifier:\")\r\n choix = int(input())\r\n if choix <= len(championnat): # test si choix numero joueur valide\r\n index = choix - 1 # car liste commence a 0\r\n joueur = championnat[index]\r\n nouveau_joueur = copy.deepcopy(joueur)\r\n print(\"Veuillez indiquer le nouveau classement de \" + joueur.nom)\r\n nouveau_classement = int(input())\r\n nouveau_joueur.classement = nouveau_classement\r\n championnat.remove(joueur) # enleve ancienne position du joueur dans classement\r\n championnat.append(nouveau_joueur) # ajoute joueur avec classement actualise\r\n return championnat\r\n else:\r\n print(\"Numero joueur invalide\")\r\n return", "def wrap_hair_plates(character):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.wrap_hair_plates\")\n\n flg.info(\"Wrapping hair plates to {}\".format(character.get_charName()))\n\n char_col = character.get_current_collection()\n flg.debug(\"Current Collection: {}\".format(char_col))\n\n char_mesh = search_namespaces_for_mesh(character)\n char_hair_plates = char_col.get_hairPlates()\n flg.info(\"Character mesh object: {}\".format(char_mesh))\n flg.info(\"Character hair plate objects: {}\".format(char_hair_plates))\n\n deformer_input_list = []\n\n history_list = mc.listHistory(char_mesh)\n flg.debug(\"Character mesh history nodes: {}\".format(history_list))\n\n filtered_list = node_type_filter(history_list,\n \"joint\",\n \"animCurveUU\",\n )\n flg.debug(\"Character mesh history nodes, filtered: \".format(filtered_list))\n\n for n in filtered_list:\n print n\n node_attr = mc.listAttr(n, leaf=True)\n if \"envelope\" in node_attr:\n deformer_input_list.append(n)\n flg.debug(\"Objects containing envelope attributes: {}\".format(deformer_input_list))\n\n for o in deformer_input_list:\n flg.info(\"Setting {0} 
input {1} envelope to 0\".format(char_mesh, o))\n mc.setAttr(\"{}.envelope\".format(o), 0)\n\n flg.info(\"Viewport refresh\")\n mc.refresh()\n\n for hp in char_hair_plates:\n tools.create_wrap(char_mesh, hp,\n exclusiveBind=True,\n falloffMode=1,\n shapeDeformed=True\n )\n flg.info(\"Binding {0} to {1}\".format(hp, char_mesh))\n\n flg.info(\"Viewport refresh\")\n mc.refresh()\n\n for o in deformer_input_list:\n flg.info(\"Setting {0} input {1} envelope to 1\".format(char_mesh, o))\n mc.setAttr(\"{}.envelope\".format(o), 1)", "def _onchange_chair_id(self):\n if 'active_id' in self._context.keys():\n self.chair_id = self._context['active_id']", "def __init__(self, restaurant_name, cuisine_type):\r\n\t\tself.restaurant_name = restaurant_name\r\n\t\tself.cuisine_type = cuisine_type", "def autocenter(self):\n if self.contour_exists:\n corr_r, corr_c = find_center_mass(self.largest_cont)\n self.new_cent(\n (self.r - self.radius + corr_r, self.c - self.radius + corr_c)\n )", "def test_corporation_crashing_removes_citizenship(self):\n\n\t\tcitizenship = self.p.citizenship\n\t\tcitizenship.corporation = self.c\n\t\tcitizenship.save()\n\n\t\tself.c.set_market_assets(0)\n\t\tself.c.save()\n\n\t\tself.g.resolve_current_turn()\n\n\t\tself.assertIsNone(self.reload(self.p).citizenship.corporation)", "def __repr__(self):\n return \"Classroom('{}', {}, {})\".format(self.number, self.capacity,\n str(self.equipment))", "def __init__(self, name, suit):\n\n name=name.lower()\n norm_name=self.NAME_CONVERSIONS[name]\n\n\n suit=suit.lower()\n norm_suit=self.SUIT_CONVERSIONS[suit]\n\n # Save to internal representation:\n self.name = norm_name\n self.suit = norm_suit", "def gm_robe(self):\n self.name = \"Grand Mage's Robe\"\n self.rarity = \"Uncommon\"\n self.pdef_value = 8\n self.mdef_value = 30\n self.increase_crit = 0\n self.desc = \"The robe of an accomplished mage, has many enchantments.\"", "def hire(name):\r\n print(\"A CEO cannot be hired outright\")", "def affichage(self):\r\n print(self.ship)\r\n for a in range(self.dim+2):\r\n for b in range(self.dim+2):\r\n print(self.plat[b][a],end=\" \")\r\n print(\"\")", "def crier_ordres(self, personnage):\n msg = \"{} s'écrie : rameurs, laissez courir !\".format(\n personnage.distinction_audible)\n self.navire.envoyer(msg)", "def get_chakra(self, chakra_list):\n for c in chakra_list:\n if c.raga == self.name:\n self.chakra = c.name\n self.chakra_num = c.num\n break", "def recharge(self):\n self.battery=self.full\n return self", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def hairness_do_stuff(h, y, c):\n\n print('sup hairness!')\n\n return None", "def __init__(self, car, second_at_charge_name):\n super(self.__class__, self).__init__(car)\n self.second_at_charge_name = second_at_charge_name\n self.cars_at_intersection = car.get_cars_at_intersection()\n self.transmitter_receiver_dict = car.get_transmitter_receiver_dict()\n self.faulty_cars_names = car.faulty_cars_names", "def __call__(self,camp):\n if self.npc in camp.party:\n camp.assign_pilot_to_mecha(self.npc,None)\n camp.party.remove(self.npc)\n for mek in list(camp.party):\n if hasattr(mek,\"owner\") and mek.owner is self.npc:\n camp.party.remove(mek)", "def 
__init__(self, car, following_car_name, coordinator_name):\n super(self.__class__, self).__init__(car)\n self.following_car_name = following_car_name\n self.coordinator_name = coordinator_name", "def __init__(self, cash=1000, rawg_quantity=50, rawg_demand=0, rawg_price=0, rig_quantity=30, rig_supply=0, rig_price=0, username='player', num_factories=0):\n self.username = username\n self.update(cash=cash, rawg_quantity=rawg_quantity, rawg_demand=rawg_demand, rawg_price=rawg_price, rig_quantity=rig_quantity, rig_supply=rig_supply, rig_price=rig_price, num_factories=num_factories)", "def apply_primary_effect(self, player: Player) -> None:\n \"\"\"\n Buy 1 cube (any resource but gold) from the stock with 1 denier.\n \"\"\"\n # Remark: Hard-coded! We don't use the tags <cost><n_deniers>-1 and <gain><CHOICES>... in <game_elements><buildings><neutral_buildings><neutral_building>.\n print(indent(3) + 'Effect of the neutral building ' + self.name +\n ' for a worker of the player ' + player.name() + ': ' + self.primary_effect.text)\n self.apply_peddler_effect(player)", "def characters(self, text):\n if text.isspace(): return\n text = str(text)\n if self.curelement == \"residue\":\n self.newresname = text\n elif self.curelement == \"atom\":\n self.newatomname = text\n elif self.curelement == \"useatomname\":\n self.oldatomname = text\n elif self.curelement == \"useresname\":\n self.oldresname = text", "def apply_primary_effect(self, player: Player) -> None:\n print(indent(3) + 'Effect of the neutral building ' + self.name +\n ' for a worker of the player ' + player.name() + ': ' + self.primary_effect.text)\n self.apply_no_cost_only_gain_effect(self.primary_effect.money_resources_gain, player)", "def __init__(self, suit, rank):\n self.suit = suit.lower()\n self.rank = rank.lower()", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.food = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name\n\t\tself.cuisine_type = cuisine_type\n\t\tself.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name\n\t\tself.cuisine_type = cuisine_type\n\t\tself.number_served = 0", "def extract_crime_category(self,match):\n \n\n #\n # wouldn't be calling this function if we didn't already know there's a match\n assert(match!=None)\n\n #\n # extract crime category\n line=match.string\n start_index=match.start('crime')\n stop_index=match.end('crime')\n crime_key=line[start_index:stop_index]\n crime_key=crime_key.lower()\n\n my_logger.debug('match(%d,%d)=%s' % (start_index,stop_index,crime_key))\n \n return crime_key", "def func(self):\n account = self.account\n city_name = 'Phoenix' if not self.args else self.args\n a = Astral()\n a.solar_depression = 'civil'\n city = a[city_name]\n if not city:\n return\n timezone = city.timezone\n sun = city.sun(date=datetime.date.today(), local=True)\n\n account.msg('Information for %s/%s\\n' % (city_name, city.region))\n account.msg('Timezone: %s' % timezone)\n account.msg('Latitude: %.02f; Longitude: %.02f' % (city.latitude, city.longitude))\n account.msg('Dawn: %s' % str(sun['dawn']))\n account.msg('Sunrise: %s' % str(sun['sunrise']))\n account.msg('Noon: %s' % str(sun['noon']))\n account.msg('Sunset: %s' % str(sun['sunset']))\n account.msg('Dusk: %s' % str(sun['dusk']))", "def claimchore():\n user = dbwrangler.get_current_user()\n userchores = Userchore.query.filter_by(address_id=user.address, \n commitment='INIT').all()\n #Below: 
chores that this belong to this user's address\n address_chores = [Chore.query.filter_by(chore_id=userchore.chore_id).first()\n for userchore in userchores]\n chores = []\n for chore in address_chores:\n #gather userchore entries for this chore\n userchores = Userchore.query.filter_by(chore_id=chore.chore_id).all()\n #isolate the item from ^ that is the clean (first) member of that chore inside userchores(table)\n base_userchore = [userchore for userchore in userchores if userchore.commitment == 'INIT']\n #get the rest of the chore data associated with that chore\n days_left = chore.days_weekly.split(\"|\")\n days_left = helpers.find_days_left(chore, userchores, days_left)\n if len(days_left) != 0:\n chores.append(chore)\n for chore in chores:\n #recast the occurance dates for these as lists\n chore.days_weekly = chore.days_weekly.split(\"|\")\n if chore.date_monthly:\n chore.date_monthly = inflect.engine().ordinal(chore.date_monthly)\n\n\n return render_template(\"takeachore.html\", chores=chores, \n userchores=userchores, user=user)", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def repair(self, *args, **kwargs):\n return self(AbilityId.EFFECT_REPAIR, *args, **kwargs)", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def test_created_chassis_properties(self):\n self.assertEqual(self.chassis.description, self.chassis_description)\n self.assertEqual(self.chassis.extra, self.chassis_extra)", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.name = restaurant_name\n\t\tself.type = cuisine_type", "def recharged(self, charge):\n # Duplicate CMolecules\n qc_mol = CMolecule(self.qc_mol.geom)\n br_mol = CMolecule(self.br_mol.geom)\n pc_mol = CMolecule(self.pc_mol.geom)\n\n additional_charge = (charge - self.charge)/(len(br_mol) + len(pc_mol))\n\n br_mol.charges = br_mol.charges + additional_charge\n pc_mol.charges = pc_mol.charges + additional_charge\n\n return Cluster(qc_mol, br_mol, pc_mol)", "def changeCard(self, card):\n if not card in self.influence:\n # todo: create a Coup-specific exception\n raise BaseException(\"%s is not found in player's influence. 
Something went wrong\" % card)\n \n self.influence.remove(card)\n GameState.AddToDeck(card)\n \n newCard = GameState.DrawCard()\n self.influence.append(newCard)", "def crier_ordres(self, personnage):\n adverse = self.adverse\n msg = \"{} s'écrie : un boulet sur {} !\".format(\n personnage.distinction_audible, adverse.desc_survol)\n self.navire.envoyer(msg)", "def __init__(self, name, cuisine):\n self.name = name\n self.cuisine = cuisine", "def __init__(self, name, cuisine):\n self.name = name\n self.cuisine = cuisine", "def clear_labor_market(self, wages: MultiAgentDict):\n occupation = self.firm.hire_worker(wages)\n for agent in self.agents.values():\n agent.earn(occupation[agent.agent_id], wages[agent.agent_id])\n self.firm.produce(occupation)\n self.inflation = self.firm.set_price(occupation, wages)\n self.unemployment = self.get_unemployment()", "def __init__(self, car, supervisor_message, second_at_charge_message):\n super(self.__class__, self).__init__(car)\n self.coordinator_name = car.get_faulty_coordinator_name()\n self.second_at_charge_name = car.get_name()\n self.cars_at_intersection = car.get_cars_at_intersection()\n self.coordinated_car_name = car.get_faulty_coordinated_car_name()\n self.following_car_name = car.get_faulty_following_car_name()\n self.supervisor_message = supervisor_message\n self.second_at_charge_message = second_at_charge_message", "def __init__(self, _table_num, _chairs=3, _name=\"walk in\"):\n self.table_num = _table_num\n self.chairs = _chairs\n self.customers = []\n for person in range(self.chairs):\n self.customers.append(Person.Person(_name))\n self.reservation_holder = self.customers[0]", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.numbers_served = 0", "def test_citizenship_is_affected(self):\n\t\tself.g.resolve_current_turn()\n\n\t\tself.assertEqual(self.reload(self.p).citizenship.corporation, self.c)", "def evalHand(hand):\n # os.system(\"clear\")\n #print(\"dealer hand before evalHand is: \", hand.showHand())\n if (1 in hand.cards) and (21 - hand.handSum() >= 10):\n print(\"found a 1 value Ace in the hand\")\n hand.cards[hand.cards.index(1)] = 11 # Change the first ace from value 1\n # to value 11\n if (11 in hand.cards) and (hand.handSum() >= 22):\n print(\"found an 11 value Ace in the hand and sum > 21\")\n hand.cards[hand.cards.index(11)] = 1 # Change the first ace from value 1\n # to value 11", "def lapserate(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n h_sp = iceair_h(0,1,1,wair,pres,entr=entr,airf=airf,temp=temp,dhum=dhum,\n chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,dhum0=dhum0,\n chkbnd=chkbnd,mathargs=mathargs)\n gamma = h_sp\n return gamma", "def die(self):\n print(\"Company\", self.ticker, \"left the universe\")\n del self\n Company.population -= 1", "def __init__(self, env: BaseAviary):\n self.gravity = env.GRAVITY\n \"\"\"float: gravity, the product of the drone's mass M by acc g.\"\"\"\n self.timestep = env.TIMESTEP\n \"\"\"float: simulation and control timestep.\"\"\"\n self.kf_coeff = env.KF\n \"\"\"float: RPMs to force coefficient.\"\"\"\n self.km_coeff = env.KM\n \"\"\"float: RPMs to torque coefficient.\"\"\"\n ############################################################\n ############################################################\n #### HOMEWORK CODE (START) #################################\n 
############################################################\n ############################################################\n self.p_coeff_position = None\n \"\"\"proportional coefficient(s) for position control.\"\"\"\n self.i_coeff_position = None\n \"\"\"integral coefficient(s) for position control.\"\"\"\n self.d_coeff_position = None\n \"\"\"derivative coefficient(s) for position control.\"\"\"\n self.p_coeff_attitude = None\n \"\"\"proportional coefficient(s) for attitude control.\"\"\"\n self.i_coeff_attitude = None\n \"\"\"integral coefficient(s) for attitude control.\"\"\"\n self.d_coeff_attitude = None\n \"\"\"derivative coefficient(s) for attitude control.\"\"\"\n ############################################################\n ############################################################\n #### HOMEWORK CODE (END) ###################################\n ############################################################\n ############################################################\n self.reset()", "def home_money_line(self, home_money_line):\n\n self._home_money_line = home_money_line", "def initChassisByXml(self, physxScene, carNode):\r\n # Start initializing the chassis ...\r\n chassisNode = carNode.getElementsByTagName( 'chassis' )[0]\r\n bodyNode = chassisNode.getElementsByTagName( 'body' )[0]\r\n \r\n bodyDesc = PhysxBodyDesc()\r\n bodyDesc.setMass( float( bodyNode.getAttribute( 'mass' ) ) )\r\n \r\n actorDesc = PhysxActorDesc()\r\n actorDesc.setName( 'Chassis' )\r\n actorDesc.setBody( bodyDesc )\r\n actorDesc.setGlobalPos( readPoint3( chassisNode.getElementsByTagName( 'global-pos' )[0] ) )\r\n \r\n boxShapeNodes = bodyNode.getElementsByTagName( 'boxshape' )\r\n for boxNode in boxShapeNodes:\r\n shapeDesc = PhysxBoxShapeDesc()\r\n shapeDesc.setDimensions( readVec3( boxNode.getElementsByTagName('dimensions')[0] ) )\r\n shapeDesc.setLocalPos( readPoint3( boxNode.getElementsByTagName('local-pos')[0] ) )\r\n actorDesc.addShape( shapeDesc )\r\n \r\n self.chassis = physxScene.createActor( actorDesc )\r\n vMassCenter = readPoint3( chassisNode.getElementsByTagName( 'center-of-mass' )[0] )\r\n self.chassis.setCMassOffsetLocalPos( vMassCenter )\r\n self.massCenter = vMassCenter \r\n self.chassisModel = loader.loadModel( chassisNode.getAttribute( 'model' ))\r\n self.chassisModel.reparentTo( render )\r\n for hideNode in chassisNode.getElementsByTagName( 'hide' ):\r\n part = self.chassisModel.find( hideNode.getAttribute( 'part' ))\r\n if part is not None:\r\n part.hide()\r\n for mirrorNode in chassisNode.getElementsByTagName( 'mirror' ):\r\n mirror = self.chassisModel.find( mirrorNode.getAttribute( 'part' ) )\r\n if mirror is not None:\r\n self.setAsMirror( mirror )", "def setup(self, name, chem, conc, Kw, D, x_coord, y_coord, dx, dy, dz): \r\n self.name = name\r\n \r\n self.chem = chem # a list of objectives of the class Chemical\r\n if chem is None:\r\n self.nSpecies = 0\r\n elif type(chem) is not list :\r\n self.nSpecies = 1\r\n else :\r\n self.nSpecies = len(chem) # Number of chemcial species\r\n self.conc = conc # The dependent variable (named 'conc') of interest\r\n self.Kw = Kw\r\n self.D = D\r\n \r\n self.x_coord = x_coord\r\n self.y_coord = y_coord\r\n self.dx = dx\r\n self.dy = dy\r\n self.dz = dz", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type", "def equipment_chassis(self, equipment_chassis):\n\n self._equipment_chassis = equipment_chassis", "def __call__(self,camp):\n if self.npc not in camp.party:\n 
camp.party.append(self.npc)\n level = max(self.npc.renown,15)\n if hasattr(self.npc,\"relationship\") and self.npc.relationship:\n level = max(level + self.npc.relationship.data.get(\"mecha_level_bonus\",0),10)\n mek = gears.selector.MechaShoppingList.generate_single_mecha(level,self.npc.faction,gears.tags.GroundEnv)\n if self.npc.mecha_colors:\n mek.colors = self.npc.mecha_colors\n camp.party.append(mek)\n camp.assign_pilot_to_mecha(self.npc,mek)\n for part in mek.get_all_parts():\n part.owner = self.npc", "def neutralise_raw(self):\n # kekulization has to be done, otherwise u will encounter\n # issues when assigning bond types later\n Chem.Kekulize(self.m)\n\n # get pairs of charged atoms\n self.get_charged_pairs()\n\n # eliminate the charges by rebuilding the molecule\n m = Chem.Mol()\n mc = Chem.EditableMol(m)\n for i, az in enumerate(self.zs):\n ai = Chem.Atom( az )\n ci = self.charges[i]\n if ci != 0:\n if ci == 1:\n filt = (self.cpairs[:,0] == i)\n if np.any(filt):\n ai.SetFormalCharge( 1 )\n elif ci == -1:\n filt = (self.cpairs[:,1] == i)\n if np.any(filt): ai.SetFormalCharge( -1 )\n else:\n print((' -- i, charges[i] = ', i, self.charges[i]))\n print(' #ERROR: abs(charge) > 1??')\n raise\n mc.AddAtom( ai )\n\n ijs = np.array( np.where( np.triu(self.bom) > 0 ) ).astype(np.int)\n nb = ijs.shape[1]\n for i in range(nb):\n i, j = ijs[:,i]\n mc.AddBond( i, j, bo2bt[ '%.1f'%self.bom[i,j] ] )\n\n m = mc.GetMol()\n m2 = assign_coords(m, self.coords)\n self.m = m2", "def setInitialConcentration(self, *args):\n return _libsbml.Species_setInitialConcentration(self, *args)", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type\n self.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type\n self.number_served = 0", "def func(self):\n char = self.character\n if not self.args:\n char.msg(\"Usage: uncover <worn clothing object>\")\n return\n\n to_uncover = char.search(self.args, candidates=char.contents)\n if not to_uncover:\n return\n if not to_uncover.db.worn:\n char.msg(\"You're not wearing {item}!\".format(item=to_uncover.get_display_name(char)))\n return\n if not to_uncover.db.covered_by:\n char.msg(\"{item} isn't covered by anything!\".format(item=to_uncover.get_display_name(char)))\n return\n covered_by = to_uncover.db.covered_by\n if covered_by.db.covered_by:\n char.msg(\"{item} is under too many layers to uncover.\".format(item=to_uncover.get_display_name(char)))\n return\n char.location.msg_contents(\"{wearer} uncovers {item}.\", mapping=dict(wearer=char, item=to_uncover))\n to_uncover.db.covered_by = None", "def _createCadence(self, harmonicRhythm):\n random = RandomManager.getActive()\n\n # choose cadence to apply\n cadences = self.chordProfile.getCadences()\n\n #TODO: Choose cadence in more intelligent way\n cadence = random.choice(cadences)\n\n scale = self.chordProfile.getScale().getName()\n cadenceChordProgression = []\n\n reversedHarmonicRhythm = reversed(harmonicRhythm[:])\n # create as many cadence\n for count, durationObj in enumerate(reversedHarmonicRhythm):\n duration = durationObj.getDuration()\n code = cadence[-count+1]\n chord = Chord(code, duration=duration, scale=scale, octave=4)\n\n if count >= len(cadence):\n return cadenceChordProgression\n\n # prepend chord\n cadenceChordProgression.insert(0, chord)\n return cadenceChordProgression", "def __init__(self, player_num):\n self.name = \"Computer player \" + str(player_num)\n self.ward = 
str(player_num)\n self.budget = 10\n self.happiness = 100", "def __init__(self, name, cash):\n self.name = name\n self.cash = cash\n self.risk_money = 0.5 * self.cash\n self.credit_line = 100000\n self.portfolios = []\n self.investment_seed = 0.5 * self.risk_money\n # Add to global list of INVESTORS\n INVESTORS.append(self)", "def Champs(self, summoner):\n encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')\n region = summoner.get('region', DEFAULT_REGION)\n r = self._rito.ListChampionMasteries(region, encrypted_summoner_id)\n if r:\n logging.info('Got champ mastery data for %s/%s [%s]', region,\n encrypted_summoner_id, summoner['summoner'])\n # Calculate total number of chests received\n total_chests = sum(1 for x in r.champion_masteries if x.chest_granted)\n\n top_champs = []\n for champ in r.champion_masteries[:3]:\n top_champs.append(self._game.champion_id_to_name[str(\n champ.champion_id)])\n top_champ_lvl = r.champion_masteries[0].champion_level\n\n chest_verb = ''\n chest_verb_dict = {\n (0, 2): 'receiving',\n (2, 4): 'collecting',\n (4, 8): 'earning',\n (8, 16): 'amassing',\n (16, 32): 'hoarding'\n }\n for range_spec, verb in chest_verb_dict.items():\n if total_chests in range(*range_spec):\n chest_verb = verb\n break\n\n if chest_verb:\n chest_str = '%s %s' % (chest_verb,\n inflect_lib.Plural(total_chests, 'chest'))\n else:\n chest_str = 'with a boatload of chests (%d)' % total_chests\n\n return (u'{0} is a L{1} {2[0]} main, but sometimes likes to play {2[1]} '\n 'and {2[2]}, {3} this season.').format(summoner['summoner'],\n top_champ_lvl, top_champs,\n chest_str)", "def cria_carro_ford(self):\n\n self.total_de_carros_ford += 1\n print(\"Carro Ford #\", self.total_de_carros_ford, \" criado\")", "def destroy(self, cause:str, *, warp_core_breach:bool=False, self_destruct:bool=False):\n gd = self.game_data\n #gd.grid[self.sector_coords.y][self.sector_coords.x].removeShipFromSec(self)\n is_controllable = self.is_controllable\n #wc_value = self.sys_warp_core.get_effective_value\n\n if self.is_controllable:\n self.game_data.cause_of_damage = cause\n try:\n self.life_support.able_crew = 0\n self.life_support.injured_crew = 0\n except AttributeError:\n pass\n try:\n for k in self.torpedo_launcher.torps.keys():\n self.torpedo_launcher.torps[k] = 0\n self.torpedo_launcher.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.shield_generator.shields = 0\n self.shield_generator.shields_up = False\n self.shield_generator.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.polarized_hull.polarization_amount = 0\n self.polarized_hull.is_polarized = False\n self.polarized_hull.integrety = 0.0\n except AttributeError:\n pass\n self.power_generator.energy = 0\n self.power_generator.integrety = 0\n try:\n self.warp_drive.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.beam_array.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.cannons.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.impulse_engine.integrety = 0.0\n except AttributeError:\n pass\n self.sensors.integrety = 0.0\n try:\n self.cloak.cloak_status = CloakStatus.INACTIVE\n self.cloak.integrety = 0.0\n except AttributeError:\n pass\n try:\n self.transporter.integrety = 0.0\n except AttributeError:\n pass\n\n if is_controllable:\n gd.engine.message_log.print_messages = False\n\n if warp_core_breach or self_destruct:\n \n self.warp_core_breach(self_destruct)\n self.hull = -self.ship_class.max_hull\n \n if self is self.game_data.selected_ship_planet_or_star:\n 
self.game_data.selected_ship_planet_or_star = None\n \n self.get_sub_sector.destroy_ship(self)", "def attic(self):\n option = None\n leave_attic = False\n\n # Code to describe the attic.\n print(\"You are in the attic.\")\n time.sleep(1)\n print(\"It is dark and humid.\")\n time.sleep(1)\n print(\"You see a chest and a desk.\")\n time.sleep(1)\n\n # While loop allows player to stay in the attic for as \n # long as they want.\n while leave_attic == False:\n print(\"What do you do?\")\n time.sleep(1)\n print(\"Look in the chest, examine the desk or return to the hallway?\")\n option = input(\"(Chest/Desk/Hallway) \\n\")\n # If statement checks for valid input.\n if option == \"Chest\" or option == \"chest\":\n self.objects.chest(character = self.character)\n elif option == \"Desk\" or option == \"desk\":\n self.character.desk()\n elif option == \"Hallway\" or option == \"hallway\":\n # Escapes the while loop.\n self.room_name = \"hallway\"\n leave_attic = True\n else:\n print(\"Not a valid room name, try again.\")", "def __init__(self, car=None):\n if car is not None:\n self.actual_coordinates = car.get_actual_coordinates()\n self.origin_coordinates = car.get_origin_coordinates()\n self.acceleration = car.get_acceleration()\n self.speed = car.get_speed()\n self.name = car.get_name()\n self.lane = car.get_lane()\n self.creation_time = car.get_creation_time()\n self.new = car.is_new()\n self.intention = car.get_intention()\n self.caravan_depth = car.get_caravan_depth()\n self.supervisor = car.is_supervisor\n else:\n self.actual_coordinates = (384, 384, 0, 1)\n self.origin_coordinates = (384, 384, 0, 1)\n self.acceleration = 3\n self.name = -1\n self.speed = 10\n self.creation_time = -1\n self.new = True\n self.intention = \"s\"\n self.caravan_depth = 0\n self.supervisor = False\n self.receiver = None\n self.follower = None\n self.follow = False\n self.value = self.value_dict[self.__class__.__name__]", "def __repr__(self):\n\n return \"<Chore chore_id=%s name=%s>\" % (self.id, self.name)", "def CreadorComentario(hora, fecha, contenido, act, usuario): \n nuevoComentario = Comentario(horacomentario=hora, fechacomentario=fecha, contenido=contenido, idactcomentario=act,loginusuario=usuario)\n nuevoComentario.save()\n Accion.objects.crearAccion(\n usuario,\n \"El usuario %s hizo un comentario en la actividad %s\" % (usuario.username, act.nombreact),\n 'i')\n\n Accion.objects.crearAccion(\n usuario,\n \"Se creo una instancia de Comentario con los valores Fecha: %s, Contenido: %s\" % (fecha, contenido),\n 'd'\n )", "def __init__(self, car, cdr):\n # be the cache for string form of this pair\n self._str = ''\n # here can't use prop to update _str\n # because _car and _cdr aren't constructed\n self._car = car\n self._cdr = cdr\n # invoke update_str() manually only here\n self.update_str()", "def paintHair(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"hair_\"+self.avatarConfiguration[\"hairStyle\"], self.avatarConfiguration[\"hairColor\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"hair\")", "def __init__(self,\n xyz_le: np.ndarray = np.array([0, 0, 0]),\n chord: float = 1.,\n twist_angle: float = 0,\n twist_axis: np.ndarray = np.array([0, 1, 0]),\n airfoil: Airfoil = Airfoil(\"naca0012\"),\n control_surface_is_symmetric: bool = True,\n control_surface_hinge_point: float = 0.75,\n control_surface_deflection: float = 0.,\n ):\n\n self.xyz_le = np.array(xyz_le)\n 
self.chord = chord\n self.twist = twist_angle\n self.twist_axis = np.array(twist_axis)\n self.airfoil = airfoil\n self.control_surface_is_symmetric = control_surface_is_symmetric\n self.control_surface_hinge_point = control_surface_hinge_point\n self.control_surface_deflection = control_surface_deflection", "def get_cheap_conformer(self):\n num_confs = min(500, max(50, len(self.mol.atoms) * 3))\n rd_mol, rd_index_map = conformers.embed_rdkit(label=self.label, mol=self.mol, num_confs=num_confs)\n xyzs, energies = conformers.rdkit_force_field(label=self.label, rd_mol=rd_mol, rd_index_map=rd_index_map,\n mol=self.mol, force_field='MMFF94', return_xyz_strings=True)\n if energies:\n min_energy = min(energies)\n min_energy_index = energies.index(min_energy)\n self.cheap_conformer = xyzs[min_energy_index]\n elif xyzs:\n self.cheap_conformer = xyzs[0]\n else:\n logger.warning('Could not generate a cheap conformer for {0}'.format(self.label))\n self.cheap_conformer = None", "def presenetCar():", "def reset(self) -> None:\n self.cash_balance = self.initial_cash_balance()", "def __repr__(self):\n return self.name + self.suit", "def func(self):\n char = self.character\n clothing = char.search(self.args, candidates=char.contents)\n if not clothing:\n return\n if not clothing.db.worn:\n char.msg(\"You're not wearing that!\")\n return\n if clothing.db.covered_by:\n char.msg(\"You have to take off %s first.\" % clothing.db.covered_by.name)\n return\n clothing.remove(char)", "def CombatRoll(self):\t\t\n\t\tprint(self.name.Title() + \"CombatRoll\")", "def healthcare():", "def war_tie(cls, card1, card2):\n print(\"------------------------------------------------\")\n print(\"Tie!!\")\n print(f\"{card1.show()} is equal to {card2.show()}\")\n print(\"------------------------------------------------\")", "def modifier_classement_joueur_tournoi(self, joueurs_tournoi, championnat, rapport):\r\n rapport.affichage_classement_championnat(championnat)\r\n championnat = sorted(championnat, key=lambda x: x.classement) # tri joueurs du championnat par classement\r\n print(\"Veuillez indiquer le numéro du joueur à modifier:\")\r\n choix = int(input())\r\n if choix <= len(championnat): # test si choix numero joueur valide\r\n index = choix - 1 # car liste commence a 0\r\n joueur = championnat[index]\r\n nouveau_joueur = copy.deepcopy(joueur)\r\n print(\"Veuillez indiquer le nouveau classement de \" + joueur.nom)\r\n nouveau_classement = int(input())\r\n nouveau_joueur.classement = nouveau_classement\r\n championnat.remove(joueur) # enleve ancienne position du joueur dans classement\r\n joueurs_tournoi.remove(joueur) # enleve ancienne position du joueur dans tournoi\r\n championnat.append(nouveau_joueur) # ajoute joueur avec classement actualise\r\n joueurs_tournoi.append(nouveau_joueur) # ajoute joueur classement actualise dans liste participants tournoi\r\n return joueurs_tournoi, championnat\r\n else:\r\n print(\"Numero joueur invalide\")\r\n return", "def __repr__(self):\n return f'<RestaurantCourier restaurant: {self.restaurant_id} courier: {self.courier_id}>'" ]
[ "0.50800323", "0.49419627", "0.48987815", "0.48388648", "0.46962285", "0.46602952", "0.4589004", "0.45846203", "0.45836064", "0.45684838", "0.45631933", "0.45429364", "0.45290777", "0.45177618", "0.45131966", "0.4502921", "0.45016715", "0.44828025", "0.4480651", "0.4474967", "0.4462841", "0.44622973", "0.4459921", "0.44564182", "0.4455777", "0.44506836", "0.44417816", "0.4428676", "0.44183275", "0.4415764", "0.4415764", "0.4415764", "0.44125718", "0.4389924", "0.43873486", "0.4385266", "0.43822974", "0.43752632", "0.4374519", "0.43626112", "0.43557262", "0.43504736", "0.43416727", "0.43416727", "0.43381825", "0.43368265", "0.43324208", "0.4330543", "0.4330543", "0.43194392", "0.43101045", "0.42824665", "0.42798954", "0.42792073", "0.42785847", "0.42749953", "0.4273784", "0.4273784", "0.4269452", "0.4268462", "0.42683765", "0.4264931", "0.4262654", "0.42579788", "0.42562062", "0.42483565", "0.4239046", "0.42345494", "0.42314675", "0.42308116", "0.4225592", "0.42191246", "0.42178562", "0.42102745", "0.42071068", "0.419545", "0.419545", "0.41895634", "0.41845247", "0.41844326", "0.41778037", "0.4171705", "0.41690376", "0.41675103", "0.41658643", "0.41633937", "0.41582283", "0.41565496", "0.41536587", "0.41503447", "0.414688", "0.41466016", "0.41455132", "0.41349554", "0.41306722", "0.41295463", "0.4124521", "0.41234282", "0.41221792", "0.41196075", "0.4118416" ]
0.0
-1
This method returns the chair's assign. >>> chair1 = Chair("empire", "bedroom", "armchair") >>> assert(chair1.get_assign() == "bedroom")
def get_assign(self): return self.assign
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_assign_identity(self) -> Union[str, None]:\n\n return self._get_assign_identity(enable_validation=True)", "def __getitem__(self, name: str) -> Set[BaseAssignment]:\n ...", "def perform_exercise_assign(args):\n FBDPCommon.callSelectionHook(args, 'trades', 'exercise_assign_selection')\n e = Exercise('Exercise Assign', args['Testmode'], args)\n e.perform()\n e.end()\n\n # Fix physically settled future closeouts. Please see JIRA ABITFA-2562\n # for more detail about this fix.\n if args['DoFixPhysicals']:\n fixPhysicals(args['trades'], args['Testmode'])", "def assign(self,player,hcp):\n\n # Higher hcp = higher bonus potention (max 100)\n assert hcp <= 100, 'Skill handicap cannot be >100 hcp : {0}'.format(\n hcp)\n\n if self.level is not None:\n base,bonus = RandomRoll(player,self,hcp)\n\n if base and bonus:\n self.level += random.randint(3)+1\n elif base:\n self.level += random.randint(2)", "def assign(self, *args):\n return _ida_hexrays.cif_t_assign(self, *args)", "def test_get_role_inherit_equal(self):\n project_as = self.make_assignment(\n self.project, self.user_alice, self.role_owner\n )\n self.assertEqual(self.project.get_role(self.user_alice), project_as)", "def get_suit(self):\r\n return self.suit", "def test_get_role_inherit_higher(self):\n cat_as = self.make_assignment(\n self.category, self.user_bob, self.role_contributor\n )\n self.make_assignment(self.project, self.user_bob, self.role_guest)\n self.assertEqual(self.project.get_role(self.user_bob), cat_as)", "def test_auto_assign_one(self):\n shift1 = RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=5,\n )\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)\n self.assertIn(self.profile, shift1.current_assignees.all())\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter == self.profile\n for instance in instances\n ))\n\n pool_hours = self.profile.pool_hours.get(pool=self.p1)\n self.assertEqual(\n pool_hours.assigned_hours,\n pool_hours.hours,\n )", "def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)", "def is_assigned(self):\n if \"isAssigned\" in self._prop_dict:\n return self._prop_dict[\"isAssigned\"]\n else:\n return None", "def test_get_role_inherit_lower(self):\n self.make_assignment(self.category, self.user_bob, self.role_guest)\n project_as = self.make_assignment(\n self.project, self.user_bob, self.role_contributor\n )\n self.assertEqual(self.project.get_role(self.user_bob), project_as)", "def case_detail_assign_view(request, pk):\n issue = _get_issue(request, pk)\n serializer = IssueAssignmentSerializer(data=request.data, instance=issue)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"issue\": IssueDetailSerializer(issue).data})", "def get(self, assignment_id):\n return self.__assignments[self.find_assignment_index(assignment_id)]", "def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"assign: \", tok )\n\tif re.match( Lexer.identifier, tok ):\n\t\tident = VarRef( tok )\n\telse: \n\t\terror( \"Invalid identifier\" )\n\ttok = tokens.next( )\n\tequals = match( \"=\" )\n\ttok = tokens.peek( )\n\texpr = expression( )\n\tmatch( \";\" )\n\tequals = VarRef( equals )\n\tstatement = assign( equals, ident, expr )\n\treturn statement", "def getAssignmentRule(self, 
*args):\n return _libsbml.Model_getAssignmentRule(self, *args)", "def on_assign(self):", "def _get_equipment(self):\r\n eq = self._pvsr.getEquipmentByName(self._meas[\"equipment\"])\r\n if eq is None:\r\n site = self._pvsr.getSiteByName(self._default_site)\r\n if site is None:\r\n logging.info(\"Creating new default site {0}\".format(self._default_site))\r\n site = self._pvsr.create_pvsr_object(\"Site\")\r\n site.ParentId = 1\r\n site.Name = self._default_site\r\n site=self._pvsr.addSite(site)\r\n else:\r\n logging.debug(\"Default site ID is {0}\".format(site.Id))\r\n \r\n logging.info(\"Creating new equipment: {0}\".format(self._meas[\"equipment\"]))\r\n if self._meas[\"collector_type\"] == 'J':\r\n eq = self._pvsr.create_pvsr_object(\"JagaEquipment\")\r\n eq.ASCII_0000_EQ_COLL_KEY = self._meas[\"equipment\"] + \"key\"\r\n elif self._meas[\"collector_type\"] == 'Y':\r\n eq = self._pvsr.create_pvsr_object(\"SynthTransEquipment\")\r\n else:\r\n raise ValueError(\"The equipment does not exist in PVSR\") \r\n eq.Name = self._meas[\"equipment\"]\r\n eq.ParentId = site.Id\r\n eq.CollectorType = self._meas[\"collector_type\"]\r\n eq.IntervalInSec = 300\r\n eq.RetainRawData = 365\r\n eq.CollectData = \"Yes\"\r\n \r\n eq = self._pvsr.addEquipment(eq)\r\n logging.info(\"Added equipment {0}, id: {1}\".format(self._meas[\"equipment\"],eq.Id))\r\n else:\r\n logging.debug(\"Found equipment: {0}, id: {1}\".format(self._meas[\"equipment\"],eq.Id))\r\n return eq", "def _assignment(info):\n\n return info.ui.context['object']", "def get_assignments(self) -> List :\n return self.assignments", "def _get_assign_identity(self, enable_validation: bool = False) -> Union[str, None]:\n # read the original value passed by the command\n assign_identity = self.raw_param.get(\"assign_identity\")\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.identity and\n self.mc.identity.user_assigned_identities is not None\n ):\n value_obtained_from_mc = safe_list_get(\n list(self.mc.identity.user_assigned_identities.keys()), 0, None\n )\n if value_obtained_from_mc is not None:\n assign_identity = value_obtained_from_mc\n\n # this parameter does not need dynamic completion\n # validation\n if enable_validation:\n if assign_identity:\n if not self._get_enable_managed_identity(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--assign-identity can only be specified when --enable-managed-identity is specified\"\n )\n else:\n if self.decorator_mode == DecoratorMode.CREATE:\n if self._get_assign_kubelet_identity(enable_validation=False):\n raise RequiredArgumentMissingError(\n \"--assign-kubelet-identity can only be specified when --assign-identity is specified\"\n )\n return assign_identity", "def getInitialAssignment(self, *args):\n return _libsbml.Model_getInitialAssignment(self, *args)", "def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0", "def get_assignment_by_request(self, request):\r\n return self.context.provider.get_assignment_by_request(request)", "def test_handle_force_assign(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name -f\",\r\n user),\r\n (\"Project 
successfully assigned!\", 200))", "def get_combat_assignment(self):\n mapping = defaultdict(list)\n for uid, state in self._uid_to_creature_state.items():\n if state.attacking:\n # Touch uid to create an empty tuple.\n mapping[uid]\n elif state.blocking:\n mapping[state.blocking].append(uid)\n return CombatAssignment(mapping)", "def assign(self, available_workers):\n \n status = self.getStatus()\n\n assert len(available_workers) == 1\n worker = available_workers[0]\n assignment = {}\n\n w_id = str(worker.id)\n task_id = self.task_id\n\n #tracks \n worker_assignments_var = redis_get_worker_assignments_var(task_id, w_id)\n\n print \"WORKER ID:\", w_id\n print \"STATUS:\", status\n print \"ASSIGNMENTS FOR WORKER SO FAR:\", app.redis.smembers(worker_assignments_var)\n\n\n # sort questions by pomdp expected reward...\n # XXX this isn't quite what we want...\n # want to sort by value of getting another label\n # so we don't have all workers getting assigned to the same question\n unfinished_unsorted_qs = [(q,v) for (q,v) in status.iteritems() if v['best_action_str'] == 'create-another-job']\n # NOTE REVERSE ORDER\n sorted_qs = sorted(unfinished_unsorted_qs, key=lambda x:x[1]['best_expected_reward'], reverse=True)\n print \"sorted_qs\", sorted_qs\n# print \"worker %s has done the following questions\" % w_id\n# for (q_id,er) in sorted_qs:\n# if app.redis.sismember(worker_assignments_var, q_id):\n# print \"+\", q_id\n# else:\n# print \"-\", q_id\n\n for idx in range(len(sorted_qs)):\n q_id,expected_reward = sorted_qs[idx]\n\n if not app.redis.sismember(worker_assignments_var, q_id):\n assignment[w_id] = q_id\n print \"assignment=\", assignment\n app.redis.sadd(worker_assignments_var, q_id)\n return assignment\n\n #if here no assignment was made to our worker!\n assert len(assignment) == 0\n print \"no assignment made yet\"\n\n #NOTE POMDP doesn't think there are any questions available to the worker \n #that need another label, but let's give them an assignment anyway\n #Pick question where submitting would have worst expected reward \n # (implying it may benefit from another label)\n finished_qs = [(q,v) for (q,v) in status.iteritems() if v['best_action_str'] != 'create-another-job']\n sorted_finished_qs = sorted(finished_qs, key=lambda x:x[1]['best_expected_reward']) # no reverse\n for idx in range(len(sorted_finished_qs)):\n q_id,expected_reward = sorted_finished_qs[idx]\n\n if not app.redis.sismember(worker_assignments_var, q_id):\n assignment[w_id] = q_id\n print \"gave worker a finished q assignment=\", assignment\n app.redis.sadd(worker_assignments_var, q_id)\n return assignment\n\n return assignment", "def assign(self):\n df = pd.read_csv(self.summary, '\\t', index_col=0)\n df.columns = [os.path.basename(i) for i in df.columns.tolist()]\n ## total\n total = df.sum(axis=0, skipna=True)\n ## assign\n assign = df.loc['Assigned', ] / total * 100\n ## pct\n assign_df = assign.to_frame('assigned%')\n ## minimum\n min_pct = assign.min()\n if assign.min() < 60:\n log.warning('!!!! 
most of the reads are not assigned, Visual checking recommended !!!!')\n\n return assign_df", "def assign(self, *args):\n return _libsbml.string_assign(self, *args)", "def test_handle_assign_assign_error(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (self.testcommand.assigned_error, 200))", "def getCamp(self, id):\n return self.__camps[id];", "def assignlock(self, nick, channel, assignee, resourcestr):\n return (channel, self._lock(nick, assignee, resourcestr))", "def get_armor_equipped(self):\n\t\treturn self.equippedArmor", "def get_assignment(self, assignment_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"assignments\", \"assignment_id\", assignment_id)", "def ability(self, ability):\n\n return self.race.abilities.get(ability)", "def get_privilege(cls, slug, assignment=None):\n cache = cls.get_cache()\n if cache.disabled:\n roles = Role.objects.filter(slug=slug)\n if roles:\n return roles[0].instantiate(assignment or {})\n return None\n privileges = cache.get(cls.PRIVILEGES_BY_SLUG)\n if privileges is None:\n cls.update_cache()\n privileges = cache.get(cls.PRIVILEGES_BY_SLUG)\n privilege = privileges.get(slug)\n if privilege is None:\n return None\n if assignment:\n return privilege.role.instantiate(assignment)\n return privilege", "def get(self, name):\n return self.cm.get(name)", "def get_chern(self, attribute=True):\n chern = kcgfns.load_chern(self)\n if chern is None:\n chern = kcgfns.calc_chern(self)\n if attribute:\n self.chern = chern\n\n return chern", "def __repr__(self):\n return self.name + self.suit", "def get_assignment_info(self):\n url = self.server_url + \"/api/v1/courses/\" + str(self.course_id) + '/assignments/' + str(self.assignment_id)\n r = requests.get(url, headers=self.headers, params=self.params)\n assignment = json.loads(r.text)\n return assignment", "def get_assignment_by_id(self, assignment_id):\r\n\r\n provider = self.context.provider\r\n assert isinstance(provider, AlchemyProvider)\r\n return self.context.provider.get_assignment_by_id(assignment_id)", "def rhs(self):\n if not self.is_assign():\n raise AssertionError('Not an assignment')\n return self.initializer", "def getSuit(self):\r\n return self.suit", "def _getitem_from_self_or_parent(self, name: str) -> Set[BaseAssignment]:\n return self[name]", "def lhs(self):\n if not self.is_assign():\n raise AssertionError('Not an assignment')\n return self.var.name", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'RoleManagementPolicyAssignment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = RoleManagementPolicyAssignmentArgs.__new__(RoleManagementPolicyAssignmentArgs)\n\n __props__.__dict__[\"effective_rules\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"policy_assignment_properties\"] = None\n __props__.__dict__[\"policy_id\"] = None\n __props__.__dict__[\"role_definition_id\"] = None\n __props__.__dict__[\"scope\"] = None\n __props__.__dict__[\"type\"] = None\n return RoleManagementPolicyAssignment(resource_name, opts=opts, __props__=__props__)", "def get_corresponding_tank(self):\n pump_assign_dict = constants.CORRESPONDING_TANK_DICT\n\n assert self.en_id in pump_assign_dict, f\"Pump {self.en_id} does not 
have corresponding tank!\"\n\n return pump_assign_dict[self.en_id]", "def pass_assign_for_mentor(cls):\n assignments_list = cls.list_from_sql()\n return assignments_list", "def achieve_mission(mission_id):\n from models import Mission\n mission = Mission.objects.get(id=mission_id)\n return getattr(mission,\n 'achieve_mission_{}'.format(mission.mission_type),\n #let it commented for debug purpose\n #self.achieve_mission_stay\n )()", "def get_assignment_by_name(self, assignment_name, assignments=None):\n if assignments is None:\n assignments = self.get_assignments()\n for assignment in assignments:\n if assignment['name'] == assignment_name:\n return assignment['assignmentId'], assignment\n return None, None", "def getArmor(self):\n return self.av", "def get_teacher_assign():\n assignment_data = query_db(\n \"SELECT assignments.id, assignments.name, assignments.due_date \"\n \"FROM assignments JOIN topics ON assignments.topic_id=topics.id \"\n \"JOIN classes ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n assignments = []\n for assignment in assignment_data:\n assignment_dict_teach = {}\n assignment_dict_teach[\"id\"] = assignment[0]\n assignment_dict_teach[\"name\"] = assignment[1]\n assignment_dict_teach[\"due_date\"] = assignment[2]\n assignments.append(assignment_dict_teach)\n return assignments", "def assign(self, *args):\n return _ida_hexrays.cnumber_t_assign(self, *args)", "def __str__(self): \n \n return self.suit + self.rank", "def assignment(self):\n return {}", "def __str__(self):\r\n return self.suit + self.rank", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def assignment(self, request, pk=None):\n\n obj = self.get_object()\n obj_mapping = {\n 'teacher': obj\n }\n try:\n user = self.request.user\n query = models.Assignment.objects.filter(\n subject__teacher__user=user,\n subject=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.Assignment,\n pk=id,\n subject=obj\n )\n return self.filtering(request, q)\n\n self.actionhelper(request, query, obj_mapping)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def test_single_quant_assign(self):\n pick = self.quant_1.create_picking(\n self.picking_type_pick, assign=True, user_id=self.test_user.id\n )\n # Check it is in state assigned\n self.assertEqual(pick.state, \"assigned\")\n # Check user is assigned\n self.assertEqual(pick.user_id, self.test_user)\n # Check quant_1 is now reserved\n self.assertEqual(self.quant_1.reserved_quantity, 10)", "def auto_assign(self) -> Optional[pulumi.Input[Union[str, 'BfdEnabled']]]:\n return pulumi.get(self, \"auto_assign\")", "def instantiated_to_role(self, assignment):\n composed_assignment = {}\n if assignment:\n for key in self.to_role.parameters & set(assignment.keys()):\n composed_assignment[key] = assignment[key]\n if self.assignment:\n composed_assignment.update(self.assignment)\n return self.to_role.instantiate(composed_assignment)", "def assign_categories(self):\n if \"assignCategories\" in self._prop_dict:\n return self._prop_dict[\"assignCategories\"]\n else:\n return None", "def career_choice (upp): #input upp list\r\n\tif upp[4]==max(upp):\r\n\t\tcareer=Navy\r\n\telif 
upp[0]==max(upp):\r\n\t\tcareer=stellagama.random_choice([Scouts, Marines])\r\n\telif upp[2]==max(upp):\r\n\t\tcareer=Army\r\n\telif upp[3]==max(upp):\r\n\t\tcareer=Merchants\r\n\telse:\r\n\t\tcareer=Other\r\n\treturn career #outputs the chatacter's career\r", "def get_iupac(self):\n if self.id_type == 'iupac':\n self.iupac = self.drug_id\n else:\n _pubchem_id_type_checker(self.id_type)\n r = requests.get(f\"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/{self.id_type}/{self.drug_id}/property/iupacname/json\")\n response = r.json()\n data = response['PropertyTable']['Properties'][0]\n self.iupac = data['IUPACName']\n return self.iupac", "def project_assign(request, project_code):\n if request.user.is_authenticated:\n projects = Project.objects.all()\n context = {'projects': projects}\n selected_project = get_object_or_404(Project, code=project_code)\n try:\n selected_project.status = 2 # project is assigned\n selected_project.save()\n\n # getting the head\n assigned_head = User.objects.get(department=selected_project.department,\n role__name__iexact=role_department_head)\n # create new task history object\n task_history = TaskHistory()\n task_history.project = selected_project\n task_history.description = (model_to_dict(selected_project))\n task_history.status = 'New Project'\n task_history.user = assigned_head\n task_history.save()\n\n \"\"\" Setting notification as project is assigned to a head \"\"\"\n assigned_head.notification_count += 1\n assigned_head.save()\n selected_project.assigned_at = datetime.now() # setting the assigned time\n selected_project.save()\n # print(assigned_head, '------------------------------------------*********************',\n # assigned_head.notification_count)\n messages.success(request, f\"Project '{selected_project.name}' is assigned to the department head.\")\n return redirect('project-list')\n except Exception as e:\n # print('error at assign project ====', e)\n messages.error(request, f\"Error: {e}\")\n return render(request, 'projectmanager/project_list.html', context)", "def role_assignment():\n\n # Create a dictionary of roles keyed by the role name.\n all_roles = {}\n\n try:\n roles = get_permissions_manager().policy_manager.policy_storage.all_roles()\n except PolicyStorageError, e:\n error(None, str(e))\n return\n\n for name, description in roles:\n all_roles[name] = Role(name=name, description=description)\n\n assignment = Assignment()\n view = _AssignmentView(all_roles)\n handler = _AssignmentHandler(all_roles=all_roles)\n\n assignment.edit_traits(view=view, handler=handler)", "def getMaintainer(self, name, email):\r\n if self.maintainers.has_key(name):\r\n return self.maintainers[name]\r\n else:\r\n self.maintainers[name] = Maintainer(name, email)\r\n return self.maintainers[name]", "def __str__(self):\n return str(self.rank)+str(self.suit)", "def assign(self, assignee, created_by, unit):\n assignment = ReferralAssignment.objects.create(\n assignee=assignee,\n created_by=created_by,\n referral=self,\n unit=unit,\n )\n ReferralActivity.objects.create(\n actor=created_by,\n verb=ReferralActivityVerb.ASSIGNED,\n referral=self,\n item_content_object=assignee,\n )\n # Notify the assignee by sending them an email\n Mailer.send_referral_assigned(\n referral=self,\n assignment=assignment,\n assigned_by=created_by,\n )\n\n if self.state in [ReferralState.IN_VALIDATION, ReferralState.PROCESSING]:\n return self.state\n\n return ReferralState.ASSIGNED", "def assign(self, *args):\n return _ida_hexrays.cexpr_t_assign(self, *args)", "def getSuit(self):\n 
return self.suit", "def get_assigner(self):\n return self.ewma_trainer.apply([self.mean, self.variance])", "def get_assigner(self):\n return self.ewma_trainer.apply([self.mean, self.variance])", "def get_occupant(self):\n\t\treturn self.occupant", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def assignments(self):\n if \"assignments\" in self._prop_dict:\n return AssignmentsCollectionPage(self._prop_dict[\"assignments\"])\n else:\n return None", "def assignments(self):\n if \"assignments\" in self._prop_dict:\n return AssignmentsCollectionPage(self._prop_dict[\"assignments\"])\n else:\n return None", "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node", "def __getitem__(self, comp):\n \n return self.compartimentos[comp]", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def __str__(self):\r\n #create a dictionary of suits\r\n self.suitDic = {\"s\": \"spades\",\"d\":\"diamonds\",\"h\":\"hearts\",\"c\":\"clubs\"}\r\n #create a list of ranks\r\n self.rankList = [\"\", \"Ace\",\"Two\",\"Three\",\"Four\",\"Five\",\"Six\",\"Seven\",\"Eight\",\r\n \"Nine\",\"Ten\",\"Jack\",\"Queen\",\"King\"]\r\n self.rank = self.rankList[self.rank]\r\n self.suit = self.suitDic[self.suit]\r\n self.name = self.rank + \" of \" + self.suit\r\n return self.name", "def get(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n organization = model.organization.get_organization(orgname)\n return get_card(organization)\n\n raise Unauthorized()", "def get_player(self,p):\n self._validate(p)\n return p.player()", "def assign(obj, path, val, missing=None):\n return glom(obj, Assign(path, val, missing=missing))", "def __repr__(self):\n return self.rank + self.suit", "def get_hp():\n\n return character['HP']", "def test_get_user_assignable_assessments(self):\n api_instance = relias_api_client.AssessmentsApi(relias_api_client.ApiClient())\n result = api_instance.get_user_assignable_assessments(\"test@dka.im\")\n self.assertEqual(result.total_count, 255)", "def ensure_chern(self, attribute=True):\n chern = kcgfns.load_chern(self)\n if chern is None:\n chern = kcgfns.calc_chern(self)\n\n if attribute:\n self.chern = chern\n return chern", "def visit_AugAssign(self, node):\n # FIXME: Gensym the LHS to avoid two evaluations.\n self.generic_visit(node)\n rhs = to_call(self.op_to_function(node.op),\n [set_ctx(node.target), node.value])\n return ast.Assign([node.target], rhs)", "def assign(self, assignee: np.ndarray):\n if isinstance(self.data, pd.DataFrame):\n self.data = 
pd.concat([self.data, assignee], axis=1, ignore_index=True)\n else:\n self.data = pd.DataFrame(data=assignee)", "def __repr__(self):\n return \"<Credit({0} {1})>\".format(self.chore_id, self.worker_id)", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def getAircraft(self, code):\n \t\n return self.aircraftDict[code.upper()]", "def test_assign(self):\n dbpool = buildConnectionPool(self, jobSchema + schemaText)\n yield self._enqueue(dbpool, 1, 2)\n\n # Make sure we have one JOB and one DUMMY_WORK_ITEM\n def checkJob(txn):\n return JobItem.all(txn)\n\n jobs = yield inTransaction(dbpool.connection, checkJob)\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n\n @inlineCallbacks\n def assignJob(txn):\n job = yield JobItem.load(txn, jobs[0].jobID)\n yield job.assign(datetime.datetime.utcnow(), ControllerQueue.queueOverdueTimeout)\n yield inTransaction(dbpool.connection, assignJob)\n\n jobs = yield inTransaction(dbpool.connection, checkJob)\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is not None)\n self.assertEqual(jobs[0].isAssigned, 1)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n destination_pool_name: Optional[pulumi.Input[str]] = None,\n ip: Optional[pulumi.Input[str]] = None) -> 'DedicatedIpAssignment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _DedicatedIpAssignmentState.__new__(_DedicatedIpAssignmentState)\n\n __props__.__dict__[\"destination_pool_name\"] = destination_pool_name\n __props__.__dict__[\"ip\"] = ip\n return DedicatedIpAssignment(resource_name, opts=opts, __props__=__props__)", "def get_user_assignd_identity_from_mc(self) -> Union[str, None]:\n user_assigned_identity = None\n if self.mc and self.mc.identity and self.mc.identity.user_assigned_identities:\n user_assigned_identity = safe_list_get(list(self.mc.identity.user_assigned_identities.keys()), 0, None)\n return user_assigned_identity", "def assigned_to(self) -> Optional[str]:\n return pulumi.get(self, \"assigned_to\")", "def assign_employee(id):\r\n check_admin()\r\n\r\n employee = Employee.query.get_or_404(id)\r\n\r\n # prevent admin from being assigned a department or role\r\n if employee.is_admin:\r\n abort(403)\r\n\r\n form = EmployeeAssignForm(obj=employee)\r\n if form.validate_on_submit():\r\n employee.department = form.department.data\r\n employee.role = form.role.data\r\n db.session.add(employee)\r\n db.session.commit()\r\n flash('You have successfully assigned a department and role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_employees'))\r\n\r\n return render_template('admin/employees/employee.html',\r\n employee=employee, form=form,\r\n title='Assign Employee')", "def __str__(self):\n return \"{} of {}\".format(self.rank,self.suit)" ]
[ "0.567902", "0.5271101", "0.5108276", "0.5069352", "0.50534844", "0.50037473", "0.4950295", "0.49384058", "0.4930697", "0.49107504", "0.4898811", "0.48804885", "0.48743683", "0.48659533", "0.4856359", "0.4852952", "0.48237786", "0.48076132", "0.47997218", "0.47993913", "0.4785297", "0.47750956", "0.47211322", "0.47086668", "0.46942687", "0.46763316", "0.46745923", "0.46642625", "0.4663645", "0.46558547", "0.46517313", "0.46496913", "0.4634478", "0.46138826", "0.46121487", "0.46090585", "0.4605492", "0.460536", "0.4601376", "0.45981562", "0.45921662", "0.45906436", "0.45763108", "0.45663056", "0.4564159", "0.45564285", "0.454946", "0.45089158", "0.45052385", "0.4491801", "0.44865903", "0.4481599", "0.44780275", "0.44779333", "0.44632927", "0.44572675", "0.44497147", "0.44470194", "0.44403845", "0.4432564", "0.44278514", "0.44239902", "0.44201094", "0.441878", "0.4415265", "0.44094235", "0.4405319", "0.43986672", "0.43879932", "0.43872374", "0.4378794", "0.43681654", "0.43681654", "0.43660888", "0.43654302", "0.43654302", "0.43607312", "0.43607312", "0.43445808", "0.43403584", "0.43382737", "0.43360743", "0.43346268", "0.4327499", "0.4323199", "0.43221787", "0.43214875", "0.43104237", "0.43066025", "0.43061078", "0.43049866", "0.43004754", "0.4296776", "0.42938265", "0.42916557", "0.4288247", "0.42877415", "0.4284062", "0.4281449", "0.4274267" ]
0.7331236
0
Called whenever an item is yielded by the spider
def process_item(self, item, spider): # strip non ascii chars item['raw_content'] = filter(lambda x : ord(x) < 128, item['raw_content']) #item['raw_content'] = ''.join(c for c in item['raw_content'] if ord(c) < 128) # hash the filename to prevent storing too-long file names hash_data = item['filename'] + item['user_agent'].ua_string filename = sha1(hash_data).hexdigest() # Javascript MIME types js_mimes = ('text/javascript', 'application/x-javascript', 'application/javascript') # Parse each file based on what its MIME specifies if 'text/html' == item['content_type']: # First save the request contents into a URLContent urlcontent,_ = model.URLContent.objects.get_or_create( url_scan=item['urlscan'], user_agent=item['user_agent'], defaults={'redirected_from':item['redirected_from']}) # Store raw markup file_content = ContentFile(item['raw_content']) urlcontent.raw_markup.save(filename, file_content) urlcontent.raw_markup.close() # Store raw headers file_content = ContentFile(item['headers']) urlcontent.headers.save(filename, file_content) urlcontent.headers.close() urlcontent.save() elif any(mime == item['content_type'] for mime in js_mimes): urlcontent = model.URLContent.objects.get( url_scan=item['urlscan'], user_agent=item['user_agent']) linkedjs, _ = model.LinkedJS.objects.get_or_create( batch=spider.batch, url_hash=sha256(item['url']).hexdigest(), defaults={'url': item['url']}, ) # Store raw js file_content = ContentFile(item['raw_content']) linkedjs.raw_js.save(filename, file_content) linkedjs.raw_js.close() linkedjs.save() # Create relationship with url content linkedjs.linked_from.add(urlcontent) elif 'text/css' == item['content_type']: urlcontent = model.URLContent.objects.get( url_scan=item['urlscan'], user_agent=item['user_agent']) linkedcss, created = model.LinkedCSS.objects.get_or_create( batch = spider.batch, url_hash=sha256(item['url']).hexdigest(), defaults={ 'url': item['url'], }, ) # Store raw css file_content = ContentFile(item['raw_content']) linkedcss.raw_css.save(filename, file_content) linkedcss.raw_css.close() linkedcss.save() # Create relationship with url content linkedcss.linked_from.add(urlcontent) if created: # Parse out rules and properties use_celery = getattr(settings, 'USE_CELERY', False) if use_celery: parse_css.delay(linkedcss) else: spider.log("Parsing css {0}".format(linkedcss)) self.css_parser.parse(linkedcss) spider.log("Ended parsing css {0}".format(linkedcss)) return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_item(self, response):\n NewhouseSpider.crawled_urls.append(response.url)\n item = FocusedScrapyCrawlerItem()\n item['url'] = response.url\n item['link_text'] = response.meta.get('link_text', '') if response.meta else ''\n soup = BeautifulSoup(response.body, 'html.parser')\n\n item['body_p_tags'] = self._getBodyText(soup)\n item['head_title'] = self._getHeadTitle(soup)\n item['last_crawled'] = time.time()\n links = self._getLinks(response, soup)\n\n # get score of the page based upon classifier\n if self.classifier:\n score = self.classifier.score(item['link_text'], item['head_title'], item['body_p_tags'])\n else:\n score = 0.0\n\n item['score'] = score\n yield item\n if score <= 0:\n self.log(\"item={} does not belong to new home so stop crawling\".format(item),\n logging.INFO)\n else:\n for link in links:\n req = Request(link, priority=int(score * 1000000), # after the request is done, run parse_item to train the apprentice\n callback=self.parse_item)\n yield req", "def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass", "def _crawler_result(item, response, spider):\n output_data.clear()\n output_data.append(dict(item))", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def process_item(self, item, spider):\n if item['name'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: %s\" % item)\n else:\n self.ids_seen.add(item['name'])\n return item #return the item", "def pytest_itemcollected(item):\n item.name = item.name.split('[', 1)[1][:-1]\n # pylint: disable=protected-access\n item._nodeid = item.name", "def open_spider(self, spider):\n pass", "def parse_items(self,response):\n sel = Selector(response)\n item = response.meta['job_item']\n company_item = response.meta['company_item']\n company_item['introduction'] = sel.xpath('//div[@class=\"job-item main-message noborder\"]/div[@class=\"content content-word\"]/text()').extract_first()\n company_item['address'] = sel.xpath('//div[@class=\"company-infor\"]/p/text()').extract_first()\n item['link'] = response.url\n item['requirement'] = sel.xpath('//div[@class=\"content content-word\"][1]/text()').extract_first()\n item['website_id'] = 7\n item['company'] = company_item\n print item\n yield item", "def process_item(self, item, spider):\n item['url'] = spider.config['site_domain'] + item[\"url\"]\n item[\"rating\"] = extract_rating(item[\"rating\"])\n item['price'] = get_price(item['price_integer'], item[\"price_decimal\"])\n item['no_discount_price'] = get_price(item['no_discount_price_integer'], item[\"no_discount_price_decimal\"])\n item[\"brand\"] = get_brand(item[\"brand\"])\n item[\"number_of_ratings\"] = get_number_of_ratings(item[\"number_of_ratings\"])\n del item['price_integer']\n del item['price_decimal']\n del item['no_discount_price_integer']\n del item[\"no_discount_price_decimal\"]\n return item", "def action(self,item):\r\n pass", "def yield_item(self, response):\n item = BrobotBotsItem()\n item.update(self.data)\n yield item", "def yield_item(self, response):\n item = BrobotBotsItem()\n item.update(self.data)\n yield item", "def process_item(self, item, spider):\n session = self.Session()\n # deal = Deals(**item)\n entry = Lyrics(item['song'], item['text'])\n\n try:\n session.add(entry)\n session.commit()\n print(f\"\\n\\nInserted {item['song']} into DB!\\n\\n\")\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n ###OLD###\n # # print(\"Pipeline test\" + 
item['song'])\n # self.conn.execute(f\"\"\"INSERT INTO lyrics VALUES\n # ({item['song']}, {item['text']});\n # \"\"\")\n\n return item", "def parse(self, response):\n item = Top100ShopsItem()\n item['url'] = response.url\n\n yield scrapy.Request(url=response.url, callback=self.parse_webpage, meta={'item': item})", "def process_item(self, item, spider):\n try:\n\n url_md5 = md5(item['url'])\n # self.r_conn.set(url_md5, html_body.read())\n # item['html_body'] = None\n\n sqli = \"insert into spider_content values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n news = {'content': item}\n\n if item['From'] == '0':\n # self.mysqlop.execute(\"insert into spider_content values('url_md5')\")\n # self.mysqlop.execute(sqli, (url_md5, None, item['spider_name'], item['catch_date'],\n # item['From'], item['url'], item['title'].encode('utf-8'), item['summary'].encode('utf-8'), item['site_url'],\n # None, None, None, item['site_name'].encode('utf-8'), None))\n\n self.db.emergency.insert(news)\n elif item['From'] == '1' or item['From'] == '3':\n # self.mysqlop.execute(sqli, (url_md5, item['publish_time'], item['spider_name'], item['catch_date'],\n # item['From'], item['url'], item['title'].encode('utf-8'), item['summary'].encode('utf-8'), item['site_url'],\n # None, None, None, item['site_name'].encode('utf-8'), None))\n\n self.db.news.insert(news)\n elif item['From'] == '2':\n\n # self.mysqlop.execute(sqli, (url_md5, item['publish_time'], item['spider_name'], item['catch_date'],\n # item['From'], item['url'], item['title'].encode('utf-8'), item['summary'].encode('utf-8'), item['site_url'],\n # item['author'].encode('utf-8'), item['replay_times'], item['view_times'], item['site_name'].encode('utf-8'), None))\n self.db.bbs.insert(news)\n\n except Exception, e:\n print 'pipeline error', e", "def process_item(self, item, spider):\n if item['id'] in self.ids_seen:\n raise DropItem(\"Duplicate item found: {0}\".format(item))\n else:\n self.ids_seen.add(item['id'])\n\n session = Session()\n\n if 'sex' in item:\n friends = item.pop('friends')\n for friend in friends:\n try:\n session.execute(friendship.insert(), params={\"friend_a_id\": item['id'], \"friend_b_id\": friend})\n session.commit()\n except:\n session.rollback()\n continue\n item.pop('image_urls')\n pictures = item.pop('images')\n if pictures:\n item['picture'] = pictures[0]['path']\n data = User(**item)\n else:\n data = City(**item)\n\n try:\n session.add(data)\n session.commit()\n except:\n session.rollback()\n raise Exception(\n \"[ERROR]: {0} - {1}\".format(sys.exc_info()[0], sys.exc_info()[1])\n )\n finally:\n session.close()\n\n return item", "def done(self,item): \n self.extra_context['object'] = item \n return self.render(None)", "def process_item(self, item, spider):\n session = self.Session()\n if 'isAlbum' in item:\n album = Album()\n album.imgSrc = item[\"imgSrc\"]\n album.titleCn = item[\"titleCn\"]\n album.titleEn = item[\"titleEn\"]\n try:\n session.add(album)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n DropItem(\"Album should be input once\")\n else:\n query = session.query(Album).filter(Album.imgSrc == item['imgSrc'])\n song = Song()\n\n song.subTitle = item[\"subTitle\"]\n song.lowUrl = item[\"lowUrl\"]\n song.highUrl = item[\"highUrl\"]\n song.serial = item['serial']\n song.album_id = query.first().id\n try:\n session.add(song)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item", "def record_meta(spider):\n pass", "def process_item(self, 
item, spider):\n session = self.Session()\n article = Article()\n restaurant = Restaurant()\n\n # populate article\n article.url = item['article_url']\n article.title = item['article_title']\n article.datetime = item['article_datetime']\n \n # populate restaurant\n restaurant.name = item['restaurant_name']\n restaurant.slug = item['restaurant_slug']\n restaurant.address = item['restaurant_address']\n restaurant.googlemaps_url = item['restaurant_googlemaps']\n restaurant.googlemaps_id = parse_googlemaps_id(restaurant.googlemaps_url)\n restaurant.lat = parse_lat(restaurant.googlemaps_url)\n restaurant.lng = parse_lng(restaurant.googlemaps_url)\n\n # determine if new article\n exist_article = session.query(Article).filter_by(url = article.url).first()\n if exist_article: \n article = exist_article\n\n # determine if new restaurant\n exist_restaurant = session.query(Restaurant).filter_by(slug = restaurant.slug).first()\n if exist_restaurant: \n restaurant = exist_restaurant\n if article not in restaurant.articles: \n restaurant.articles.append(article)\n else:\n # geocode for lat lng if necessary\n if restaurant.googlemaps_id: \n restaurant.lat, restaurant.lng, restaurant.address = convert_id(restaurant.googlemaps_id)\n # add article to restaurant.articles\n restaurant.articles.append(article)\n\n try:\n session.add(restaurant)\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item", "def parse_items(self):", "def process_item(self, item, spider):\n session = self.Session()\n real = Reals(**item)\n\n try:\n session.add(real)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item", "def process_item(self, item, spider):\n sqlinsert = self.siteInsert.format(\n \tsite_name = pymysql.escape_string(item['site_name']),\n \tsite_url = pymysql.escape_string(item['site_url'])\n \t)\n\n self.cursor.execute(sqlinsert)\n return item\n # print(\"this is item in pipeline process_item\")\n # session = self.Session()\n # sitesdb = SitesDB()\n # print(\"this is sitesdb in pipeline process_item\")\n # sitesdb.site_name = item[\"site_name\"]\n # sitesdb.site_url = item[\"site_url\"]\n\n # try:\n # session.add(sitesdb)\n # session.commit()\n # except:\n # session.rollback()\n # raise\n # finally:\n # session.close()\n\n # return item", "def process_item(self, item, spider):\n task = SpiderTask.objects.get(id=spider.task_id)\n dj_item = Item.objects.create(task=task, **item)\n return dj_item", "def parse(self, response, **kwargs):\n category_links = response.xpath('.//a[contains(@class,\"block-brands__item-link\")]/@href').extract()\n yield from response.follow_all(category_links, self.parse_main)", "def parse_item(self, response):\n item = IphoneSpiderItem()\n\n item['sku'] = response.meta.get('sku')\n item['price'] = response.meta.get('price')\n item['name'] = response.meta.get('name')\n item['seller'] = response.meta.get('seller')\n #pass the data from parse to parse_item\n\n url = response.url\n model = response.xpath('//*[@id=\"crumb-wrap\"]/div/div[1]/div[9]/text()').extract_first()\n color = response.xpath('//div[@data-type=\"颜色\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/i/text()').extract_first()\n memory = response.xpath('//div[@data-type=\"版本\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n memory2 = response.xpath('//div[@data-type=\"内存\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n #memory data can be stored in 
版本 or 内存\n\n if memory2:\n memory = memory2.strip()\n elif memory:\n memory = memory.strip()\n\n item['model'] = model\n item['color'] = color\n item['memory'] = memory\n item['url'] = url\n\n return item", "def parse_main(self, response):\n\n for i in response.xpath('//div[contains(@class,\"products-list__item\")]'):\n item = {\n \"VENDORID\": 1055,\n \"VENDOR\": 'JC SALES',\n \"ITEMNO\": i.xpath('.//span[contains(text(),\"Item No:\")]/text()').get().replace('Item No:', '').strip(),\n \"DESCRIPTION\": i.xpath('.//div[contains(@class,\"product-card__name\")]//a/text()').get(),\n \"IMAGE_URL\": i.xpath('.//div[contains(@class,\"product-card__image\")]//img[1]/@src').get(),\n \"PAGE_TITLE\": response.css('title::text').get(),\n \"PAGE_URL\": response.request.url\n }\n yield Request(response.urljoin(i.xpath('.//a[contains(@class,\"image__body\")]/@href').get()),\n self.parse_details, meta={'item': item})\n\n next_page = response.xpath('//a[text()=\">\"]/@href').get()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse_main)", "def parse_item(self, response):\n self.check_Tor_time()\n print(\"Looking\", response.url)\n # Create the loader using the response\n l = ItemLoader(item=PropertiesItem(), response=response)\n l.default_output_processor = TakeFirst()\n try:\n self.fill_from_Json(l)\n except Exception as e:\n print('exception->', e)\n print('1')\n for node in response.css('div.padding-phone-only > .padding-small-top'):\n try:\n title = node.xpath('div[1]/h6/text()').extract()\n except Exception as e:\n print 1, e\n print('title:', title)\n try:\n val = node.xpath('div[2]/text()').extract()\n except Exception as e:\n print 2, e\n try:\n if \"code\" in title[0]:\n l.add_value('unique_id', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Bedrooms\" in title[0]:\n l.add_value('property_rooms_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Construction\" in title[0]:\n l.add_value('construction_num', val[0],\n MapCompose(unicode.strip, unicode.title))\n if \"Modified\" in title[0]:\n l.add_value('on_site_date', node.xpath('div[2]/time/text()').extract()[0],\n MapCompose(\n lambda i: parse(i, fuzzy=True)))\n print(node.xpath('div[2]/time/text()').extract())\n except Exception as e:\n print 3, e\n print('2')\n # Housekeeping fields\n l.add_value('url', response.url)\n # l.add_value('spider', self.name)\n l.add_value('source', self.allowed_domains[0])\n l.add_value('imported_date', datetime.now())\n l.add_value('asset_type', 'realestate')\n l.add_value('transaction_type', 'commercial')\n tp = response.xpath(\n '//*[@id=\\\"breadCrumbs\\\"]/a[1]/text()').extract()[0]\n print('3')\n if \"Sales\" in tp:\n l.replace_value('property_buy_or_rent', \"sale\")\n else:\n l.replace_value('property_buy_or_rent', \"rent\")\n if \"residential\" in tp:\n l.add_value('category_major', \"residential\")\n elif \"commercial\" in tp:\n l.add_value('category_major', \"commercial\")\n else:\n l.add_value('category_major', \"land\")\n # a = l.load_item()\n # print(a)\n # return\n print('4')\n\n print(l)\n return l.load_item()", "def open_spider(self,Spider):\n pass", "def process_item(self, item, spider):\n writer = csv.writer(self.file, delimiter = '|')\n for apartment in item[\"apartments\"]:\n row = [apartment[\"price\"], apartment[\"size\"], apartment[\"rooms\"], apartment[\"address\"], apartment[\"lat\"],\n apartment[\"lng\"], apartment[\"zone\"], apartment[\"band\"], apartment[\"east\"], apartment[\"north\"],\n 
apartment[\"date\"]]\n writer.writerow(row)\n self.file.flush()\n print(\"page {} processed.\".format(item[\"page\"]))\n return item", "def pytest_itemcollected(item):\n\n if item._obj.__doc__:\n item._nodeid = get_test_case_docstring(item)", "def _scrape_agenda_item(self, agenda_item_location):\n pass", "def parse_items(self, response: Response) -> RlItem:\n self.logger.info('Crawler Found Item Page: %s', response.url)\n\n # Iterate through each rocket league item and build it.\n for elem_item in response.xpath('//div[starts-with(@class, \"rlg-item__container\")]'):\n loader = RlItemLoader(item=RlItem(), selector=elem_item)\n loader.add_xpath('data_id', './/div/@data-id')\n loader.add_xpath('img_url', './/img/@src')\n loader.add_value('name', elem_item.attrib['data-name'])\n loader.add_value('category', elem_item.attrib['data-category'])\n loader.add_value('platform', elem_item.attrib['data-platform'])\n loader.add_value('rarity', elem_item.attrib['data-rarity'])\n loader.add_value('dlcpack', elem_item.attrib['data-dlcpack'])\n yield loader.load_item()", "def process_item(self, item, spider):\n # remove SQL support\n # use csv to store data\n #check whether table already exsit in pd_dict\n if item[\"table\"] not in self.pd_dict:\n #check whether csv with table name exit\n file = basePath +'/'+ item[\"table\"]+'.csv'\n if os.path.exists(file):\n df = pd.read_csv(file)\n self.pd_dict.update({item[\"table\"]: df})\n else:\n df = pd.DataFrame(columns = ['animatetitle', 'othertitle', 'cross_s','nums', 'last_title'])\n self.pd_dict.update({item[\"table\"]: df})\n\n if item['animatetitle'] not in self.pd_dict[item[\"table\"]]['animatetitle'].values:\n self.pd_dict[item[\"table\"]] = self.pd_dict[item[\"table\"]].append(\n {'animatetitle' : item['animatetitle'], 'othertitle' : item['othertitle'], 'cross_s' : item['cross'],'nums':item['nums'], 'last_title':item['last_title']}, \n ignore_index = True)\n\n return item", "def process_item(self, item, spider):\n try:\n s = self.session()\n if isinstance(item, (PomItem, type(PomItem()), type(PomItem))):\n self.store_pom(item, s)\n elif isinstance(item, (AscItem, type(AscItem()), type(AscItem))):\n self.store_asc(item, s)\n elif isinstance(item, (ArtifactItem, type(ArtifactItem()), type(ArtifactItem))):\n self.store_index(item, s)\n elif isinstance(item, LinkItem):\n pass\n else:\n logger.warning('Unknown item: %s type %s' % (item, type(item)))\n return\n\n s.commit()\n s.flush() # writes changes to DB\n s.expunge_all() # removes objects from session\n except Exception as e:\n logger.warning('Exception in storing key %s' % e)\n\n finally:\n utils.silent_close(s)\n s = None\n return item", "def _item_added(self, item):\n item.root = self.root\n item.parent = self\n item.observe('linkable_vars', self.root._update_linkable_vars)\n if isinstance(item, Sequence):\n item.observe('_last_index', self._item_last_index_updated)", "def use(self, item_name):\n pass", "def set_item(self, item):\n self.item = item", "def set_item(self, item):\n self.item = item", "def parse(self, response):\n item = NewsScraperItem()\n containers = response.xpath(\"//div[contains(@class,'largeTitle')]/article[contains(@class,\"\n \"'articleItem')]/div[contains(@class,'textDiv')]\")\n for info in containers:\n\n try:\n date = info.xpath(\".//div[contains(@class,'articleDetails')]/span[contains(@class,'date')]/text()\").extract_first()\n date = re.sub(r'\\xa0-\\xa0', '', date)\n # Convert 'minutes ago' to datetime\n date = datetime.now() - timedelta(minutes=int(re.sub(r'[^0-9]', '', 
date))) # Regex = Where not numeric\n item['date'] = date.strftime(\"%Y/%m/%d %H:%M:%S\")\n earn_id = re.search(r'[0-9]{4,}', info.xpath(\".//a/@onclick\").extract_first())\n item['id'] = earn_id.group()\n item['title'] = info.xpath(\".//a/text()\").extract_first()\n item['author'] = info.xpath(\".//div[contains(@class,'articleDetails')]/span/text()\").extract_first()\n item['text'] = info.xpath(\".//p/text()\").extract_first()\n item['link'] = info.xpath(\".//a/@href\").extract_first()\n yield item\n\n except:\n print(\"Unusual format detected\")\n logging.warning(\"Item skipped due to unusual format\")", "def item_read(self, item):\n self.update_item(item)", "def MouseOverItem(self,item):\r\n pass", "def Item(self) -> object:", "def Item(self) -> object:", "def _link_items(self):\n pass", "def process_item(self, item, spider):\n try:\n self.db[self.collection_name].insert_one(dict(item))\n except Exception as e:\n logger.debug(str(e))\n return item", "def on_fetched(self, doc):\n\n docs = doc[app.config[\"ITEMS\"]]\n for item in docs:\n build_custom_hateoas(\n {\n \"self\": {\n \"title\": item[\"_type\"],\n \"href\": \"/{}/{{_id}}\".format(item[\"_type\"]),\n }\n },\n item,\n )", "def process_item(self, item, spider):\n print item\n try:\n self.cursor.execute(\n \"insert into {0} (city, priceToIncomeRatio, grossRentalYieldCityCentre, grossRentalYieldOutsideOfCentre, priceToRentRatioCityCentre, priceToRentRatioOutsideOfCityCentre, mortgageAsAPercentageOfIncome, affordabilityIndex) values (?, ?, ?, ?, ?, ?, ?, ?)\".format(spider.name),\n (item['city'], item['priceToIncomeRatio'], item['grossRentalYieldCityCentre'], item['grossRentalYieldOutsideOfCentre'], item['priceToRentRatioCityCentre'], item['priceToRentRatioOutsideOfCityCentre'], item['mortgageAsAPercentageOfIncome'], item['affordabilityIndex']))\n self.connection.commit()\n except:\n ''\n #import sys\n #sys.exit()\n\n log.msg(\"Item stored : \" % item, level=log.DEBUG)\n return item", "def display_item_process(self):\n raise NotImplementedError()", "def task_item(self, grab, task):\n if self.file_exist(self.get_id(grab.doc.url)) \\\n and not config['rewrite_files']:\n logging.info(\"Item will not parse since file exists: %s.%s page:%s\"\n % (self.get_id(grab.doc.url),\n self.type_file,\n task.page)\n )\n return\n\n logging.debug(\"Begining item parsing: %s\" % grab.doc.url)\n json_info = {}\n realtime_found = None\n try:\n realtime_found = grab.doc.rex_text(\"'boatBanner'\\s*:\\s*'(.*?)',\")\n except DataNotFound:\n logging.warning(\n \"Repeat... 'boatBanner' for realtimeavibility not found in: %s\"\n % grab.doc.url\n )\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)\n\n data_boat = grab.doc.select('//span[@class=\"wishlist-btn ' +\n 'js-wishlist-toggle boatview__wishlist\"]')\n try:\n json_info = json.loads(data_boat.attr('data-boat'))\n except json.decoder.JSONDecodeError:\n logging.warning(\"Json decode error for data-boat in: %s\"\n % grab.doc.url)\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)\n except IndexError:\n logging.warning(\"span js-wishlist-toggle... 
not found in: %s\"\n % grab.doc.url)\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)\n\n if len(json_info) < 1 or realtime_found is None:\n return\n\n realtime = True if realtime_found == 'realtime' else False\n info = OrderedDict()\n info['url'] = grab.doc.url\n info['title'] = self.get_title(grab)\n info['parsingdate'] = datetime.now().strftime('%H:%M %d/%m/%y')\n info['realtimeavilbility'] = realtime\n\n location = json_info['location']\n info['location'] = OrderedDict([\n ('country', location.split(', ')[0]),\n ('city', location.split(', ')[1])\n ])\n\n data = OrderedDict(info)\n\n data['year'] = self.get_year(grab)\n data['length'] = json_info['length'].replace(' ', '')\n\n guests = self.get_guests(grab, json_info)\n if guests is not None:\n data['guests'] = int(guests)\n data['type'] = grab.doc.rex_text(\"'type': '(.+?)',\")\n\n engine_value = self.get_engine(grab)\n if engine_value is not None:\n data['engine'] = engine_value\n\n sleeps = self.get_sleeps(grab)\n if sleeps is not None:\n data['sleeps'] = sleeps\n\n cabins = self.get_cabins(grab, json_info)\n if cabins is not None:\n data['cabins'] = cabins\n\n bathrooms = self.find_boatview__stats(grab, 'Bathrooms')\n if bathrooms is not None:\n data['bathrooms'] = int(bathrooms)\n else:\n logging.debug(\"Bathrooms for 'bathrooms' not found in: %s\"\n % grab.doc.url)\n\n about = self.get_about(grab)\n if about is None:\n logging.debug(\"About for 'about' not found in: %s\"\n % grab.doc.url)\n data['about'] = about if about is not None else ''\n data['photos'] = self.get_images_urls(grab)\n\n inventory = self.get_inventory(grab)\n if inventory is not None:\n data['inventory'] = inventory\n\n data['pickup'] = self.get_pickup(grab)\n\n equipment = self.get_equipment(grab)\n if len(equipment) < 1:\n logging.debug(\"equipment not found in: %s\"\n % grab.doc.url)\n else:\n data['equipment'] = equipment\n\n prices = self.get_prices(grab, 'Obligatory extras')\n optional = self.get_prices(grab, 'Optional extras')\n if prices is not None:\n data['prices'] = OrderedDict([\n ('obligatory', prices),\n ])\n if optional is not None:\n data['optional'] = optional\n\n if self.file_exist(self.get_id(grab.doc.url)) \\\n and not config['rewrite_files']:\n logging.info(\"Item will not save since file exists: %s.%s\"\n % (self.get_id(grab.doc.url), self.type_file)\n )\n return\n\n # If elements more than 10 then save results into json-format\n if len(data) > 9:\n logging.debug(\"Saving url: %s from page: %s\"\n % (grab.doc.url, task.page))\n self.save_result(\n self.get_id(grab.doc.url),\n json.dumps(data, ensure_ascii=False, indent=2)\n )\n else:\n logging.info(\n \"Data hasn't been saved. 
It contains less 10 objects: %s.%s\"\n % (self.get_id(grab.doc.url), self.type_file)\n )\n # Repeat task\n yield Task('item', url=grab.config['url'],\n task_try_count=task.task_try_count + 1)", "def parse(self, response):\n\n\t\t### close spider if exception\n\t\tif 'Bandwidth exceeded' in response.body:\n\t\t\traise CloseSpider('bandwidth_exceeded')\n\n\t\tlog_scrap.debug(u\"\\n>>> NEW PARSING >>>\\n\" )\n\t\tlog_scrap.info(\"--- GenericSpider.parse ...\" )\n\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s\" , response)\n\t\tlog_scrap.info(\"\\n--- GenericSpider.parse /response : \\n%s \\n\" , response.__dict__.keys() )\n\n\t\t# for k, v in response.__dict__.iteritems() :\n\t\t# \tlog_scrap.info(\"\\n--- [k] {} : [v] {} : \".format(k,v))\n\t\t# print response._body\n\t\tstart_url = response.meta[\"start_url\"]\n\t\tlog_scrap.info(\"--- GenericSpider.parse / start_url : %s\", start_url )\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start request with API crawler\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t# if self.spider_config_flat[\"parse_api\"] == True :\n\t\tif self.parse_api == True :\n\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting request on API endpoint... \" )\n\t\t\tjsonresponse = json.loads(response.body_as_unicode())\n\t\t\t# log_scrap.info(\"--- GenericSpider.parse / jsonresponse : \\n%s\", jsonresponse )\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / jsonresponse received...\" )\n\n\t\t\traw_items_list = get_dictvalue_from_xpath(jsonresponse, self.item_xpath)\n\t\t\t# raw_items_list = jsonresponse[self.item_xpath]\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / raw_items_list[0] : \\n%s\\n...\", pformat(raw_items_list[0]) )\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - API\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. / START LOOPING raw_items_list WITH API ...\" )\n\n\t\t\t\t# while self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - API - item n°{} >>> \\n\".format(self.item_count) )\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. 
core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_api_rest=True, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - API\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - API - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t# follow_link_raw = raw_data[ self.follow_xpath ]\n\t\t\t\t\t\t\tfollow_link_raw = get_dictvalue_from_xpath(raw_data, self.follow_xpath)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link_raw),follow_link_raw) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\turl_follow = self.page_url\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link_raw, url_root=url_follow)\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\tfollow_is_api = self.follow_is_api\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url, 'item_n' : self.item_count , 'parse_api' : follow_is_api })\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t# log_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - API\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (API) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\turl_next = \"\"\n\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\turl_next = self.api_pagination_root\n\t\t\t\t\telse :\n\t\t\t\t\t\turl_next = self.page_url\n\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\tnext_page = url_next + str(self.page_count)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} \".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} \".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with pure Scrapy requests\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telif self.spider_config_flat[\"parse_reactive\"] == False :\n\t\t# elif self.parse_reactive == False :\n \n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting requests with Scrapy... \" )\n\t\t\t# self.parse_scrapy(response)\n\n\t\t\t### find items list\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / self.item_xpath : %s\", self.item_xpath )\n\t\t\traw_items_list = response.xpath(self.item_xpath)\n\t\t\tlog_scrap.info(\"--- GenericSpider.parse / len(raw_items_list) : %d \", len(raw_items_list) )\n\n\n\t\t\t### - - - - - - - - - - - ###\n\t\t\t### PARSING PAGE - SCRAPY\n\t\t\t### start parsing page : loop through data items in page in response\n\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. 
/ START LOOPING raw_items_list WITH SCRAPY ...\" )\n\n\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\tprint()\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Scrapy - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t# print \">>> raw_data : \\n\", raw_data.extract()\n\n\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, item_n=self.item_count)\n\n\n\t\t\t\t\t\t### - - - - - - - - - - - ###\n\t\t\t\t\t\t### FOLLOW LINK - SCRAPY\n\t\t\t\t\t\t### if need to follow to extract all data\n\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SCRAPY - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\tfollow_link \t= raw_data.xpath( self.follow_xpath ).extract_first()\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t# log_scrap.info(\" --> follow_link CLEAN : %s \", follow_link )\n\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {} \".format(type(follow_link),follow_link) )\n\n\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\turl \t\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t# yield Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url } )\n\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False} )\n\t\t\t\t\t\t\t\t# log_scrap.warning(u\">>> FOLLOWING LINK --> url : {} / WORKED !!! \".format(url) )\n\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\telse :\n\n\t\t\t\t\t\t\tlog_scrap.warning(u\">>> NO FOLLOW LINK ... 
\" )\n\t\t\t\t\t\t\t### item completion is finished - yield and so spark pipeline for item (store in db for instance)\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.items() : \\n %s\", item.items() )\n\t\t\t\t\t\t\t# log_scrap.info(\">>> GenericSpider.parse - item.keys() : \\n %s\", item.items() )\n\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t# print (\"\\n>>> NEXT ITEM \" + \">>> >>> \"*10, \"\\n\")\n\n\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF LIMIT_ITEMS')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF ITEMS')\n\n\t\t\t### - - - - - - - - - - ###\n\t\t\t### NEXT PAGE - SCRAPY\n\t\t\t### check if there is a test_limit\n\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse (Scrapy) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t### get and go to next page\n\t\t\t\t\tis_next_page, next_page = self.get_next_page(response, start_url)\n\n\t\t\t\t\tif is_next_page :\n\n\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\turl_next = \"\"\n\t\t\t\t\t\tif self.api_pagination_root != \"\" :\n\t\t\t\t\t\t\turl_next = self.api_pagination_root\n\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE I : %s\", next_page )\n\t\t\t\t\t\tnext_page = self.clean_link(next_page, url_root=url_next)\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider.parse >>> NEXT PAGE II : %s\", next_page )\n\n\t\t\t\t\t\tyield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / NO MORE PAGE TO SCRAP - pages count : {} \".format(self.page_count) )\n\t\t\t\t\t\t# raise CloseSpider('NO MORE PAGE TO SCRAP')\n\n\t\t\t\telse :\n\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\t\t\telse :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t# self.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\n\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\t### start requests with Selenium\n\t\t### - - - - - - - - - - - - - - - - - - - - - - - ###\n\t\telse :\n\t\t\t### initiate selenium browser\n\t\t\t### cf : https://github.com/voliveirajr/seleniumcrawler/blob/master/seleniumcrawler/spiders/seleniumcrawler_spider.py\n\t\t\tlog_scrap.info(\"\\n--- GenericSpider.parse / starting Selenium driver... \" )\n\n\t\t\t# retrieve exec path for chromedriver from settings_scrapy.py\n\t\t\t### GET APP MODE FROM ENV VARS\n\t\t\tapp_mode \t\t\t\t\t\t= os.environ.get('APP_MODE', 'default')\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / APP_MODE : %s\", app_mode)\n\t\t\tchromedriver_path \t= CHROMEDRIVER_PATH_LIST[ app_mode ]\n\t\t\tlog_scrap.debug(u\"--- GenericSpider.parse / chromedriver_path : %s\", chromedriver_path)\n\n\t\t\t### specify executable path to launch webdriver-->\n\t\t\t# cf : where chromedriver was installed when `brew install chromedriver`\n\t\t\tself.driver = webdriver.Chrome(executable_path=chromedriver_path, chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Chrome(chrome_options=options_selenium)\n\t\t\t# self.driver = webdriver.Firefox()\n\t\t\t# self.driver = webdriver.Chrome()\n\t\t\t# self.driver = webdriver.PhantomJS() ### deprecated\n\n\t\t\t### setup waiting times\n\t\t\t# self.driver.set_page_load_timeout(60)\n\t\t\tself.wait_driver\t= WebDriverWait(self.driver, self.delay_driver)\n\t\t\tself.wait_page \t\t= WebDriverWait(self.driver, self.delay_new_page)\n\t\t\tself.driver.implicitly_wait(self.delay_implicit)\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_driver : %s\", self.delay_driver )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_new_page : %s\", self.delay_new_page )\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / self.delay_implicit : %s\", self.delay_implicit )\n\n\n\t\t\t### start parsing with selenium\n\t\t\tlog_scrap.debug(u\"--- GenericSpider. / response._url : %s\", response._url )\n\t\t\ttry :\n\t\t\t\tself.driver.get(response._url)\n\n\t\t\t\t### try scroll_down if needed in config\n\t\t\t\tif self.spider_config_flat['scroll_down'] : \n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / scroll_down is TRUE ... \" )\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericsSpider. / scroll_down - self.spider_config_flat : \\n%s\", pformat(self.spider_config_flat) )\n\n\t\t\t\t\tscroll_pause_time = self.spider_config_flat[\"scroll_pause_time\"]\n\t\t\t\t\tmax_loops \t\t\t\t= self.spider_config_flat[\"scroll_loops\"]\n\t\t\t\t\tself.driver = scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\t\t# scroll_down(self.driver, scroll_pause_time, max_loops)\n\t\t\t\tlog_scrap.info(\"--- GenericSpider. / url '{}' is loaded ... 
\".format( response._url ))\n\t\t\t\n\t\t\texcept :\n\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\tself.driver.close()\n\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\traise CloseSpider('DRIVER NOT RESPONDING')\n\n\n\t\t\t### clean original xpath from strings\n\t\t\tstrings_to_clean = [\n\t\t\t\t'/@src',\n\t\t\t\t'/@href',\n\t\t\t\t'/text()',\n\t\t\t\t'/@*[name()=\"xlink:href\"]',\n\t\t\t\t'/@datetime'\n\t\t\t]\n\n\t\t\t# while self.there_is_more_items_to_scrap :\n\t\t\twhile self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap )\n\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / while loop continues : %s\", self.there_is_more_items_to_scrap_dict[start_url] )\n\n\t\t\t\ttry :\n\n\t\t\t\t\t### wait / debug page content\n\t\t\t\t\tpage_source_code = self.driver.page_source.encode(\"utf-8\")\n\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / page_source_code : \\n %s \", page_source_code )\n\t\t\t\t\ttime.sleep(self.delay_new_page)\n\n\t\t\t\t\t### start parsing page :\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.item_xpath : %s\", self.item_xpath )\n\t\t\t\t\traw_items_list \t= self.driver.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / raw_items_list length : %s\", len(raw_items_list) )\n\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / raw_items_list[0].text : \\n%s\", raw_items_list[0].text )\n\n\t\t\t\t\t# current_item_index = 0\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### PARSING PAGE - SELENIUM\n\t\t\t\t\t# loop through data items in page in response\n\t\t\t\t\tif len(raw_items_list) != 0 :\n\n\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / START PARSING WITH SELENIUM ...\\n\" )\n\n\t\t\t\t\t\tfor raw_data in raw_items_list :\n\n\t\t\t\t\t\t\tprint()\n\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / START LOOPING raw_items_list WITH SELENIUM ...\" )\n\n\t\t\t\t\t\t\t### add +1 to items count\n\t\t\t\t\t\t\tself.item_count += 1\n\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} \".format(str(self.spider_name), self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} \".format(self.spider_name, self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. / VARIABLES - item n°{} \".format(self.item_count) )\n\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ VARIABLES - spider_name : '%s' - item n°%s \" %(self.spider_name, self.item_count) )\n\n\t\t\t\t\t\t\t### check if can continue depending on item_count\n\t\t\t\t\t\t\tif self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - current start_url : {} >>>\".format(start_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEW ITEM - Selenium - item n°{} / page n°{} >>> \\n\".format(self.item_count, self.page_count) )\n\n\t\t\t\t\t\t\t\t### instantiate Item to fill from datamodel --> cf items.py\n\t\t\t\t\t\t\t\titemclass \t= create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )\n\t\t\t\t\t\t\t\titem \t\t= itemclass()\n\n\t\t\t\t\t\t\t\t### add global info to item : i.e. core fields in dm_core_item_related list\n\t\t\t\t\t\t\t\titem[ 'spider_id' ]\t\t= self.spider_id\n\t\t\t\t\t\t\t\titem[ 'added_by' ]\t\t= self.user_id\n\t\t\t\t\t\t\t\titem[ 'added_at' ]\t\t= time.time()\t\t# timestamp\n\t\t\t\t\t\t\t\titem[ 'link_src' ]\t\t= response._url\n\n\t\t\t\t\t\t\t\titem[ 'page_n' ]\t\t= self.page_count\n\t\t\t\t\t\t\t\titem[ 'item_n' ]\t\t= self.item_count\n\n\t\t\t\t\t\t\t\t### extract data and feed it to the Item instance based on spider_config_flat\n\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t### - - - - - - - - - - ###\n\t\t\t\t\t\t\t\t### FOLLOW LINK - SELENIUM\n\t\t\t\t\t\t\t\t### find follow link to open detailled item view\n\t\t\t\t\t\t\t\tif self.spider_config_flat[\"parse_follow\"] == True :\n\n\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> FOLLOW LINK - SELENIUM - item n°{} / page n°{} >>>>>> \\n\".format(self.item_count, self.page_count) )\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_xpath : %s\", self.follow_xpath )\n\n\t\t\t\t\t\t\t\t\t### follow link with Scrapy\n\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Scrapy ...\" )\n\n\t\t\t\t\t\t\t\t\t\t# log_scrap.debug(u\"--- GenericSpider. 
/ get href of follow_link ...\" )\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link_xpath : %s \", follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link\t\t\t= raw_data.find_element_by_xpath( follow_link_xpath ).get_attribute('href')\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link RAW : %s \", follow_link )\n\n\t\t\t\t\t\t\t\t\t\turl_follow = \"\"\n\t\t\t\t\t\t\t\t\t\tif self.api_follow_root != \"\" :\n\t\t\t\t\t\t\t\t\t\t\t\turl_follow = self.api_follow_root\n\n\t\t\t\t\t\t\t\t\t\t# complete follow link if needed\n\t\t\t\t\t\t\t\t\t\tfollow_link = self.clean_link(follow_link, url_root=url_follow)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\" --> follow_link CLEAN ({}) : {}\".format(type(follow_link), follow_link ) )\n\n\t\t\t\t\t\t\t\t\t\t# store follow_link\n\t\t\t\t\t\t\t\t\t\titem[ 'link_data' ]\t= follow_link\n\t\t\t\t\t\t\t\t\t\turl\t\t\t= item['link_data']\n\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOWING LINK --> url : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield scrapy.Request(url, callback=self.parse_detailed_page, meta={'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False})\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.warning(u\">>> FOLLOW LINK - NOT WORKING : {} \".format(url) )\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\n\t\t\t\t\t\t\t\t\t### follow link with Selenium\n\t\t\t\t\t\t\t\t\t### FIND A WEBSITE TEST FOR REACTIVE DETAILLED PAGES\n\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / follow link with Selenium ...\" )\n\n\t\t\t\t\t\t\t\t\t\tfollow_link_xpath \t= clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)\n\t\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.follow_link_xpath : %s\", self.follow_link_xpath )\n\t\t\t\t\t\t\t\t\t\tfollow_link \t\t= raw_data.find_element_by_xpath( follow_link_xpath )\n\n\t\t\t\t\t\t\t\t\t\t### open link in new tab ?\n\t\t\t\t\t\t\t\t\t\tfollow_link.click()\n\n\t\t\t\t\t\t\t\t\t\t### get data and save data\n\t\t\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- GenericSpider. / get data and save data ...\" )\n\t\t\t\t\t\t\t\t\t\t\titem = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )\n\n\t\t\t\t\t\t\t\t\t\t\t### back to previous page and scrap from where it left\n\t\t\t\t\t\t\t\t\t\t\t### cf : https://selenium-python.readthedocs.io/navigating.html#navigation-history-and-location\n\t\t\t\t\t\t\t\t\t\t\tself.driver.back()\n\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t### if no follow link\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tyield item\n\n\t\t\t\t\t\t\t\t# log_scrap.info(\" --> item : \\n %s \\n\", pformat(item) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\" --> item ...\" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF LIMIT_ITEMS')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF ITEMS - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\t### - - - - - - - - - - - - ###\n\t\t\t\t\t### NEXT PAGE - SELENIUM\n\t\t\t\t\tif self.test_limit == None or self.page_count < self.test_limit :\n\n\t\t\t\t\t\tif self.there_is_more_items_to_scrap_dict[start_url] :\n\n\t\t\t\t\t\t\tif self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :\n\n\t\t\t\t\t\t\t\tprint ()\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_name : '%s' >>>\" %(self.spider_name) )\n\t\t\t\t\t\t\t\tlog_scrap.info(\" --- GenericSpider.parse (Selenium) >>> PAGE n°{} DONE -> NEXT PAGE >>> \\n\".format(self.page_count) )\n\n\t\t\t\t\t\t\t\t### add +1 to parsed pages\n\t\t\t\t\t\t\t\tself.page_count += 1\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - spider_page_url : {} >>>\".format(self.spider_page_url) )\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\">>> NEXT PAGE - current start_url : {} >>>\".format(start_url) )\n\n\t\t\t\t\t\t\t\t### find next page btn in current view\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / self.next_page : %s\", self.next_page )\n\t\t\t\t\t\t\t\tnext_page_xpath = clean_xpath_for_reactive(self.next_page, strings_to_clean)\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page_xpath : %s\", next_page_xpath )\n\t\t\t\t\t\t\t\t# next_page \t= re.sub(\"|\".join(strings_to_clean), \"\", next_page )\n\n\t\t\t\t\t\t\t\t# try :\n\t\t\t\t\t\t\t\t# element_present = EC.presence_of_element_located((By.XPATH, next_page_xpath ))\n\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page present : %s\", element_present )\n\t\t\t\t\t\t\t\t# self.wait.until(element_present)\n\t\t\t\t\t\t\t\t# next_page = self.wait.until( EC.element_to_be_clickable(element_present) )\n\t\t\t\t\t\t\t\t# next_page \t\t= self.driver.find_element_by_xpath( next_page_xpath )\n\t\t\t\t\t\t\t\tnext_page \t\t= self.driver.find_element(By.XPATH, next_page_xpath )\n\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page : %s\", next_page )\n\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.text : %s\", next_page.text )\n\n\t\t\t\t\t\t\t\t# except TimeoutException:\n\t\t\t\t\t\t\t\t# except :\n\t\t\t\t\t\t\t\t# \tlog_scrap.error(\"--- GenericSpider. 
/ Timed out waiting for page to load\")\n\n\t\t\t\t\t\t\t\t### click next button and wait for ajax calls to complete (post and get)\n\t\t\t\t\t\t\t\t### cf : http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html\n\n\t\t\t\t\t\t\t\t# def wait_for(condition_function):\n\t\t\t\t\t\t\t\t# \t\tstart_time = time.time()\n\t\t\t\t\t\t\t\t# \twhile time.time() < start_time + 3:\n\t\t\t\t\t\t\t\t# \t\tif condition_function():\n\t\t\t\t\t\t\t\t# \t\t\treturn True\n\t\t\t\t\t\t\t\t# \t\telse:\n\t\t\t\t\t\t\t\t# \t\t\ttime.sleep(0.1)\n\t\t\t\t\t\t\t\t# \traise Exception ('Timeout waiting for {}'.format(condition_function.__name__) )\n\n\t\t\t\t\t\t\t\t# def link_has_gone_stale():\n\t\t\t\t\t\t\t\t# \t\ttry:\n\t\t\t\t\t\t\t\t# \t\t# poll the link with an arbitrary call\n\t\t\t\t\t\t\t\t# \t\tnext_page.find_elements_by_xpath(self.item_xpath)\n\t\t\t\t\t\t\t\t# \t\treturn False\n\t\t\t\t\t\t\t\t# \texcept StaleElementReferenceException :\n\t\t\t\t\t\t\t\t# \t\treturn True\n\n\t\t\t\t\t\t\t\tlog_scrap.debug(u\"--- ... ---\")\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / next_page.click() \" )\n\t\t\t\t\t\t\t\t\tnext_page.click()\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\t# log_scrap.info(\"--- GenericSpider. / next_page.send_keys( \\ n )\" )\n\t\t\t\t\t\t\t\t\t# next_page.send_keys(\"\\n\")\n\t\t\t\t\t\t\t\t\t# added this step for compatibility of scrolling to the view\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / ALTERNATIVE next_page.click() \" )\n\t\t\t\t\t\t\t\t\t# self.driver.execute_script(\"return arguments[0].scrollIntoView();\", next_page)\n\t\t\t\t\t\t\t\t\t# next_page.click()\n\t\t\t\t\t\t\t\t\tself.driver.execute_script(\"arguments[0].click();\", next_page)\n\n\t\t\t\t\t\t\t\t### wait after click\n\t\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider. / wait for ajax to finish... \" )\n\t\t\t\t\t\t\t\t\t# wait_for(link_has_gone_stale)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return jQuery.active') == 0)\n\t\t\t\t\t\t\t\t\tself.wait_page.until(lambda driver: self.driver.execute_script('return document.readyState') == 'complete')\n\t\t\t\t\t\t\t\t\t# time.sleep(self.delay_implicit)\n\t\t\t\t\t\t\t\t\ttime.sleep(self.delay_new_page)\n\t\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\t\tlog_scrap.error(\"--- GenericSpider. / !!! FAIL / wait for ajax to finish... \" )\n\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF PAGES TO SCRAP - page n°{} / except -> break\".format(self.page_count) )\n\t\t\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\t\t\traise CloseSpider('OUT OF PAGES TO SCRAP')\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# self.there_is_more_items_to_scrap = False\n\t\t\t\t\t\tself.there_is_more_items_to_scrap_dict[start_url] = False\n\t\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break\".format(self.page_count, self.settings_limit_pages, self.test_limit) )\n\t\t\t\t\t\tself.driver.close()\n\t\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\t\t# raise CloseSpider('OUT OF TEST_LIMIT')\n\t\t\t\t\t\tbreak\n\n\t\t\t\texcept :\n\t\t\t\t\tlog_scrap.warning(u\"--- GenericSpider. 
/ NO MORE ITEMS TO SCRAP - item_count : {} - LIMIT_ITEMS : {} / except -> break\".format(self.item_count, self.LIMIT_ITEMS) )\n\t\t\t\t\tself.driver.close()\n\t\t\t\t\tlog_scrap.info(\"--- GenericSpider / driver is shut\" )\n\t\t\t\t\traise CloseSpider('NO MORE ITEMS TO SCRAP')\n\t\t\t\t\tbreak", "def process_item(self, item, spider):\n if item is None:\n raise DropItem(\"Something went wrong in parsing data...\")\n try:\n self.curr.execute(\n SqlStatements.insert_new_real_estate(),\n (\n item['listing_type'],\n item['property_type'], \n item['price'], \n item['location_city'], \n item['location_city_district'], \n item['area_property'],\n item['area_land'],\n item['construction_type'],\n item['num_floors_building'],\n item['apartment_floor'],\n item['registered'],\n item['heating_type'],\n item['num_rooms'],\n item['num_bathrooms'],\n item['source']\n )\n )\n self.conn.commit()\n except Exception as e:\n print(e)\n self.conn.rollback()\n return item\n self._log_progress()\n return item", "def accept (self, item):\n\t\treturn 1", "def process_item(self, item, spider):\n tmp_dict = {}\n tmp_dict['comments'] = item['comments']\n tmp_dict['referenceName'] = item['referenceName']\n tmp_dict['referenceTime'] = item['referenceTime']\n tmp_dict['productColor'] = item['productColor']\n tmp_dict['productSize'] = item['productSize']\n self.savefile.write(u\"{0}\\n\".format(json.dumps(tmp_dict)))\n #raise DropItem()", "def item_starred(self, item):\n self.update_item(item)", "def LookOn(play, item):\r\n\tspk(\"You start perusing the items on %s\" % item.name)\r\n\tif item.items != []:\r\n\t\tlookoner(play, item)\r\n\telse:\r\n\t\tspk(\"Nothing\")", "def process_item(self, item, spider):\n\n url = item['url']\n iso_code = item['iso_code']\n result = self.item_data_store.get_item(url, iso_code)\n\n if result.data is not None:\n raise DropItem(\n f'Resource already indexed for language {iso_code}: {url}')\n\n create_result = self.item_data_store.create_item(item)\n\n if create_result.has_error():\n self.logger.error('\\n'.join(create_result.messages))\n\n return item", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(telecom_vivo_movel_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider", "def get_final_result(self, spider):\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n if self.screenshots_ids:\n self.result['__screenshots_ids__'] = self.screenshots_ids\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id, '{renavam}-data_collected.json'.format(\n renavam=self.renavam))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url, callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)", "def parse(self, response):\n self.driver.get(response.url)\n product_category=response.meta[\"category_text\"]\n products=response.xpath(\"//*[(@class='list-item')]\")\n \n # item containers for storing product\n items = CrawlingECommerceItem()\n \n # iterating over search results\n # for product in 
products:\n # # Defining the XPaths\n # XPATH_PRODUCT_LINK=\".//*[contains(concat( ' ', @class, ' ' ), concat( ' ', 'goods-tit', ' ' ))]//a\"\n # XPATH_PRODUCT_NAME=\".//div[@class='goods-introudce']//a/@href\"\n # XPATH_PRODUCT_PRICE=\".//div[@class='catalog-detail']//div[@class='detail-right']//p/text()\"\n # XPATH_PRODUCT_IMAGE_LINK=\".//img\"\n\n # raw_product_name=product.xpath(XPATH_PRODUCT_NAME).get()\n # raw_product_price=product.xpath(XPATH_PRODUCT_PRICE).get()\n # raw_product_image_link=product.xpath(XPATH_PRODUCT_IMAGE_LINK).extract()\n # raw_product_link=product.xpath(XPATH_PRODUCT_LINK).get()\n\n # # cleaning the data\n # product_name=''.join(raw_product_name).strip(\n # ) if raw_product_name else None\n # product_price=''.join(raw_product_price).strip(\n # ) if raw_product_price else None\n # product_image_link=''.join(raw_product_image_link).strip(\n # ) if raw_product_image_link else None\n # product_link=''.join(raw_product_link).strip(\n # ) if raw_product_link else None\n\n # # storing item\n # yield CrawlingECommerceItem (\n # product_name=product_name,\n # product_price=product_price,\n # product_url=product_link,\n # product_category=product_category,\n # image_urls=raw_product_image_link\n # )\n\n # # yield items\n \n # XPATH_PRAGINATION_LINK=\"//*[(@class='next right')]/a/@href\"\n\n yield response.follow(str(response.request.url), callback = self.parse, meta = {\"category_text\": product_category})", "def parse(self, response):\n for sel in response.xpath('//*[@id=\"J_goodsList\"]/ul/li[@class=\"gl-item\"]'):\n \"\"\"iterate all items in this page\"\"\"\n sku = sel.xpath('.//@data-sku').extract_first()\n price = float(sel.xpath('.//div/div[3]/strong/i/text()').extract_first())\n name = ''.join(sel.xpath('.//div/div[4]/a/em/descendant-or-self::node()/text()').extract())\n seller = sel.xpath('.//div/div[7]/span/a/text()').extract_first()\n sku_url = \"http:\" + sel.xpath('.//div/div[1]/a/@href').extract_first()\n\n yield Request(sku_url,\n callback=self.parse_item,\n meta = {'sku' : sku,\n 'price' : price,\n 'name' : name,\n 'seller' : seller})\n #make the request of individual page", "def parse(self, response):\n # DEBUGGING SCRAPY: Logging in Scrapy\n # logger = logging.getLogger()\n # logger.warning(\"Your Message here\")\n # logger.debug(\"Your Message here\")\n # Inside the spider we can eaily use:\n # self.logger.info('Your Message here')\n\n l = ItemLoader(item=GplayItem(), response=response)\n \n l.add_xpath('App_name', '//*[@itemprop=\"name\"]/span/text()')\n \n l.add_xpath('Package_name', '//meta[@property=\"og:url\"]/@content')\n \n l.add_xpath('Genre', '//*[@itemprop=\"genre\"]/text()')\n \n l.add_xpath('Price', '//*[@itemprop=\"price\"]/@content')\n\n l.add_xpath('Editor_choice', '//*[@itemprop=\"editorsChoiceBadgeUrl\"]')\n \n l.add_xpath('Developer', '//span/a[@itemprop=\"genre\"][1]/../../span[1]/a/text()')\n \n l.add_xpath('Developer_URL', '//span/a[@itemprop=\"genre\"][1]/../../span[1]/a/@href')\n \n l.add_xpath('Content_rating', '//div[contains(text(),\"Content Rating\")]/following-sibling::span/div/span/div[1]/text()')\n \n l.add_xpath('Promotion', '//div[contains(text(), \"Contains Ads\") or contains(text(), \"Offers in-app purchases\")]/text()')\n\n l.add_xpath('Description','//content/div/text()')\n\n l.add_xpath('App_rate', '//div[contains(@aria-label,\"stars out of five stars\")]/text()')\n \n l.add_xpath('Reviewers_count', '(//span[contains(@aria-label,\"ratings\")])[1]/text()')\n\n l.add_xpath('Developer_location', 
'//div/span/div/span/div/a[text()=\"Privacy Policy\"]/../following-sibling::div/text()')\n \n # There are five grades in star for rating apps\n for i in range(5, 0,-1):\n l.add_xpath('Rate_'+str(i)+'star',\n '//div/div/span[text()='+str(i)+']/following-sibling::span/@title')\n \n # Batch scraping fields at the bottom of the app page\n info_fields = {\"Update_date\":\"Updated\", \"Filesize\":\"Size\", \"Install_count\":\"Installs\", \n \"Version\":\"Current Version\", \"Android_version\":\"Requires Android\",\n \"Inapp_products\":\"In-app Products\", \"Offered_by\":\"Offered By\"}\n \n for field_name, xpath_name in info_fields.items():\n l.add_xpath(field_name,\n '//div/div/div/div[text()=\"'+str(xpath_name)+'\"]/following-sibling::span/div/span/text()')\n \n ## Filling housekeeping fields\n l.add_value('URL', response.url)\n l.add_value('Scraper_project', self.settings.get('BOT_NAME'))\n l.add_value('Scraper_spider', self.name)\n l.add_value('Scraper_server', socket.gethostname())\n l.add_value('Scraper_date', str(datetime.datetime.now()))\n \n return l.load_item()", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(autos_detran_terceiros_sp_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider", "def next(self, item: Any):\n self.check()\n with self.lock:\n self.connection.transport.send(StreamItemMessage(\n self.invocation_id,\n item))", "def on_item(self, ch, method, header, body):\n try:\n # Get the item from the playlist store\n item = self.playlist_store.find_one({'_id': ObjectId(body)})\n print \" [x] Received %r\" % (item['track']['track']['name'],)\n \n except:\n print \" [x] Not found: %r\" % (body,)\n \n else:\n # Add item to our list\n self.items.append(item)\n \n # Mark item as 'queued'\n item['status'] = 'queued'\n self.playlist_store.update({'_id': item['_id']}, item)\n \n # If no items 'sent' or 'playing', broadcast next item in queue\n self.send()\n \n # Acknowledge\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def parse(self, response):\n\n product_page_links = response.css('.detailsLink')\n yield from response.follow_all(product_page_links, self.parse_item)\n\n pagination_links = response.css('span.fleft a')\n yield from response.follow_all(pagination_links, self.parse)", "def get_next_item(self):\n pass", "def __init__(self, item, next = None):\n self.item = item\n self.next = next", "def parse(self, response):\n for link in response.css(\".event-entry .event-title a::attr(href)\").extract():\n yield scrapy.Request(\n response.urljoin(link), callback=self.parse_event_page, dont_filter=True\n )", "def get_final_result(self, spider):\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id,\n '{cnpj}-data_collected.json'.format(cnpj=self.cpf_cnpj))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url,\n callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)", "def __call__(self):\n yield from self", "def 
parse(self, response):\n\n #下面这种写法使用生成器方式比较好\n \"\"\" items = []\n for i in response.css('div.quote'):\n item = ScrapequoteItem()\n item['tag'] = i.css('span.text[itemprop]::text').get()\n item['author'] = i.css('small.author::text').get()\n items.append(item)\n return items \"\"\"\n\n for i in response.css('div.quote'):\n item = ScrapequoteItem()\n item['tag'] = i.css('span.text[itemprop]::text').get()\n item['author'] = i.css('small.author::text').get()\n yield item\n\n #以下循环获取其他页面\n next_page = response.css('li.next a::attr(href)').get()\n if next_page is not None:\n yield response.follow(next_page, callback=self.parse) #返回一个Request instance", "def parse(self, response):\n product_urls = response.css('.item .name a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.LinkNext a::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def process_item_data(self, db, ref, response):\n raise Exception(\"To be implemented\")", "def items():", "def items(self, course):\r\n pass", "def process_item(self, item, spider):\n\t\tif isinstance(item, DatabaseItem):\n\t\t\t# run db query in thread pool\n\t\t\tquery = self.dbpool.runInteraction(self._conditional_op, item)\n\t\t\tquery.addErrback(self._database_error, item)\n\n\t\treturn item", "def get(self, item):\r\n raise NotImplementedError", "def parse(self, response):\n next_selector = response.xpath('//div//li/a[@id=\"quotes_content_left_lb_NextPage\"]/@href')\n ticker = re.findall('symbol/(.+?)/', response.url)[0]\n\n for url in next_selector.extract():\n yield Request(url, callback = self.parse)\n \n links = response.xpath('//div//span[@class=\"fontS14px\"]/a/@href').extract()\n for link in links:\n # meta is passed along with the response into the spider\n # allowing it to access what ticker it's using\n yield Request(link, callback = self.parse_articles, meta = {'ticker': ticker})", "def select(self,item):\r\n pass", "def __init__(self, spider):\n\n super(SprintBehavior, self).__init__(spider)\n self.remoteContext = spider.remoteController.context", "def _apply_item(self, item: Item) -> bool:\n return False", "def SetItemVisited(self, item, visited=True):\r\n\r\n item.SetVisited(visited)\r\n self.RefreshLine(item)", "def processItem(self):\r\n self.extract()\r\n self.mergeLanguageClaims()\r\n self.validateClaims()\r\n self.mergeWithWikidata()\r\n self.writeToWikidata()\r\n self.log()", "def handle_item(self, item):\n sqs = SearchQuerySet().models(self.model)\n mlt_results = sqs.more_like_this(item)\n saved = []\n for result in mlt_results: # type: SearchResult\n\n rel = self.model_relation(score=result.score)\n rel.set_relation(seed_id=item.pk, related_id=result.pk)\n rel.save()\n\n logger.debug('Saved %s' % rel)\n\n saved.append(rel)\n\n return saved", "def HandleHyperLink(self, item):\r\n\r\n if self.IsItemHyperText(item):\r\n event = TreeEvent(wxEVT_TREE_ITEM_HYPERLINK, self.GetId())\r\n event._item = item\r\n self.GetEventHandler().ProcessEvent(event)", "def match(self, item):", "def give(self, item: Item, count: int):\n pass", "def receive_item(self, item):\n self.inventory.append(item)\n events.trigger_event(\"print_message\", \"Picked up {0}\".format(item))", "def item_info(self, item_info):\n\n self._item_info = item_info", "def spider_idle(self):\n count = 10\n while self.set_artist and count:\n count -= 1\n url = self.set_artist.pop()\n request = Request(url, 
dont_filter=True, callback=self.get_info)\n self.crawler.engine.crawl(request, self)", "def Tap(self, item):\n _ = item\n return True", "def __getitem__(self, item):\n pass", "def __getitem__(self, item):\n pass", "def spider(self):\n for key in self._site:\n if isinstance(self._site[key], Site):\n for fragment in self._site[key].spider():\n yield self._join(key, fragment)\n else:\n yield key", "def _handle_error(self, failure, item, spider):\n self.logger.error(\"失败原因:{}, 失败对象{}\".format(failure, item))", "def __call__(self, *paths):\n\n for item in self.site.items:\n if item.is_page() and item.match(*paths):\n yield item", "def items(self):", "def process_item(self, item, spider):\n session = self.Session()\n sales = item['sales']\n values = item['values']\n del item['sales']\n del item['values']\n property = Property(**item)\n\n try:\n session.add(property)\n # flush to obtain the id of property to be used as the foreign key\n session.flush()\n\n for sale in sales:\n sale['property_id'] = property.id\n session.add(PropertyTransfer(**sale))\n for value in values:\n value['property_id'] = property.id\n session.add(PropertyValue(**value))\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n return item" ]
[ "0.70246166", "0.6571699", "0.65183365", "0.6512757", "0.6512757", "0.646952", "0.63494533", "0.6298831", "0.61873883", "0.6149486", "0.6139941", "0.6128781", "0.6128781", "0.6095686", "0.60349035", "0.5999732", "0.59780586", "0.5950867", "0.59086984", "0.58660156", "0.58603305", "0.5859434", "0.58403045", "0.5803458", "0.5799318", "0.5795761", "0.5791532", "0.5786277", "0.5772638", "0.57642823", "0.57596254", "0.57232213", "0.5712477", "0.57109386", "0.56922185", "0.56880397", "0.56820834", "0.56793976", "0.5670811", "0.5670811", "0.5666806", "0.566597", "0.56616515", "0.56205016", "0.56205016", "0.5610014", "0.5606517", "0.5606026", "0.56038725", "0.55987746", "0.55964965", "0.55922437", "0.5590231", "0.5583568", "0.5582685", "0.55527097", "0.5541497", "0.5529065", "0.5526091", "0.5497838", "0.5478401", "0.5475499", "0.5456578", "0.5449828", "0.54491067", "0.54473346", "0.5386752", "0.5378485", "0.5352235", "0.53465754", "0.533656", "0.53364414", "0.5327843", "0.5324415", "0.53231245", "0.53180027", "0.53141373", "0.5306981", "0.52895516", "0.52847546", "0.52715474", "0.52690125", "0.52687055", "0.52683413", "0.5257997", "0.5250345", "0.52430135", "0.5238371", "0.5232487", "0.52288437", "0.5226401", "0.52256", "0.5224377", "0.5218461", "0.5218461", "0.5215605", "0.5214795", "0.5213786", "0.5212491", "0.5211894" ]
0.5859573
21
Executed on spider completion
def close_spider(self, spider):
    # Update batch finish time, keep this last
    spider.batch.finish_time = spider.get_now_time()
    spider.batch.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _finished(self) -> None:", "def done(self):", "def done(self):", "def finished(self):", "def onfinish( request ):", "def onfinish( request ):", "def finished(self):\n pass", "def onDone(self):\n pass", "def after_parsing(self):", "def finish(self):", "def finish(self):", "def handleContentComplete():", "def on_finish(self):\n pass", "def complete(self):\n pass", "def on_finish(self):", "def finish(self):\n pass", "def finish(self):\n pass", "def open_spider(self, spider):\n pass", "def post_execute(self):", "def completion() -> None:", "def finished(self):\n\t\telog(\"finished\")", "def done(self):\n pass", "def spider_closed(spider):\n spider.crawler.stats.set_value('failed_urls', ','.join(spider.failed_urls))", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def finish(self) -> None:", "def finish(self) -> None:", "def spider_idle(self):\n self.logger.info('the queue is empty, wait for one minute to close the spider')\n time.sleep(60)\n req = self.next_requests()\n\n if req:\n self.schedule_next_requests()\n else:\n self.crawler.engine.close_spider(self, reason='finished')", "def item_done(self, rsp=None):\n self.export.item_done(rsp)", "def do_after(self):\r\n pass", "def spider_idle(self):\n self.logger.info('the queue is empty, wait for one minute to close the spider')\n time.sleep(30)\n req = self.next_requests()\n\n if req:\n self.schedule_next_requests()\n else:\n self.crawler.engine.close_spider(self, reason='finished')", "def spider_idle(self):\n self.logger.info('the queue is empty, wait for one minute to close the spider')\n time.sleep(30)\n req = self.next_requests()\n\n if req:\n self.schedule_next_requests()\n else:\n self.crawler.engine.close_spider(self, reason='finished')", "def spider_idle(self):\n self.logger.info('the queue is empty, wait for one minute to close the spider')\n time.sleep(30)\n req = self.next_requests()\n\n if req:\n self.schedule_next_requests()\n else:\n self.crawler.engine.close_spider(self, reason='finished')", "def spider_closing(spider):\n print(\"Spiderclose\"*10)\n #reactor.stop()", "def done(self):\n ## All done with the request object\n self.closed = True\n self.d.callback('')", "def on_end(self, ctx):\n pass", "def spider_idle(self):\n self.logger.info('the queue is empty, wait for half minute to close the spider')\n time.sleep(30)\n req = self.next_requests()\n\n if req:\n self.schedule_next_requests()\n else:\n self.crawler.engine.close_spider(self, reason='finished')", "def spider_idle(self):\n count = 10\n while self.set_artist and count:\n count -= 1\n url = self.set_artist.pop()\n request = Request(url, dont_filter=True, callback=self.get_info)\n self.crawler.engine.crawl(request, self)", "def finish():\n pass", "def _postprocess(self):", "def finish():", "def finish():", "def finish():", "def finish():", "def onfinish():", "def _success_finish(self):\n # run this task after scrapy process successfully finished\n # cache result, if there is at least one scraped item\n time.sleep(2) # let the data to be dumped into the output file?\n self._update_items_scraped()\n if self.items_scraped:\n self.save_cached_result()\n else:\n logger.warning('Not caching result for task %s (%s) '\n 'due to no scraped items.',\n self.task_data.get('task_id'),\n self.task_data.get('server_name'))\n logger.info('Success finish task #%s', self.task_data.get('task_id', 0))\n self.finished_ok = True", "def action_done(self):\n pass", 
"def finish_parse(self) -> None:\n pass", "def spider_closed(self, spider):\n if spider is not self:\n return\n\n # Phase1: write injection points to file\n import json\n with open(\"output/\"+self.name + '_phase1.json', 'w') as fp:\n json.dump(self.injection_points, fp, sort_keys=True, indent=4)", "def onEnd(self, agent):\n\n pass", "def setupFinished(self, *args, **kwargs): # real signature unknown\n pass", "def get_final_result(self, spider):\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id,\n '{cnpj}-data_collected.json'.format(cnpj=self.cpf_cnpj))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url,\n callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)", "def _request_finished(self):\n\n self._next_request = self._next_request_ts()\n\n self._logger.debug(\"next call at %s\" % (time.strftime(\"%H:%M:%S\", time.localtime(self._next_request))))", "def task_done(self) -> None:\n pass", "def get_final_result(self, spider):\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n if self.screenshots_ids:\n self.result['__screenshots_ids__'] = self.screenshots_ids\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id, '{renavam}-data_collected.json'.format(\n renavam=self.renavam))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url, callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)", "def after_all(self) -> None:", "def postloop(self):\n cmd.Cmd.postloop(self) ## Clean up command completion", "def done(self, request):\n raise NotImplementedError(\"Your %s class has not defined a done() \" \\\n \"method, which is required.\" \\\n % self.__class__.__name__)", "def searchingFinished(self, *args, **kwargs): # real signature unknown\n pass", "def postRun(self):\n pass", "def _onEnd(self, name, completed):\n logging.debug(\"onEnd...\")", "def finished(self):\r\n raise NotImplementedError", "def _crawler_result(item, response, spider):\n output_data.clear()\n output_data.append(dict(item))", "def _scrape(self):", "def taskCompleted(self) -> None:\n ...", "def taskCompleted(self) -> None:\n ...", "def Done(self):\n pass", "def on_event_finished(self, event):", "def after(self):\n pass", "def after(self):\n pass", "def post(self):\n self.finish(self.register())", "def finished(self):\n raise NotImplementedError()", "def action_done(self):", "def dm_teardown(self):\n try:\n dispatcher.disconnect(\n self.dequeue_next_page_requests,\n signal=signals.spider_idle\n )\n except DispatcherKeyError:\n pass", "def post_start(self):", 
"def done(self):\n raise NotImplementedError()", "def onContentDownloadComplete(self, fetcher, connectionResp): #$NON-NLS-1$\r", "def finish(self):\r\n\r\n self._is_finished = True", "def complete_run():\n pass", "def _after_serve_actions(self):\n pass", "def after_test(self, test_results):\n pass", "def grabBlogPostAnalysisComplete(self): #$NON-NLS-1$\r", "def afterWork(self):\n pass", "def spider_closed(self, spider):\n stats = spider.crawler.stats.get_stats()\n prefix = 'start_requests/item_scraped_count/'\n with open(self.log_output, \"a\") as log_file:\n for url in self.start_urls:\n if (prefix + url in stats and stats[prefix + url] < 1) or (prefix + url not in stats):\n log_file.write(prefix + url + \",0\" + \",0\" + '\\n')", "def submit_complete( self ):\n cfunc.submit_complete( self )", "def on_scanner_finish(self, scanner):", "def indexingFinished(self, *args, **kwargs): # real signature unknown\n pass", "def stepFinished(build, step, results):", "def onFinished( self, resultLine ):\n\t\treturn self.agi.finish()", "def set_finish_callback( callback ):", "def set_finish_callback( callback ):", "def open_spider(self,Spider):\n pass", "def Finish(self):\n pass", "def on_success(self) -> None:", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._log.debug('done')", "def after_request(self, response):\n # only track data for specified blueprints\n if self.blueprints:\n if request.blueprint not in self.blueprints:\n return response\n\n t_0 = getattr(g, 'start_time', dt.datetime.now())\n\n visit = dict(\n session_id=session.get('UUID', 0),\n timestamp=timestamp(),\n url=request.url,\n view_args=request.view_args,\n status_code=response.status_code,\n path=request.path,\n latency=(dt.datetime.now() - t_0).microseconds / 100000,\n content_length=response.content_length,\n referer=request.referrer,\n values=request.values\n )\n self.store_visit(visit)\n self.update_top_list(request.path)\n return response", "def on_success(self):\n pass" ]
[ "0.66993535", "0.66545945", "0.66545945", "0.6630113", "0.64786404", "0.64786404", "0.6470851", "0.64693534", "0.6428068", "0.6403498", "0.6403498", "0.63767177", "0.63756603", "0.63177204", "0.6315278", "0.6261107", "0.6261107", "0.62559295", "0.6248004", "0.62337804", "0.62137425", "0.6198367", "0.6195741", "0.61849004", "0.61849004", "0.61849004", "0.61849004", "0.61849004", "0.6176539", "0.6176539", "0.61684537", "0.616038", "0.6158089", "0.61358637", "0.61358637", "0.61358637", "0.6132144", "0.6120285", "0.61007804", "0.60964406", "0.6073235", "0.60400116", "0.60154223", "0.60117614", "0.60117614", "0.60117614", "0.60117614", "0.60057735", "0.5987363", "0.59711015", "0.59689885", "0.59447753", "0.5938859", "0.5937054", "0.5927808", "0.5920804", "0.591966", "0.59132475", "0.59021974", "0.58965886", "0.5886185", "0.5855293", "0.5852984", "0.5851163", "0.5848376", "0.5848007", "0.582968", "0.58143944", "0.58143944", "0.5803928", "0.5803356", "0.5796461", "0.5796461", "0.5789115", "0.5788127", "0.5779843", "0.5779189", "0.57679945", "0.5764871", "0.5763244", "0.5757033", "0.5756706", "0.57564807", "0.5755371", "0.5748692", "0.57369316", "0.5726131", "0.5721188", "0.5717081", "0.56988037", "0.56974465", "0.56967527", "0.5692707", "0.5692707", "0.5677886", "0.56649387", "0.5658642", "0.56586283", "0.5658105", "0.56445974" ]
0.6006156
47
Executed on spider launch
def open_spider(self, spider):
    now = spider.get_now_time()

    # Create initial batch
    spider.batch = model.Batch.objects.create(
        kickoff_time=now, finish_time=now)
    spider.batch.save()

    # save initial site list
    file_content = ContentFile('\n'.join(spider.start_urls))
    filename = str(spider.batch).replace(' ', '')
    spider.batch.sitelist.save(filename, file_content)
    spider.batch.sitelist.close()
    spider.batch.save()

    spider.batch_user_agents = []

    # Give the spider a set of batch user agents, which preserve historical
    # user agent data
    for ua in list(model.UserAgent.objects.all()):
        batch_user_agent = model.BatchUserAgent.objects.create(
            batch=spider.batch,
            ua_string=ua.ua_string,
            primary_ua=ua.primary_ua,
            ua_type=ua.ua_type,
            ua_human_name=ua.ua_human_name
        )
        spider.batch_user_agents.append(batch_user_agent)

    if not spider.batch_user_agents:
        raise ValueError(
            "No user agents; add some with 'manage.py useragents --add'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_spider(self, spider):\n pass", "def boot():\n\t\tcreate_project_url_dir(Spider.project_name)\n\t\tcreate_url_data(Spider.project_name, Spider.base_url)\n\t\tSpider.queue = file_to_set(Spider.queue_file)\n\t\tSpider.crawled = file_to_set(Spider.crawled_file)", "def post_start(self):", "def open_spider(self,Spider):\n pass", "def __init__(self):\n self.SPIDER = \"spider\"", "def on_startup(self) -> None:\n ...", "def launch(self):", "def start(self):\n\t\tself.app.printflush('Sitemap: ' + self.sitemap_url)\n\t\tself.getUrlsList()\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count))\n\t\tself.app.printflush('Processes: ' + str(self.processes))\n\t\tself.CheckURLs()\n\t\tself.printReport()", "def on_run(self):\r\n\r\n\t\tpass", "def __init__(self, spider):\n\n super(SprintBehavior, self).__init__(spider)\n self.remoteContext = spider.remoteController.context", "def open_spider(self, spider):\n assert spider not in self.sites, \"Spider already opened: %s\" % spider\n self.sites[spider] = SpiderInfo(spider)", "def dm_setup(self):\n dispatcher.connect(\n self.dequeue_next_page_requests,\n signal=signals.spider_idle\n )\n self._was_setup_called = True", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def postRun(self):\n pass", "def on_before_execution(self):\n pass", "def on_start(self):", "def on_start(self):", "def main():\n\n from scrapy.crawler import CrawlerProcess\n from scrapy.utils.project import get_project_settings\n\n process = CrawlerProcess(get_project_settings())\n process.crawl(NCBIGeoSpider)\n process.start()", "def _start(self):", "def on_run(self):\n pass", "def pre_execute(self):", "def on_start(self, ctx):\n pass", "def __init__(self, *args, **kwargs):\n super(AlibabaCompanySpider, self).__init__(*args, **kwargs)", "def on_pre_enter(self):\n self.setup()\n self.start()", "def startup(self) -> None:", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def runSpider(spiderClass):\n\n\tprocess = CrawlerProcess({\n\t\t# 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n\t})\n\n\tprocess.crawl(spiderClass)\n\tprocess.start() # the script will block here until the crawling is finished", "def start(self):\n ...", "def main():\n # get input params\n args = parm_parser.get_args()\n # init log config\n log.init_log('./log/mini_spider')\n if args:\n # read config file spider.conf\n conf_params = parm_parser.set_config_by_file(args.conf)\n # use config set up spider initial params\n spider = SpiderWorker(conf_params)\n # init result_path, make it complete\n spider.set_path()\n # init url queue\n spider.set_url_queue()\n # start to crawl url\n spider.start_crawl_work()\n\n return", "def pre_start(self) -> None:\n pass", "def startup(self):\n pass", "def post_execute(self):", "def post_setup(self, context):\n pass", "def _start(self):\n pass", "def started(self):", "def on_load(self):\n pass", "def on_load(self):\n pass", "def on_launch(request):\n\n return get_launch_response()", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_start_hook(self):\n\n LOG.debug(_('XManager post_start_hook...'))\n\n pass", "def on_load(self):", "def __init__(self):\n super(PreProcess, self).__init__()", "def 
run_spiders():\n hour_limit = 3600 # in seconds\n\n settings = get_project_settings()\n\n # Uncomment the following block of code if you want to test the manager on\n # the \"onepagetest\" to make sure the manager is working.\n \"\"\"\n settings[\"HTTPCACHE_ENABLED\"] = 1\n settings[\"HTTPCACHE_EXPIRATION_SECS\"] = 0\n settings[\"HTTPCACHE_STORAGE\"] = \"scrapy.extensions.httpcache.FilesystemCacheStorage\"\n settings[\"HTTPCACHE_IGNORE_MISSING\"] = 1\n settings[\"HTTPCACHE_DIR\"] = \"onepagetest\"\n \"\"\"\n\n runner = CrawlerRunner(settings)\n begin_time = datetime.now()\n\n d = runner.crawl(\"sb_spider\")\n d.addBoth(lambda _: continue_crawl(d, runner, begin_time, hour_limit))\n reactor.run()", "def on_start(self):\n self.init()", "def on_start(self):\n self.write_log(\"策略启动\")", "def on_start(self):\n self.write_log(\"策略启动\")", "def on_start(self):\n self.write_log(\"策略启动\")", "def on_start(self):\n self.write_log(\"策略启动\")", "def initialize_process():\n\n settings = Settings({'BOT_NAME': 'warnnoticebot',\n 'LOG_LEVEL': 'INFO',\n 'ITEM_PIPELINES': {'modules.pipelines.PerStateJsonlinesExportPipeline': 300},\n 'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36', # This is my actual user agent when using a browser\n 'COOKIES_ENABLED': False,\n 'ROBOTSTXT_OBEY': True,\n 'DOWNLOAD_DELAY': 5.0,\n 'DEFAULT_REQUEST_HEADERS': {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n 'Upgrade-Insecure-Requests': 1}\n })\n \n process = CrawlerProcess(settings) \n\n return process", "def setup( self ):", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(autos_detran_terceiros_sp_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider", "def __init__(self, spider):\n self.connect = pymysql.Connect(\n cursorclass=pymysql.cursors.DictCursor,\n **zaih_scraper.settings.CONNECT_INFO\n )\n\n self.cursor = self.connect.cursor()\n\n self.start_time = datetime.datetime.now()\n self.end_time = datetime.datetime.now()\n self.duration = datetime.timedelta()\n\n # self.logger = logging.getLogger(spider.name)\n self.logger = SetLogger.set_logger(spider.name)\n\n self.table_exist = []", "def performPostModuleDiscoveryTasks(self):\n\n print(\"!\"*80)\n print(\"You are being watched!!!\")\n print(\"!\"*80)\n\n self.webWidget = slicer.qSlicerWebWidget()\n slicer.util.mainWindow().statusBar().addWidget(self.webWidget)\n\n self.logic = AnalyticsLogic()\n self.logic.watchAndReport(self.reportToGoogle)", "def _initialise_run(self) -> None:", "def OnEpisodeStart(self):\n pass", "def from_crawler(cls, crawler, *args, **kwargs):\n\n spider = super(telecom_vivo_movel_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider", "def on_start(self, session):\n pass", "def run(self):\n\t\t\n\t\tpass", "def run_rss(self):\n\n pass", "def Start(self) :\n\t\t...", "def _after_serve_actions(self):\n pass", "def run(self): \r\n return", "def kickoff(self):\n settings = Settings()\n\n # settings.set(\"USER_AGENT\", \"Test\")\n settings.set('JOBDIR', self.args.data_dir)\n self.spider = MavenDataSpider()\n\n # Wrap with crawler, configure\n crawler = Crawler(self.spider, settings)\n crawler.signals.connect(spider_closing, signal=signals.spider_closed)\n\n logger.info('Starting crawler')\n crawler.crawl(self.spider, 
app=self, dbsess=self.session)\n\n self.spider = crawler.spider\n self.spider.link_queue_mode = False\n if self.args.debug:\n coloredlogs.install(level=logging.DEBUG)\n\n # Keeping thread working\n reactor.run()", "def post_init(self):\n\t\tpass", "async def post_launch(self, **kwargs: Any) -> None:\n pass", "def start_processing(self):", "def start(self):\r\n pass", "def __init__(self):\n dispatcher.connect(self.stats_spider_closed, signal=signals.stats_spider_closed)\n dispatcher.connect(self.stats_spider_closed, signal=signals.spider_closed)\n dispatcher.connect(self.stats_spider_closed, signal=signals.engine_stopped)", "def run(self):\r\n pass", "def _onStart(self, name):\n logging.debug(\"onStart...\")", "def afterInit(self):", "def pre_start_hook(self):\n\n LOG.debug(_('XManager pre_start_hook...'))\n\n pass", "def run(self, **kwargs):", "def _post_load(self):\n pass", "def _post_init(self):\n pass", "def do_before(self):\r\n pass", "def start (self):\n pass", "def start (self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass" ]
[ "0.7322433", "0.70774", "0.68925005", "0.68527085", "0.67921203", "0.66922694", "0.65391195", "0.6513823", "0.6487005", "0.6476667", "0.6425333", "0.64246696", "0.64244527", "0.64244527", "0.64244527", "0.64244527", "0.64244527", "0.64244527", "0.64244527", "0.64244527", "0.64145714", "0.64036447", "0.6379592", "0.6379592", "0.6364965", "0.62880903", "0.626105", "0.6222223", "0.6202798", "0.61285603", "0.6097509", "0.6079374", "0.6072184", "0.6058451", "0.605199", "0.6037587", "0.6033839", "0.6030236", "0.60231346", "0.60181606", "0.60174406", "0.59971946", "0.5965661", "0.5965661", "0.5961853", "0.5927254", "0.5927254", "0.5927254", "0.5927254", "0.5927254", "0.5926373", "0.59019864", "0.5879784", "0.5873396", "0.58703315", "0.58660233", "0.58660233", "0.58660233", "0.58660233", "0.5861573", "0.5855286", "0.58550423", "0.5852777", "0.58440506", "0.5842449", "0.5826388", "0.5826378", "0.5825996", "0.57955045", "0.5784885", "0.577483", "0.57704586", "0.5767406", "0.5763356", "0.57571894", "0.57560766", "0.5749381", "0.57395244", "0.5733369", "0.57300335", "0.57300013", "0.5726066", "0.57153255", "0.5711302", "0.5697477", "0.56829095", "0.5681396", "0.56581825", "0.56581825", "0.5651332", "0.5651332", "0.5651332", "0.5651332", "0.5651332", "0.5651332", "0.5651332", "0.5651332", "0.5651332", "0.5651332", "0.5651332" ]
0.5739939
77
Placeholder for selftest function. An example use would be to test package api connectivity. Suggested return values are unimplemented, success, or failure.
def selftest_function(opts):
    threatminer = ThreatMiner(opts.get("resilient", {}), opts.get(PACKAGE, {}))
    data, msg = threatminer.get(TEST_URI)
    state = "success" if data else "failure"
    return {"state": state}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testApi(self):", "def test_dummy():", "def test_something():", "def unitary_test():", "def test(self):\n raise NotImplementedError", "def test_func():\n pass", "def test_dummy_test():\n pass", "def test_integration(self):\n self.assertTrue(return_true())", "def test_for_client():", "def test_nothing(self):", "def test_if(self):", "def test_basic_execution(self):", "def local_test():\n pass", "def test():\n pass", "def test_1():\n\tassert api_call().status_code == 200", "def test_with_defaults(self, _):\n result = self.run()\n return self._handle_test_result(result)", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def inner_test():\n pass", "def inner_test():\n pass", "def test_method(self):", "def test_stub(self):\n pass", "def test_single_test_case():\n pass", "def test_passed():\n pass", "def test_orchestrator_http_simple(self):\n pass", "def _run_online_test(*args, **kwargs):\n import responses # noqa: F401", "def test(self) -> Any:\n pass", "def tests():\n api.local('nosetests')", "def test(self):\n pass", "def run_test(self):\n raise NotImplementedError", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test(self):\n\n return True", "def test_package(self):\n pass", "def test_1():", "def test():", "def test():", "def spec_tests():\n pass", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def test():\n pass", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'pcap' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.pcap)\n api_client.not_implemented.assert_called_with(\"pcap\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def test_unit(self):\n self.assertTrue(return_true())", "def tests():", "def test():\r\n pass", "def test_required_methods(self):", "def selftest_function(opts):\n\n try:\n\n LOG.info(\"Verifying app.config values for %s\", CrowdStrikeHelper.app_config_section)\n\n app_configs = opts.get(CrowdStrikeHelper.app_config_section, {})\n\n cs_helper = CrowdStrikeHelper(app_configs)\n \n LOG.info(\"Verifying OAuth2 Credentials...\")\n\n cs_helper.get_oauth2_token()\n\n LOG.info(\"Success\")\n\n LOG.info(\"Verifying Basic Auth Credentials...\")\n\n res = cs_helper.get_device_status(\"x\")\n\n if res is not None and res.get(\"err_msg\") == \"Could not get device status for device_id x.\":\n LOG.info(\"Success\")\n else:\n raise ValueError(\"Failed to verify Basic Auth Credentials\")\n\n LOG.info(\"Test was successful\")\n\n return {\n \"state\": \"success\"\n }\n\n except Exception as err:\n err_reason_msg = \"\"\"Could not connect to CrowdStrike.\n error: {0}\n ---------\n Current Configs in app.config file::\n ---------\n cs_falcon_oauth2_base_url: {1}\n cs_falcon_oauth2_cid: {2}\n cs_falcon_bauth_base_url: {3}\n cs_falcon_bauth_api_uuid: {4}\\n\"\"\".format(\n err,\n cs_helper.oauth2_base_url,\n cs_helper.oauth2_cid,\n cs_helper.bauth_base_url,\n cs_helper.bauth_api_uuid)\n\n LOG.error(err_reason_msg)\n\n return {\n \"state\": \"failure\",\n \"reason\": err_reason_msg\n }", "def test_call(self):\r\n with self.assertRaises(NotImplementedError):\r\n self.est1(1)", "def test_alive():\n pass", "def test_alive():\n pass", "def 
test_alive():\n pass", "def test_should_implement(self):\n pass", "def test(self):", "def test(self):", "def test_single_issue():\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'interesting' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.interesting)\n api_client.not_implemented.assert_called_with(\"interesting\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def test_noop():\n assert True", "def test_api() -> bool:\r\n weather = False\r\n news = False\r\n covid = False\r\n if check_weather_version():\r\n logging.info(\"Weather API version is up to date (check_weather_version())\")\r\n weather = True\r\n else:\r\n logging.info(\"Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED\")\r\n if check_news_version():\r\n logging.info(\"News API version is up to date (check_news_version())\")\r\n news = True\r\n else:\r\n logging.info(\"News API version is not up to date (check_news_version()) - ACTION REQUIRED\")\r\n if check_covid_version():\r\n logging.info(\"Covid-19 API version is up to date (check_covid_version())\")\r\n covid = True\r\n else:\r\n logging.info(\"Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED\")\r\n return bool(weather and news and covid)", "def test(cls, hostname):\n pass", "def __test__():\n#-------------------------------------------------------------------------------\n import pylib.tester as tester\n return 0", "def test_cloud_api():\n mock = provider.MockProvider()\n\n mock.setup_cloud('empty config....')\n\n assert mock.get_ext_ip_addr('some-node')", "def ping(self):\n raise AssertionError(\"Ping function is not implemented\")", "def test1(self):\n\t\treturn describeInterface(self)", "def test_2():", "def runTests(self):\n \n pass", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'alerts' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.alerts)\n api_client.not_implemented.assert_called_with(\"alerts\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'analyze' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.analyze)\n api_client.not_implemented.assert_called_with(\"analyze\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def testfunc():\n return 1", "def test_easily_reusable(self):\n result = get_api_url()\n\n self.assertEqual(result, 'https://FQDN/afp-api/latest')\n self.mock_sanitize_host.assert_called_once_with('afp')", "def test_let(self):", "def test_should_be_ok(self):\n self.assertTrue(True)", "def test_wip(self):\n self.assertTrue(not return_true())", "def test_dummy_test(self):\n self.assertTrue(True)", "def runtest(self):", "def doNotFailOnNetworkError(func):\n\n @functools.wraps(func)\n def wrapper(*a, **kw):\n try:\n func(*a, **kw)\n except FailTest as e:\n if e.args[0].startswith(\"'Failed to get object inventory from \"):\n raise SkipTest(\n (\n \"This test is prone to intermittent network errors. \"\n \"See ticket 8753. 
Exception was: {!r}\"\n ).format(e)\n )\n raise\n\n return wrapper", "def test_empty_functions():", "def run(self, test, env):\n\n raise NotImplementedError", "def test_5():", "def test_app():\n pass", "def _RunTest(self, adb):\n return ResultType.FAIL, 'Unimplemented _RunTest() method!'", "def istest(func):\n func.__test__ = True\n return func", "def test_module(client: Client) -> str:\n\n test_ip = \"1.1.1.1\"\n message: str = \"\"\n try:\n response = client.test_module(test_ip)\n if response.status_code in [200, 404]:\n message = \"ok\"\n elif response.status_code == 403:\n message = \"Authorization Error: make sure API Key is correctly set\"\n else:\n message = \"Something went wrong\"\n except DemistoException as e:\n raise e\n return message", "def test_3():", "def test_connection():\r\n try:\r\n connect()\r\n except:\r\n pass\r\n print ('Unable to connect.')\r\n else:\r\n main()", "def main():\n # ============================================================================\n\n working_folder = os.path.dirname(os.path.realpath(__file__))\n os.chdir(working_folder)\n\n description_string =\\\n \"A set of python tests to fetch data from Open Weather Map\"\n epilog_string = (\"These tests should be run regularly to ensure that the \"\n \"API server is healthy\")\n\n parser = argparse.ArgumentParser(\n prog='OWM-API-test',\n description=description_string,\n epilog=epilog_string)\n\n parser.add_argument(\n \"--tb\",\n action='store',\n default='line',\n nargs='?',\n choices=['auto', 'long', 'short', 'no', 'line', 'native'],\n help='Set the traceback level for pytest',\n dest='traceback')\n\n parser.add_argument(\n \"-v\", \"--verbose\",\n action='store_true',\n help='Increase output verbosity',\n dest='verbose')\n\n parser.add_argument(\n \"-q\", \"--quiet\",\n action='store_true',\n help='Reduce output verbosity')\n\n args = parser.parse_args()\n\n # ============================================================================\n # construct pytest commands\n pytest_command = []\n if args.verbose:\n # we double increase verbosity to make it actually verbose\n pytest_command.extend([\"-v\", \"-v\"])\n # this only overwrites traceback argument if left at default\n if args.traceback == \"line\":\n args.traceback = \"long\"\n\n if args.quiet: # this overwrites and tb argument given\n args.traceback = \"no\"\n\n pytest_command.extend([\"--tb\", args.traceback])\n\n # ============================================================================\n\n printing_functions.test_declaration(\"Running Tests...\")\n run_local_tests(pytest_command)", "def test_emirp_check():\r\n pass", "def run_tests(self):\n raise NotImplementedError", "def _general_testing(self, context, kind, *args, **kwargs):\r\n if kind == \"fake_next_op\":\r\n self._register_fake_next_op(context.channel, *args, **kwargs)\r\n self._reply(context, proto_success({}, None), None)\r\n return True\r\n self._reply(context, proto_failure({\"Unsupported testing function '{}'\".format(kind)}), None)\r\n return False", "def test_get_run(self):\n pass", "def TestConnection(self):\n return self._analyzer.TestConnection()", "def test_install_plugin_again_is_ok(self):\n raise NotImplementedError()", "def test_T01():", "def test_main():\n # Setup\n # Exercise\n # Verify", "def test_verify_connection_to_a_device():", "def test_module(self):\n pass" ]
[ "0.701643", "0.6862365", "0.6859328", "0.68279654", "0.6786072", "0.6753484", "0.6739912", "0.67003626", "0.66963637", "0.6691693", "0.6675198", "0.66256887", "0.66032374", "0.6586758", "0.6558276", "0.64686215", "0.64683706", "0.64683706", "0.64683706", "0.64683706", "0.64683706", "0.6461951", "0.6461951", "0.64508307", "0.6433016", "0.64287555", "0.6423544", "0.64046", "0.63929886", "0.63731277", "0.63634235", "0.6355721", "0.63459027", "0.6336658", "0.6336658", "0.6333653", "0.6330733", "0.6329705", "0.63294065", "0.63294065", "0.6306503", "0.63064164", "0.6304583", "0.62968314", "0.62925637", "0.62858933", "0.6285876", "0.6282399", "0.6270157", "0.626435", "0.6262036", "0.6262036", "0.6262036", "0.6253099", "0.62316465", "0.62316465", "0.622982", "0.6227651", "0.6227651", "0.6227651", "0.62168074", "0.6184218", "0.61718214", "0.61689824", "0.61585224", "0.614523", "0.6138429", "0.61354244", "0.6134967", "0.6131848", "0.6112818", "0.6109289", "0.6107955", "0.6102679", "0.61017513", "0.6099701", "0.6096135", "0.6094755", "0.6091781", "0.60897493", "0.6072846", "0.6055647", "0.6055059", "0.6054697", "0.60532147", "0.60467595", "0.6045713", "0.6045153", "0.60447264", "0.6040937", "0.6037025", "0.6024497", "0.602355", "0.6021189", "0.5996583", "0.5987873", "0.5972495", "0.59646016", "0.5959332", "0.5958163" ]
0.6658282
11
Set up the Clementine platform.
def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: host = config[CONF_HOST] port = config[CONF_PORT] token = config.get(CONF_ACCESS_TOKEN) client = ClementineRemote(host, port, token, reconnect=True) add_entities([ClementineDevice(client, config[CONF_NAME])])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure(self):\n\n self.platform.configure()", "def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()", "def setup(self):\n\t\tpass", "def setup(self):\r\n pass", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/collective/demo.plone.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n if env.latest:\n if env.python3:\n sudo('ln -s local_demo_nightly_py3.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_demo_nightly_py2.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n else:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/starzel/buildout/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n 
sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def SetupEnvironment(self):\n pass", "def setup(self):\n self.machine = Machine(['a', 'b', 'c', '_'])", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self):\n pass # pragma: no cover", "def platform_start(self):\n self.platform.start()", "def setup_method(self):\n self.hass = get_test_home_assistant()\n\n self.config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\", \"name\": \"test local\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n self.endpoint_url = f\"https://westus.{mf.FACE_API_URL}\"", "def setup(self) -> None:", "def setup( self ):", "def setup(self):\n pass", "def setup(self):\n ...", "def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')", "def set_up(self):\n self.path.state = self.path.gen.joinpath(\"state\")\n if self.path.state.exists():\n self.path.state.rmtree(ignore_errors=True)\n self.path.state.mkdir()\n\n for script in self.given.get(\"scripts\", []):\n script_path = self.path.state.joinpath(script)\n\n if not script_path.dirname().exists():\n script_path.dirname().makedirs()\n\n script_path.write_text(self.given[\"scripts\"][script])\n script_path.chmod(\"u+x\")\n\n for filename, contents in self.given.get(\"files\", {}).items():\n self.path.state.joinpath(filename).write_text(contents)\n\n self.python = hitchpylibrarytoolkit.project_build(\n \"commandlib\", self.path, self.given[\"python version\"]\n ).bin.python\n\n self.example_py_code = (\n ExamplePythonCode(self.python, self.path.state)\n .with_code(self.given.get(\"code\", \"\"))\n .with_setup_code(self.given.get(\"setup\", \"\"))\n )", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def setup(self):\n # if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n crindsim.set_mode(\"manual\")\n pos.connect()\n pos.sign_on()", "def setup_method(self):\n self.project_dir = os.path.join(DIR, 'test-project')\n self.e2e = E2EEnv(self.project_dir)", "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def main():\n\n # Install crypt32 (not required for Proton 3.16-3 and up)\n util.protontricks('crypt32')\n\n # Install directmusic, set overrides\n util.protontricks('directmusic')\n util.winedll_override('dmime', 'n')\n util.winedll_override('dmsynth', 'n')\n util.winedll_override('dmusic', 'n')\n 
util.winedll_override('dsound', 'n')\n util.winedll_override('dswave ', 'n')\n util.winedll_override('l3codecx', 'n')\n\n # Set sound to alsa\n util.protontricks('sound=alsa')\n\n # Disable launcher\n util.replace_command('patriots.exe', 'riseofnations.exe')", "def _setup(self) -> None:\n\t\treturn", "def initialize():\n environment = Environment()\n environment.setup()", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def setup(client):\n client.add_cog(ProcessDisplay(client))", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None\n) -> None:\n # Assign configuration variables.\n # The configuration check takes care they are present.\n host = config[CONF_HOST]\n username = config[CONF_USERNAME]\n password = config.get(CONF_PASSWORD)\n\n # Setup connection with devices/cloud\n hub = awesomelights.Hub(host, username, password)\n\n # Verify that passed in configuration works\n if not hub.is_valid_login():\n _LOGGER.error(\"Could not connect to AwesomeLight hub\")\n return\n\n # Add devices\n add_entities(AwesomeLight(light) for light in hub.lights())", "def setup(args=None):\n\n # Setup API server\n api = ParadropAPIServer(reactor)\n api.putChild('internal', Base(apiinternal, allowNone=True))\n site = Site(api, timeout=None)\n\n # Development mode\n if(args and args.development):\n thePort = settings.PDFCD_PORT + 10000\n out.info('Using DEVELOPMENT variables')\n # Disable sending the error traceback to the client\n site.displayTracebacks = True\n elif(args and args.unittest):\n thePort = settings.PDFCD_PORT + 20000\n out.info('Running under unittest mode')\n site.displayTracebacks = True\n else:\n thePort = settings.PDFCD_PORT\n site.displayTracebacks = False\n initializeSystem()\n\n # Setup the port we listen on\n reactor.listenTCP(thePort, site)\n\n # Never return from here\n reactor.run()", "def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()", "def startup( self ):\n # ---- Setup UPNPC ----\n if self.config.neuron.use_upnpc:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<green>ON</green>')\n try:\n self.external_port = net.upnpc_create_port_map( port = self.axon.port )\n except net.UPNPCException as upnpc_exception:\n logger.critical('Failed to hole-punch with upnpc')\n raise RuntimeError('Failed to hole-punch with upnpc')\n else:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<red>OFF</red>')\n self.external_port = self.config.axon.port\n\n # ---- Get external ip ----\n try:\n self.external_ip = net.get_external_ip()\n bittensor.logging.success(prefix = 'External IP', sufix = '<blue>{}</blue>'.format(self.external_ip))\n except net.ExternalIPNotFound as external_port_exception:\n raise RuntimeError('Unable to attain your external ip. Check your internet connection. error:{}', external_port_exception)\n\n # ---- Setup tensorboard ----\n if self.config.neuron.use_tensorboard == True:\n self._tensorboard_program = program.TensorBoard()\n self._tensorboard_program.configure(argv=[None, '--logdir', self.config.neuron.full_path, '--load_fast=true'])\n self._tensorbaord_url = self._tensorboard_program.launch()\n bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<blue>http://localhost:6006/</blue>')\n else: bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<red>OFF</red>')\n\n # ---- Setup Wallet. 
----\n if not self.wallet.has_coldkeypub:\n self.wallet.create_new_coldkey( n_words = 12, use_password = True )\n if not self.wallet.has_coldkeypub:\n raise RuntimeError('Miner must have access to a decrypted coldkeypub')\n if not self.wallet.has_hotkey:\n self.wallet.create_new_hotkey( n_words = 12, use_password = False )\n if not self.wallet.has_hotkey:\n raise RuntimeError('Miner must have access to a decrypted hotkey')\n\n # ---- Subscribe to chain ----\n subscribe_success = self.subtensor.subscribe(\n wallet = self.wallet,\n ip = self.external_ip,\n port = self.external_port,\n modality = bittensor.proto.Modality.TEXT,\n wait_for_finalization = True,\n timeout = 4 * bittensor.__blocktime__,\n )\n if not subscribe_success:\n raise RuntimeError('Failed to subscribe neuron.')\n\n # ---- Starting axon ----\n self.axon.start()", "def main():\n\n # Fixes the startup process.\n util.replace_command('Launcher.exe', 'Borderlands2.exe')\n util.append_argument('-NoSplash')\n\n # Disables esync prevents crashes.\n util.disable_esync()\n\n # Enables NVIDIA PhysX in Borderlands 2.\n util.protontricks('physx')", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup_platform(hass, config, add_entities, discovery_info=None):\n name = config.get(CONF_NAME)\n description = config.get(CONF_DESCRIPTION)\n product_id = config.get(CONF_PRODUCT_ID)\n domain = config.get(CONF_LOCALE)\n\n add_entities([Geizwatch(name, description, product_id, domain)], True)", "def setupMonti():\n #Update /etc/hosts with mongo-server and management-engine nodes\n sudo(\"apt-get install zookeeper\")\n sudo(\"apt-get install zookeeperd\")\n sudo(\"pip2 install chariot-runtime\")\n #update configuration file located in /etc/chariot/chariot.conf\n run (\"cd /etc/init.d && sudo update-rc.d chariot-nmw defaults 99\")\n sudo(\"reboot\")", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'rtutank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusRTU(devconfig['icsifaces'][0], points.values())\n self.server.start()", "def setUpEnv(self):\n \n robot = Robot('atrv')\n\n pose = Sensor('pose')\n robot.append(pose)\n pose.configure_mw('yarp')\n\n motion = Actuator('v_omega')\n robot.append(motion)\n motion.configure_mw('yarp')\n \n env = Environment('indoors-1/indoor-1')\n env.configure_service('socket')", "def setup(bot):\n bot.add_cog(Miniscape(bot))", "def SetupEnvironment(self):\n self._adb.RunShellCommand('chmod 777 /data/local/tmp')\n self._adb.RunShellCommand('setenforce 0')\n for prop in self._wrap_properties:\n self._adb.RunShellCommand('setprop %s \"logwrapper %s\"' % (\n prop, self.GetTestWrapper()))\n SetChromeTimeoutScale(self._adb, self.GetTimeoutScale())", "def Setup(self):\n return True", "def setup(self):\n self.kernel = RunningKernel()\n self.setup_sanitize_files()", "def setup_environment():\n global _ENV_SETUP_DONE\n if _ENV_SETUP_DONE:\n return\n _ENV_SETUP_DONE = True\n\n _configure_libraries()\n\n custom_module_path = 
os.environ.get(\"DETECTRON2_ENV_MODULE\")\n\n if custom_module_path:\n setup_custom_environment(custom_module_path)\n else:\n # The default setup is a no-op\n pass", "def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }", "def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()", "def setup(self):\n pass", "def setup(self):\n pass", "def setup_application(self):\n pass", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'tcptank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusTCP( devconfig['icsifaces'][0], points.values() )\n self.server.start()", "def _setup(self):", "def _setup(self):", "def configure(ctx):\n mxmlc = ctx.options.mxmlc\n\n if not mxmlc:\n mxmlc = ctx.find_program('mxmlc')\n\n ctx.env.MXMLC = os.path.abspath(os.path.expanduser(mxmlc))\n\n ctx.env.JAVA = ctx.find_program('java')\n\n if not ctx.env.SIKULI_HOME:\n ctx.env.SIKULI_HOME = get_sikuli_home(ctx)\n ctx.msg('Setting SIKULI_HOME', ctx.env.SIKULI_HOME)\n\n if not os.path.exists(ctx.env.SIKULI_HOME):\n ctx.fatal('Unable to find Sikuli at %r' % (ctx.env.SIKULI_HOME,))\n\n ctx.env.FLASH_PLAYER = ctx.options.flash_player\n\n if not ctx.env.FLASH_PLAYER:\n ctx.fatal('Standalone Flash player required, supply --flash_player')\n\n ctx.msg('Using Flash Standalone Player', ctx.env.FLASH_PLAYER)", "def Setup(self):\n raise NotImplementedError(\n 'No runtime setup defined for %s' % self.__class__.__name__)", "def setup():\n pass", "def setup(self, *args, **kwargs):\n pass", "def startup(self) -> None:", "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. 
Aborting.\"\n\t\treturn\n\n\tread_config_file()", "def setup_pymol():\n pymol.finish_launching() # Prevent threading errors\n # Configure global settings\n cmd.set('scene_buttons', 1)\n cmd.set('matrix_mode', 1)\n cmd.set('movie_panel', 1)\n # Configure quality settings\n cmd.mset(\"1 x500\")\n cmd.set('ray_trace_frames', 1)\n cmd.viewport(800, 800)", "def startup(self):\n pass", "def setup(self):\n build_world.start_level(self)", "def setup(self): \n # Navigate to POS screen\n pos.connect()", "def setup_platform(hass, config, add_entities, discovery_info=None):\n add_entities([\n EzvizCamera(hass, config)\n ])\n return True", "def __setup(self):\n\n build_environment = []\n\n # The download URL has the format contains vMAJOR.MINOR in the\n # path and the tarball contains MAJOR.MINOR.REVISION, so pull\n # apart the full version to get the MAJOR and MINOR components.\n match = re.match(r'(?P<major>\\d+)\\.(?P<minor>\\d+)', self.version)\n major_minor = 'v{0}.{1}'.format(match.groupdict()['major'],\n match.groupdict()['minor'])\n tarball = 'openmpi-{}.tar.bz2'.format(self.version)\n url = '{0}/{1}/downloads/{2}'.format(self.baseurl, major_minor,\n tarball)\n\n # CUDA\n if self.cuda:\n if self.__toolchain.CUDA_HOME:\n self.configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n self.configure_opts.append('--with-cuda')\n else:\n self.configure_opts.append('--without-cuda')\n\n # InfiniBand\n if self.infiniband:\n self.configure_opts.append('--with-verbs')\n else:\n self.configure_opts.append('--without-verbs')\n\n # UCX\n if self.__ucx:\n if isinstance(self.__ucx, string_types):\n # Use specified path\n self.configure_opts.append('--with-ucx={}'.format(self.__ucx))\n else:\n self.configure_opts.append('--with-ucx')\n\n # If UCX was built with CUDA support, it is linked with\n # libcuda.so.1, which is not available during the\n # build stage. 
Assume that if OpenMPI is built with\n # CUDA support, then UCX was as well...\n if self.cuda:\n cuda_home = \"/usr/local/cuda\"\n if self.__toolchain.CUDA_HOME:\n cuda_home = self.__toolchain.CUDA_HOME\n self.__commands.append('ln -s {0} {1}'.format(\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so'),\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so.1')))\n if not self.__toolchain.LD_LIBRARY_PATH:\n build_environment.append('LD_LIBRARY_PATH=\"{}:$LD_LIBRARY_PATH\"'.format(os.path.join(cuda_home, 'lib64', 'stubs')))\n\n if self.directory:\n # Use source from local build context\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd, self.directory),\n toolchain=self.__toolchain))\n else:\n # Download source from web\n self.__commands.append(self.download_step(url=url,\n directory=self.__wd))\n self.__commands.append(self.untar_step(\n tarball=os.path.join(self.__wd, tarball), directory=self.__wd))\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version)),\n environment=build_environment,\n toolchain=self.__toolchain))\n\n self.__commands.append(self.build_step())\n\n if self.__check:\n self.__commands.append(self.check_step())\n\n self.__commands.append(self.install_step())\n\n # Set library path\n libpath = os.path.join(self.prefix, 'lib')\n if self.ldconfig:\n self.__commands.append(self.ldcache_step(directory=libpath))\n else:\n self.__environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)\n\n if self.directory:\n # Using source from local build context, cleanup directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, self.directory)]))\n else:\n # Using downloaded source, cleanup tarball and directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, tarball),\n os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version))]))", "def start() -> None:\n # Authenticate\n ee.Authenticate()\n\n # Initialize the library\n ee.Initialize()", "def setup(self) -> None:\n self.setup_logging()\n self.setup_plugins()\n self.post_setup()", "def setup(self): \n pass", "def setup_method(self):\n # pylint: disable=attribute-defined-outside-init\n\n self.session = FakedSession('fake-host', 'fake-hmc', '2.14.1', '1.9')\n self.client = Client(self.session)\n self.faked_cpc = self.session.hmc.cpcs.add({\n 'object-id': CPC_OID,\n # object-uri is set up automatically\n 'parent': None,\n 'class': 'cpc',\n 'name': 'fake-cpc1-name',\n 'description': 'CPC #1 (DPM mode, storage mgmt feature enabled)',\n 'status': 'active',\n 'dpm-enabled': True,\n 'is-ensemble-member': False,\n 'iml-mode': 'dpm',\n 'available-features-list': [\n dict(name='dpm-storage-management', state=True),\n ],\n })\n assert self.faked_cpc.uri == CPC_URI\n self.cpc = self.client.cpcs.find(name='fake-cpc1-name')\n self.faked_console = self.session.hmc.consoles.add({\n # object-id is set up automatically\n # object-uri is set up automatically\n # parent will be automatically set\n # class will be automatically set\n 'name': 'fake-console-name',\n 'description': 'The HMC',\n })\n self.console = self.client.consoles.console", "def test_setup_platform(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, 
ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.microsoftface_demo_camera\")", "def setup():\n global RBD_POOL, RBD_STORAGE_CLASS, RBD_SECRET, CEPHFS_OBJ, \\\n CEPHFS_STORAGE_CLASS, CEPHFS_SECRET, RBD_PVC, CEPHFS_PVC\n log.info(\"Creating RBD Pool\")\n RBD_POOL = helpers.create_ceph_block_pool()\n\n log.info(\"Creating RBD Secret\")\n RBD_SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)\n\n log.info(\"Creating RBD StorageClass\")\n RBD_STORAGE_CLASS = helpers.create_storage_class(\n constants.CEPHBLOCKPOOL, RBD_POOL.name, RBD_SECRET.name\n )\n\n log.info(\"Creating CephFilesystem\")\n CEPHFS_OBJ = helpers.create_cephfilesystem()\n\n log.info(\"Creating FS Secret\")\n CEPHFS_SECRET = helpers.create_secret(constants.CEPHFILESYSTEM)\n\n log.info(\"Creating FS StorageClass\")\n CEPHFS_STORAGE_CLASS = helpers.create_storage_class(\n constants.CEPHFILESYSTEM, helpers.get_cephfs_data_pool_name(),\n CEPHFS_SECRET.name\n )\n\n log.info(\"Creating RBC PVC\")\n RBD_PVC = helpers.create_pvc(sc_name=RBD_STORAGE_CLASS.name)\n\n log.info(\"Creating CephFs PVC\")\n CEPHFS_PVC = helpers.create_pvc(sc_name=CEPHFS_STORAGE_CLASS.name)", "def setup(self):\n self.build_serverkeyhash()\n self.build_agent_pubkey()\n self.load_registration_key()", "def setup(cls):\n super(TestNonVendorProject, cls).setup()\n cls.change_directory(Path(\"..\"))\n cls.agent_name = \"generic_buyer\"\n cls.run_cli_command(\n \"fetch\", \"fetchai/generic_buyer:0.30.5\", \"--alias\", cls.agent_name\n )\n cls.agents.add(cls.agent_name)\n cls.set_agent_context(cls.agent_name)", "def setup(hass: HomeAssistant, base_config: ConfigType) -> bool: # noqa: C901\n\n hass.data[DOMAIN] = {}\n\n # Parse configuration into a dict of device name to physical address\n # represented as a list of four elements.\n device_aliases = {}\n devices = base_config[DOMAIN].get(CONF_DEVICES, {})\n _LOGGER.debug(\"Parsing config %s\", devices)\n device_aliases.update(parse_mapping(devices))\n _LOGGER.debug(\"Parsed devices: %s\", device_aliases)\n\n platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)\n\n loop = (\n # Create own thread if more than 1 CPU\n hass.loop\n if multiprocessing.cpu_count() < 2\n else None\n )\n host = base_config[DOMAIN].get(CONF_HOST)\n display_name = base_config[DOMAIN].get(CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)\n if host:\n adapter = TcpAdapter(host, name=display_name, activate_source=False)\n else:\n adapter = CecAdapter(name=display_name[:12], activate_source=False)\n hdmi_network = HDMINetwork(adapter, loop=loop)\n\n def _adapter_watchdog(now=None):\n _LOGGER.debug(\"Reached _adapter_watchdog\")\n event.call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n if not adapter.initialized:\n _LOGGER.info(\"Adapter not initialized; Trying to restart\")\n hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)\n adapter.init()\n\n _adapter_watchdog_job = HassJob(_adapter_watchdog, cancel_on_shutdown=True)\n\n @callback\n def _async_initialized_callback(*_: Any):\n \"\"\"Add watchdog on initialization.\"\"\"\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n\n hdmi_network.set_initialized_callback(_async_initialized_callback)\n\n def _volume(call: ServiceCall) -> None:\n \"\"\"Increase/decrease volume and mute/unmute system.\"\"\"\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n 
_process_volume(KEY_VOLUME_UP, att)\n elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown command %s\", cmd)\n\n def _process_volume(cmd, att):\n if isinstance(att, (str,)):\n att = att.strip()\n if att == CMD_PRESS:\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n elif att == CMD_RELEASE:\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n else:\n att = 1 if att == \"\" else int(att)\n for _ in range(0, att):\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n\n def _tx(call: ServiceCall) -> None:\n \"\"\"Send CEC command.\"\"\"\n data = call.data\n if ATTR_RAW in data:\n command = CecCommand(data[ATTR_RAW])\n else:\n src = data.get(ATTR_SRC, ADDR_UNREGISTERED)\n dst = data.get(ATTR_DST, ADDR_BROADCAST)\n if ATTR_CMD in data:\n cmd = data[ATTR_CMD]\n else:\n _LOGGER.error(\"Attribute 'cmd' is missing\")\n return\n if ATTR_ATT in data:\n if isinstance(data[ATTR_ATT], (list,)):\n att = data[ATTR_ATT]\n else:\n att = reduce(lambda x, y: f\"{x}:{y:x}\", data[ATTR_ATT])\n else:\n att = \"\"\n command = CecCommand(cmd, dst, src, att)\n hdmi_network.send_command(command)\n\n def _standby(call: ServiceCall) -> None:\n hdmi_network.standby()\n\n def _power_on(call: ServiceCall) -> None:\n hdmi_network.power_on()\n\n def _select_device(call: ServiceCall) -> None:\n \"\"\"Select the active device.\"\"\"\n if not (addr := call.data[ATTR_DEVICE]):\n _LOGGER.error(\"Device not found: %s\", call.data[ATTR_DEVICE])\n return\n if addr in device_aliases:\n addr = device_aliases[addr]\n else:\n entity = hass.states.get(addr)\n _LOGGER.debug(\"Selecting entity %s\", entity)\n if entity is not None:\n addr = entity.attributes[\"physical_address\"]\n _LOGGER.debug(\"Address acquired: %s\", addr)\n if addr is None:\n _LOGGER.error(\n \"Device %s has not physical address\", call.data[ATTR_DEVICE]\n )\n return\n if not isinstance(addr, (PhysicalAddress,)):\n addr = PhysicalAddress(addr)\n hdmi_network.active_source(addr)\n _LOGGER.info(\"Selected %s (%s)\", call.data[ATTR_DEVICE], addr)\n\n def _update(call: ServiceCall) -> None:\n \"\"\"Update if device update is needed.\n\n Called by service, requests CEC network to update data.\n \"\"\"\n hdmi_network.scan()\n\n def _new_device(device):\n \"\"\"Handle new devices which are detected by HDMI network.\"\"\"\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )\n\n def _shutdown(call):\n hdmi_network.stop()\n\n def _start_cec(callback_event):\n \"\"\"Register services and start HDMI network to watch for devices.\"\"\"\n hass.services.register(\n DOMAIN, SERVICE_SEND_COMMAND, _tx, SERVICE_SEND_COMMAND_SCHEMA\n )\n hass.services.register(\n DOMAIN, SERVICE_VOLUME, _volume, schema=SERVICE_VOLUME_SCHEMA\n )\n hass.services.register(\n DOMAIN,\n SERVICE_UPDATE_DEVICES,\n _update,\n schema=SERVICE_UPDATE_DEVICES_SCHEMA,\n )\n hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)\n hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)\n 
hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, _select_device)\n\n hdmi_network.set_new_device_callback(_new_device)\n hdmi_network.start()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)\n return True", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)", "def setPlatform(self):\n\t\treturn None" ]
[ "0.68824685", "0.64926875", "0.64503926", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.6356767", "0.62709063", "0.62343746", "0.62263495", "0.6225092", "0.6217122", "0.6190478", "0.6184887", "0.61841863", "0.61841863", "0.61841863", "0.6135502", "0.6131949", "0.6123077", "0.6119434", "0.61146224", "0.61116546", "0.6110563", "0.6103895", "0.60897034", "0.60215175", "0.60133773", "0.59769", "0.5972516", "0.597229", "0.597229", "0.597229", "0.597229", "0.59690464", "0.5953909", "0.5941452", "0.59341186", "0.59341186", "0.59341186", "0.59341186", "0.59341186", "0.5933806", "0.5916426", "0.59076524", "0.59043753", "0.5877403", "0.586857", "0.5868102", "0.5868102", "0.5868102", "0.5868102", "0.58609456", "0.5848876", "0.5842302", "0.58399385", "0.58367044", "0.58188635", "0.58133054", "0.58051044", "0.57697713", "0.5768033", "0.574555", "0.5744446", "0.5744446", "0.5742694", "0.5738907", "0.573867", "0.573867", "0.5725567", "0.57093215", "0.5708395", "0.57014406", "0.5690731", "0.56905484", "0.5690395", "0.5686235", "0.56770635", "0.5675494", "0.5672222", "0.56531334", "0.5644837", "0.5641663", "0.563687", "0.56280965", "0.5614021", "0.56108767", "0.5609696", "0.5605822", "0.560346", "0.55973506", "0.5596772" ]
0.6602621
1
Initialize the Clementine device.
def __init__(self, client, name): self._client = client self._attr_name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doInitializeDevice(self):\n super().doInitializeDevice()", "def initialise(self):\n self.device.initialise()\n return \"OK\"", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def initialize(self, cwrap):\n pass", "def _initialize(self):\n self.send_init_command()", "async def init_device(self):\n await Device.init_device(self)\n # PROTECTED REGION ID(AsyncTabata.init_device) ENABLED START #\n self.logger = logging.getLogger(__name__)\n self._lock = threading.Lock()\n self._dev_factory = DevFactory()\n self._prepare = 10\n self._work = 20\n self._rest = 10\n self._cycles = 8\n self._tabatas = 1\n self._running_state = RunningState.PREPARE\n self.subscribed = False\n self.set_state(DevState.OFF)\n # The below commented commands are not really needed\n # since in GreenMode.Asyncio mode the monitor\n # lock is disabled by default.\n # util = tango.Util.instance()\n # util.set_serial_model(tango.SerialModel.NO_SYNC)\n # PROTECTED REGION END # // AsyncTabata.init_device", "async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")", "def initialize():\n dislin.disini()", "def __init__(self, device):\n self.device = device\n self.device.get_active_configuration()", "def Initialise(self):\n self.__m_Platform.Initialise()\n self.__m_Pump.Initialise( False )", "def teleopInit(self):\n # self.drive.setSafetyEnabled(True)\n self.compressor.start()\n pass", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def _initialize(self):\n self.flush()\n print(\"Initializing sensor...\")\n try:\n self.get_sample()\n print('Initialization successful')\n except:\n print('Initialization failed. 
Please disconnect and reconnect sensor.')", "def __init__(self, device):\n logging.info(\"Cert: Init\")\n self._device = device\n self._device.wait_channel_ready()\n self._hci = PyHci(device)\n self._hci.register_for_events(\n hci_packets.EventCode.ENCRYPTION_CHANGE, hci_packets.EventCode.CHANGE_CONNECTION_LINK_KEY_COMPLETE,\n hci_packets.EventCode.CENTRAL_LINK_KEY_COMPLETE, hci_packets.EventCode.RETURN_LINK_KEYS,\n hci_packets.EventCode.PIN_CODE_REQUEST, hci_packets.EventCode.LINK_KEY_REQUEST,\n hci_packets.EventCode.LINK_KEY_NOTIFICATION, hci_packets.EventCode.ENCRYPTION_KEY_REFRESH_COMPLETE,\n hci_packets.EventCode.IO_CAPABILITY_REQUEST, hci_packets.EventCode.IO_CAPABILITY_RESPONSE,\n hci_packets.EventCode.REMOTE_OOB_DATA_REQUEST, hci_packets.EventCode.SIMPLE_PAIRING_COMPLETE,\n hci_packets.EventCode.USER_PASSKEY_NOTIFICATION, hci_packets.EventCode.KEYPRESS_NOTIFICATION,\n hci_packets.EventCode.USER_CONFIRMATION_REQUEST, hci_packets.EventCode.USER_PASSKEY_REQUEST,\n hci_packets.EventCode.REMOTE_HOST_SUPPORTED_FEATURES_NOTIFICATION)\n self._hci_event_stream = self._hci.get_event_stream()", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def __init__(self):\n super(UpnpEmbeddedDevice, self).__init__()\n return", "def __init__(self):\n self.data0 = [] # This will hold data from ADC0\n self.data1 = [] # This will hold data from ADC1\n self.dev = _configure_device()", "def initialize(self):\n self.ros.enable()\n self.phone_link.enable()", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def initialize_electronics(self):\n\n self.electronics = 
ArduinoModel(**self.config['electronics']['arduino'])\n self.logger.info('Initializing electronics arduino')\n self.electronics.initialize()", "def __init__(self):\n self.cad = pifacecad.PiFaceCAD()\n self.listener = pifacecad.SwitchEventListener(chip=self.cad)\n for i in range(8):\n self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)\n self.listener.activate()\n atexit.register(self.atexit)", "def __init__(self, dev):\n self.dev = dev\n self.dev.cla = 0x80", "def init():\n rino.initialize.initialize()", "def initialize():\n environment = Environment()\n environment.setup()", "def __init__(self) -> None:\n self.sensor = serial.Serial(config.DEVICE)\n super().__init__()", "def setUp(self):\n self.ser = Serial()\n self.device_obj = ZBSensor(self.ser)", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)", "def initialize(self):\n self.logger.debug('Initializing Basler Camera')\n tl_factory = pylon.TlFactory.GetInstance()\n devices = tl_factory.EnumerateDevices()\n if len(devices) == 0:\n #print('No camera found')\n self.logger.warning('No camera found')\n\n self._driver = None\n for device in devices:\n if self.cam_num in device.GetFriendlyName():\n self._driver = pylon.InstantCamera()\n self._driver.Attach(tl_factory.CreateDevice(device))\n self._driver.Open()\n self.friendly_name = device.GetFriendlyName()\n print(device.GetFriendlyName())\n\n if not self._driver:\n msg = f'Basler {self.cam_num} not found. 
Please check if the camera is connected'\n self.logger.error(msg)\n return\n\n # self.logger.info(f'Loaded camera {self._driver.GetDeviceInfo().GetModelName()}')\n\n # self._driver.RegisterConfiguration(pylon.SoftwareTriggerConfiguration(), pylon.RegistrationMode_ReplaceAll,\n # pylon.Cleanup_Delete)\n\n #self.config.fetch_all()", "def initialise(self):\n self.set_up()", "def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()", "def __init__(self):\n self._device_info = None", "def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()", "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def init_ca(self):\n self._init_dir()\n self._init_serial()\n self._init_keys()", "def __init__(self, uid):\n Device.__init__(self, uid)\n\n self.expected_name = 'Analog Out Bricklet'\n\n self.binding_version = [1, 0, 0]", "def init(self):\n\n pygame.init()\n pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Gears 4 Geeks\")\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.ser = serial.Serial('COM4', 9600)\n\n #ADAFRUIT_IO_KEY = 'd1a1bd3737714fa488e0364c775a4b4d' ##This will only be good until the end of the competition\n #self.aio = Client(ADAFRUIT_IO_KEY)", "def __init__(self):\n self.device_id = None\n self.devices = []\n self.onvif_config = {}", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def Init(self, config):\r\n pass", "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def initialize(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def initialize():\n\t\tDBHelper.con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')", "def initialize(self) -> None:\n pass", "def teleopInit(self):\n self.globalInit()\n self.teleop.start()", "def __init__(self):\n nvmlInit()\n n_devices = nvmlDeviceGetCount()\n devices_handlers_list = [nvmlDeviceGetHandleByIndex(i) for i in range(n_devices)]\n\n self.devices = {\n '{}-{}'.format(NvmlHandler.exec_nvml_function(nvmlDeviceGetName, device).decode('ascii'), i): device\n for i, device in enumerate(devices_handlers_list)\n }", "def __init__(self):\r\n # Check device ID.\r\n chip_id = self._read_byte(_BME280_REGISTER_CHIPID)\r\n if _BME280_CHIPID != chip_id:\r\n raise RuntimeError('Failed to find BME280! 
Chip ID 0x%x' % chip_id)\r\n self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)\r\n time.sleep(0.5)\r\n self._read_coefficients()\r\n self.sea_level_pressure = 1013.25\r\n \"\"\"Pressure in hectoPascals at sea level. Used to calibrate `altitude`.\"\"\"\r\n # turn on humidity oversample 16x\r\n self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)\r\n self._t_fine = None", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def initialize(self):\n self.lib.Initialize()\n\n self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,\n 'External Exposure': 7, 'External FVB EM': 9,\n 'Software Trigger': 10,\n 'External Charge Shifting': 12}\n self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}\n\n # Initial values\n\n self.readout_packing_state = False\n self.readout_packing = self.readout_packing_state\n\n self.readout_mode_mode = 'Image'\n self.readout_mode = self.readout_mode_mode\n\n self.photon_counting_mode_state = False\n self.photon_counting_mode = self.photon_counting_mode_state\n\n self.frame_transfer_mode_state = False\n self.frame_transfer_mode = self.frame_transfer_mode_state\n\n self.fan_mode_index = 'onfull'\n self.fan_mode = self.fan_mode_index\n\n self.EM_gain_mode_index = 'RealGain'\n self.EM_gain_mode = self.EM_gain_mode_index\n\n self.cooled_on_shutdown_value = False\n self.cooled_on_shutdown = self.cooled_on_shutdown_value\n\n self.baseline_offset_value = 100\n self.baseline_offset = self.baseline_offset_value\n\n self.adv_trigger_mode_state = True\n self.adv_trigger_mode = self.adv_trigger_mode_state\n\n self.acq_mode = 'Single Scan'\n self.acquisition_mode = self.acq_mode\n\n self.amp_typ = 0\n\n self.horiz_shift_speed_index = 0\n self.horiz_shift_speed = self.horiz_shift_speed_index\n\n self.vert_shift_speed_index = 0\n self.vert_shift_speed = self.vert_shift_speed_index\n\n self.preamp_index = 0\n self.preamp = self.preamp_index\n\n self.temperature_sp = 0 * degC\n self.temperature_setpoint = self.temperature_sp\n\n self.auxout = np.zeros(4, dtype=bool)\n for i in np.arange(1, 5):\n self.out_aux_port[i] = False\n\n self.trigger_mode_index = 'Internal'\n self.trigger_mode = self.trigger_mode_index", "def initialize(self):\n\t\tpass", "def __init__(self):\n i2c.Pn532_i2c.__init__(self)\n self._uid = False", "def init(self):\n # Initialize runtime and MDK:\n self.runtime = fakeRuntime()\n self.runtime.getEnvVarsService().set(\"DATAWIRE_TOKEN\", \"somevalue\")\n self.runtime.dependencies.registerService(\"failurepolicy_factory\",\n RecordingFailurePolicyFactory())\n self.mdk = MDKImpl(self.runtime)\n self.mdk.start()\n self.disco = self.mdk._disco\n # Create a session:\n self.session = self.mdk.session()", "def __init__(self, device):\n self._unique_id = device\n self._device = AehW4a1(device)\n self._fan_modes = FAN_MODES\n self._swing_modes = SWING_MODES\n self._preset_modes = PRESET_MODES\n self._attr_available = False\n self._on = None\n self._current_temperature = None\n self._target_temperature = None\n self._attr_hvac_mode = None\n self._fan_mode = None\n self._swing_mode = None\n self._preset_mode = None\n self._previous_state = None", "def doInitializeDevice(self):\n try:\n\n if self.serialNumber == \"*\" or self.serialNumber == \".*\":\n self.device = OISpectrometer.matchUniqueUSBDevice( idProduct=self.idProduct)\n else:\n self.device = 
OISpectrometer.matchUniqueUSBDevice( idProduct=self.idProduct,\n serialNumber=self.serialNumber)\n\n \"\"\" Below are all the USB protocol details. This requires reading\n the USB documentation, the Spectrometer documentation and many other \n details. What follows may sound like gibberish.\n\n There is a single USB Configuration (default) with a single USB Interface \n without alternate settings, so we can use (0,0).\n \"\"\"\n self.device.set_configuration()\n self.configuration = self.device.get_active_configuration()\n self.interface = self.configuration[(0,0)]\n\n \"\"\"\n We are working on the reasonable assumption from the documentation\n that the first input and output endpoints are the main endpoints and the\n second input is the data endpoint. If that is not the case, the subclass can\n simply reassign the endpoints properly in its __init__ function. \n \"\"\"\n for endpoint in self.interface:\n \"\"\" The endpoint address has the 8th bit set to 1 when it is an input.\n We can check with the bitwise operator & (and) 0x80. It will be zero\n if an output and non-zero if an input. \"\"\"\n if endpoint.bEndpointAddress & 0x80 != 0:\n self.inputEndpoints.append(endpoint)\n else:\n self.outputEndpoints.append(endpoint)\n\n\n if len(self.inputEndpoints) >= 2 or len(self.outputEndpoints) > 0:\n \"\"\" We have at least 2 input endpoints and 1 output. We assign the\n endpoints according to the documentation, otherwise\n the subclass will need to assign them.\"\"\"\n self.epCommandOut = self.outputEndpoints[self.epCommandOutIdx]\n self.epMainIn = self.inputEndpoints[self.epMainInIdx]\n self.epSecondaryIn = self.inputEndpoints[self.epSecondaryInIdx]\n self.epParameters = self.inputEndpoints[self.epParametersIdx]\n self.epStatus = self.inputEndpoints[self.epStatusIdx]\n\n self.flushEndpoints()\n self.sendCommand(b'0x01')\n time.sleep(0.1)\n self.getCalibration()\n except Exception as err:\n raise UnableToInitialize(\"Error when initializing device: {0}\".format(err))", "def init():", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def initialize_element(self):\n init_command = {\n \"StartLearning\": True,\n \"AgentID\": 1854\n }\n msg = json.dumps(init_command).encode('unicode_escape')\n self.socket_control.send(msg)", "def __init__(self):\r\n self.alarm_device = alarm.alarm()\r\n self.camera_device = camera.camera()", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def __init__(self, config):\n self.__config = config\n self.__logger = SLoggerHandler().getLogger(LoggerNames.EXPERIMENT_C)\n self.__num_gpus = ConfigProvider().get_config(\"controllerConfig.json\")[\"hardware\"][\"numGPUs\"]", "def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0", "def initialize(self):\n pass # pragma: no cover", "def __init__(self, parent, endpoint):\n self._Parent = parent\n self.endpoint = endpoint\n self.device_type = self._Parent._DeviceTypes.get('wemo_switch')\n self.device_commands = parent._Devices.device_commands\n self.yombo_device = None\n self.state = self.endpoint.get_state()\n self.commands = {}\n self.last_request_id = None\n self.device_mfg = \"wemo\"\n self.FEATURES: dict = {}", "def init():\n pass", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, initial=RPIO.LOW)", "def _initialize_data(self):\n 
self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def _setup_device(self):\n if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_ACTIVATE_CRYPTO1, True) < 0:\n raise Exception(\"Error setting Crypto1 enabled\")\n if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_INFINITE_SELECT, False) < 0:\n raise Exception(\"Error setting Single Select option\")\n if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_AUTO_ISO14443_4, False) < 0:\n raise Exception(\"Error setting No Auto ISO14443-A jiggery pokery\")\n if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_HANDLE_PARITY, True) < 0:\n raise Exception(\"Error setting Easy Framing property\")", "def initialize(self):\n watch_tv = self.args['watch_tv']\n cleaning = self.args['cleaning']\n self.sensor_living = self.get_app('globals').sensor_living # type: Sensor\n self.sensor_bedroom = self.get_app('globals').sensor_bedroom # type: Sensor\n self.sensor_spare = self.get_app('globals').sensor_spare # type: Sensor\n self.listen_state(self.watching_tv, watch_tv, new=\"on\")\n self.listen_state(self.stop_watching, watch_tv, new=\"off\")\n self.listen_state(self.clean_on, cleaning, new='on')\n self.listen_state(self.clean_off, cleaning, new='off')", "def _initialize(self):\n if \"identification\" not in self._configuration:\n self._configuration[\"identification\"] = {}\n if \"uuid\" not in self._configuration[\"identification\"]:\n self._configuration[\"identification\"][\"uuid\"] = str(uuid.uuid1(1))\n self._configuration[\"identification\"][\"starttime\"] = time.time()\n self._configuration[\"identification\"][\"pid\"] = os.getpid()\n self._configuration[\"identification\"][\"node\"] = platform.node()\n if self._config_file:\n with open(self._config_file, \"w\") as json_file:\n json_file.write(json.dumps(self._configuration, indent=4, sort_keys=True))\n coordinators = []\n if \"coordination\" in self._configuration:\n if isinstance(self._configuration[\"coordination\"], dict):\n coordinators = [self._configuration[\"coordination\"]]\n elif isinstance(self._configuration[\"coordination\"], list):\n coordinators = self._configuration[\"coordination\"]\n\n #TODO (): handle multiplicity of coordinators or remove completely\n for coordinator in coordinators:\n self.logger.debug(coordinator)\n self.logger.info(\"Loading %s\", coordinator[\"classname\"])\n obj = self._get_class_by_name(coordinator[\"classname\"])\n self._coordination = obj(coordinator, on_node_update=self._handle_coordination_message)\n\n self.register()\n self._coordination.setup_watches()", "def __init__(self):\n super().__init__()\n # A Sunstone was seen to take 23 minutes or longer to come up\n # on a memory-overbooked server presumably thrashing to disk,\n # set this threshold to account for a busy server but don't\n # set so high as to accept a machine that is obviously running\n # too slowly to allow useful testing.\n self.INITIAL_LAUNCH_WAIT_SEC = 1200\n self.POST_PROMPT_WAIT_SEC = 110\n self.CONNECTION_TIMEOUT = 480\n self.EXPECT_TIMEOUT = 60\n self.INITIAL_LAUNCH_DISCOVERY_WAIT_SEC = 2\n\n # Wait this much time between telnetting to the device and\n # hitting <Enter>. 
Lack of a delay was causing connection timeouts\n # when running on a LaaS server in vCenter Esxi mode.\n #\n self.ESCAPE_CHAR_CALLBACK_PRE_SENDLINE_PAUSE_SEC = 1", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def initialize(self):\n if not self._ready:\n self._real_initialize()\n self._ready = True", "def _initialize_hardware(self):\n # Import\n try:\n import board\n import busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()", "def __init__(self, serialNumber=None, connect=True):\n # TODO: Make A class for each channel\n # that way the settings will make more sense\n\n # These do not correspond to API values, but rather to\n # the \"true\" voltage as seen at the oscilloscope probe\n self.CHRange = [5.0] * self.NUM_CHANNELS\n self.CHOffset = [0.0] * self.NUM_CHANNELS\n self.CHCoupling = [1] * self.NUM_CHANNELS\n self.ProbeAttenuation = [1.0] * self.NUM_CHANNELS\n\n self.handle = None\n\n if connect is True:\n self.open(serialNumber)", "def initialise(self):", "def init(self) -> None:\n ...", "def initialize(self, *args, **kwargs):\n self.initialized = True", "def __init__(self, name='demo'):\n init()\n joystick.init()\n for i in range(joystick.get_count()):\n joystick.Joystick(i).init()\n\n State.game = util.load_cfg(name)\n State.clock = Clock(10, State.game['frame_rate'])\n State.window = display.set_mode(State.game['screen_size'])\n\n self._last_joystick_action = None\n self.create_screens()", "def device_connect(self):\n pass", "def __init__(self):\n self.pc = 0\n self.reg = [0] * 8\n self.ram = [0] * 256\n\n self.running = False", "def do_init(self):\n\n pass", "def initialize(self, butler):\n self.getRawMd(butler)\n self.getCalexp(butler)\n self.getPfsArm(butler)", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)" ]
[ "0.65377396", "0.65024364", "0.638117", "0.63599783", "0.63138694", "0.62079704", "0.61737376", "0.61604065", "0.61414874", "0.6086899", "0.6078774", "0.607841", "0.6074798", "0.6074586", "0.60706323", "0.6059677", "0.605526", "0.6050378", "0.6042103", "0.60385203", "0.6037933", "0.6016432", "0.5997463", "0.5995013", "0.5994989", "0.59744155", "0.59671897", "0.59651476", "0.59643614", "0.5925359", "0.59044164", "0.58884734", "0.5874705", "0.58675075", "0.58668846", "0.5865704", "0.5865704", "0.5865704", "0.58553874", "0.58382064", "0.58272755", "0.58223045", "0.58190477", "0.58190477", "0.58190477", "0.58190477", "0.58190477", "0.5818156", "0.58168113", "0.58141834", "0.58072793", "0.5784087", "0.5779609", "0.5778427", "0.5778063", "0.5774093", "0.57422864", "0.57422864", "0.57403964", "0.57383376", "0.5737367", "0.57276666", "0.57104653", "0.57068884", "0.5706028", "0.57018584", "0.5701812", "0.5700097", "0.56968254", "0.56913173", "0.5685166", "0.5677001", "0.5674308", "0.5666612", "0.56598014", "0.5652815", "0.56527996", "0.5650118", "0.5644083", "0.5640586", "0.5640162", "0.5640162", "0.5640162", "0.5640162", "0.5640162", "0.5640162", "0.5640162", "0.5640162", "0.5631018", "0.56300193", "0.56084245", "0.56070536", "0.5593461", "0.55872005", "0.5584969", "0.55782163", "0.55769145", "0.5572812", "0.55704814", "0.55702156", "0.55606997" ]
0.0
-1
Retrieve the latest data from the Clementine Player.
def update(self) -> None: try: client = self._client if client.state == "Playing": self._attr_state = MediaPlayerState.PLAYING elif client.state == "Paused": self._attr_state = MediaPlayerState.PAUSED elif client.state == "Disconnected": self._attr_state = MediaPlayerState.OFF else: self._attr_state = MediaPlayerState.PAUSED if client.last_update and (time.time() - client.last_update > 40): self._attr_state = MediaPlayerState.OFF volume = float(client.volume) if client.volume else 0.0 self._attr_volume_level = volume / 100.0 if client.active_playlist_id in client.playlists: self._attr_source = client.playlists[client.active_playlist_id]["name"] else: self._attr_source = "Unknown" self._attr_source_list = [s["name"] for s in client.playlists.values()] if client.current_track: self._attr_media_title = client.current_track["title"] self._attr_media_artist = client.current_track["track_artist"] self._attr_media_album_name = client.current_track["track_album"] self._attr_media_image_hash = client.current_track["track_id"] else: self._attr_media_image_hash = None except Exception: self._attr_state = MediaPlayerState.OFF raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def player_data(self):\n return self._player", "def latest_data(self):\n if self._data:\n return self._data\n return None", "def NowPlaying(self):\n self.logger.debug(\"Fetching currently playing information\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n player = xbmc.Player.GetActivePlayers()[0]\n playerid = player['playerid']\n\n if player['type'] == 'video':\n playerprop = ['speed', 'position', 'time', 'totaltime',\n 'percentage', 'subtitleenabled', 'currentsubtitle',\n 'subtitles', 'currentaudiostream', 'audiostreams']\n itemprop = ['thumbnail', 'showtitle', 'season', 'episode', 'year', 'fanart']\n\n elif player['type'] == 'audio':\n playerprop = ['speed', 'position', 'time', 'totaltime', 'percentage']\n itemprop = ['thumbnail', 'title', 'artist', 'album', 'year', 'fanart']\n\n app = xbmc.Application.GetProperties(properties=['muted', 'volume'])\n player = xbmc.Player.GetProperties(playerid=playerid, properties=playerprop)\n item = xbmc.Player.GetItem(playerid=playerid, properties=itemprop)\n\n return {'playerInfo': player, 'itemInfo': item, 'app': app}\n except:\n self.logger.debug(\"Unable to fetch currently playing information!\")\n return", "def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def latest_data(self):\n if self._data:\n return self._data[0]\n return None", "def get(self, player_name):\n player = self._get_player(player_name)\n return player['data'''] if player else None", "def data(self):\n self._get_latest_content()\n return self._data.get('data', {})", "def getLatestData(self):\n jsonText = self.session.get(self.jsonURL).text\n\n # Somehow, the output I am getting has some garbage at the beginning.\n # So, skipping all text before first instance of \"{\".\n jsonText = jsonText[jsonText.find(\"{\"):]\n latestData = json.loads(jsonText)\n return latestData", "def getnewdata():\n try:\n os.remove(cachepath)\n except os.error:\n pass\n tdelta = int(EPGHOURS)*60*60\n now = time.time()\n later = now + tdelta\n # 2020-03-24%2021%3A00%3A00.000%2B0000\n starttime = urllib.parse.quote(datetime.fromtimestamp(now).\n strftime('%Y-%m-%d %H:00:00.000+0000'))\n # 2020-03-25%2005%3A00%3A00.000%2B0000\n stoptime = urllib.parse.quote(datetime.fromtimestamp(later).\n strftime('%Y-%m-%d %H:00:00.000+0000'))\n url = \"http://api.pluto.tv/v2/channels?start=\" + starttime + \"&stop=\" + stoptime\n\n if debugmode:\n logging.debug(url)\n\n logging.debug(\"Using api.pluto.tv, writing %s.\", CACHEFILE)\n\n try:\n wget.download(url, out=cachepath)\n except IOError:\n logging.error(\"There was an issue downloading EPG data. 
Exiting.\")\n sys.exit()", "def data(self):\r\n if self.metadata is None:\r\n self.refresh()\r\n return self.metadata", "async def get_self(self) -> PlayerInfo:\n e = await self.request.request(url=f'https://users.roblox.com/v1/users/authenticated', method='get')\n a = PlayerInfo(player_id=e['id'], request=self.request)\n await a.update()\n return a", "def get(self):\n return self.get_data()", "def _get_player_info(self):\n return [player._player_info() for player in self.players.values()]", "def readPlaylistData(self):\n return gatherPlaylistData(10)", "def get(self):\r\n return self.data", "async def do_players():\n\n download = urllib.request.urlopen(server_api)\n data = json.loads(download.read())\n max = data['players']['max']\n now = data['players']['now']\n await bot.send_message(c, f'Max: {max}')\n await bot.send_message(c, f'Now: {now}')", "def main():\n print get_latest_data()", "def fetch_data(self):", "def data(self):\n return self.source_data[\"game\"]", "def state(self):\n _LOGGER.debug(\"Fetching player info\")\n parameters = {\n 'cmd': None,\n 'param3': 'state.json'\n }\n try:\n res = requests.get(url=self.url, headers=headers, params=parameters, timeout=self.timeout).json()\n except (ConnectionError, OSError) as e:\n _LOGGER.error(\"Fetching player state failed: %s\", e)\n res = None\n return res", "def get(self):\n return self.data", "def get_data(self):\n pass", "def get_data(self):\n pass", "async def fetch_data(self):\n url = URL_HASSIO_VERSION.format(self.upstream)\n try:\n _LOGGER.info(\"Fetch update data from %s\", url)\n with async_timeout.timeout(10, loop=self.loop):\n async with self.websession.get(url) as request:\n data = await request.json(content_type=None)\n\n except (aiohttp.ClientError, asyncio.TimeoutError, KeyError) as err:\n _LOGGER.warning(\"Can't fetch versions from %s -> %s\", url, err)\n return\n\n except json.JSONDecodeError as err:\n _LOGGER.warning(\"Can't parse versions from %s -> %s\", url, err)\n return\n\n # data valid?\n if not data:\n _LOGGER.warning(\"Invalid data from %s\", url)\n return\n\n # update versions\n self._data[ATTR_HOMEASSISTANT] = data.get('homeassistant')\n self._data[ATTR_HASSIO] = data.get('hassio')\n self.save()", "def _fetch_data(self):\n pass", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def get_data(self):\n\n self.set_query_string()\n self.realtime_data = super().get_data()\n self.set_coordinate()\n return self.realtime_data", "def retrieve(self):\n pass", "def getCurrentCacheData(self):\n return self.getCacheData(int(self.currentFrameNumber - 1))", "def get_data():\n pass", "def get_player_data(self, playername=None):\r\n session = requests.session()\r\n url_comuniazo = 'http://www.comuniazo.com'\r\n user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:35.0) Gecko/20100101 Firefox/35.0'\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': url_comuniazo,\r\n \"User-Agent\": user_agent}\r\n url_jugadores = url_comuniazo + '/comunio/jugadores/'\r\n suffix, lastname = '', ''\r\n count = 0\r\n dates, points, prices = list(), list(), list()\r\n while True and len(dates) < 2:\r\n playername = self.check_exceptions(playername)\r\n req = session.get(url_jugadores + playername.replace(\" \", \"-\").replace(\".\", \"\").replace(\"'\", \"\") + 
suffix,\r\n headers=headers).content\r\n dates_re = re.search(\"(\\\"[0-9 ][0-9] de \\w+\\\",?,?)+\", req)\r\n try:\r\n dates = dates_re.group(0).replace('\"', '').split(\",\")\r\n dates = self.translate_dates(dates)\r\n except AttributeError:\r\n if count == 0:\r\n suffix = '-2'\r\n count += 1\r\n continue\r\n elif count == 1:\r\n lastname = playername.split(\" \")[1]\r\n playername = playername.split(\" \")[0]\r\n suffix = ''\r\n count += 1\r\n continue\r\n elif count == 2:\r\n playername = lastname\r\n count += 1\r\n continue\r\n\r\n data_re = re.search(\"data: \\[(([0-9nul]+,?)+)\\]\", req)\r\n if data_re is None:\r\n pass\r\n for price in data_re.group(1).split(','):\r\n try:\r\n prices.append(int(price))\r\n except ValueError:\r\n # No price\r\n pass\r\n\r\n try:\r\n html = BeautifulSoup(req, \"html.parser\")\r\n points_rows = html.find('table', {'class': 'points-list'}).find_all('tr')\r\n for row in points_rows:\r\n gameday = int(row.td.text)\r\n if row.div:\r\n points.append([gameday, int(row.div.text)])\r\n else:\r\n points.append([gameday, 0])\r\n except AttributeError:\r\n # Player without points\r\n pass\r\n\r\n if suffix == '-2' or len(dates) > 2:\r\n break\r\n else:\r\n suffix = '-2'\r\n\r\n return dates, prices, points", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def _get_soundcloud_data(self):\n self.title, self.stream_url = get_track_info(self.URL)", "def fetch_data():\n data.fetch_data()\n data.start_updating()", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def get_data(self):\r\n pass", "def data_wait(self):\n return self.get(timeout=self._timeout)", "def fetch(self) -> None:\n pass", "def getPlayerList(self):\n return(self.playerList)", "def fetch(self):\n pass", "def fetch(self):\n pass", "def _retrieve_plays(self):\n try:\n recents = self._spotify._get(\"me/player/recently-played\", limit=50)\n except SpotifyException as se:\n if 'The access token expired' in se.msg:\n self._renew_tokens()\n recents = self._spotify._get(\"me/player/recently-played\", limit=50)\n else:\n raise\n self._plays = recents['items']", "def get_current_volume(self):\n latest_data = ast.literal_eval(\n urlopen(\n 'https://www.predictit.org/PublicData/GetChartData?contractIds=' + str(self.cid) + '&timespan=24H').read().decode(\n 'utf-8').replace(\n 'false', 'False').replace('true', 'True').replace('null', 'None'))[-1]\n self.latest_volume = latest_data['TradeVolume']\n return", "def read_player_data(self, player_file):\n\n #read a single players file\n with open(self.player_path + player_file, 'r') as f:\n data = json.load(f)\n f.close()\n return(data)", "def latest(self):\n return self.journal_data[self.latest_id]", "def data(self):\n try:\n return self.get_nowait()\n except Empty:\n return None", "def get_all(self):\n try:\n return self.current_data\n except:\n print('No data received from sensor')", "def get_players(self):\r\n return self.players.values()", "async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output", "def 
get_currently_playing(self):\r\n return requests.get(\r\n f\"{API_URL}/me/player/currently-playing\",\r\n headers={\r\n \"Accept\": \"application/json\",\r\n \"Authorization\": f\"Bearer {self.access_token}\"\r\n }\r\n )", "def get_last_update(self):\n return self.ticker.all().order_by('-created').first()", "async def get_metadata(self, player: model.Player):\n return await self._get_metadata(\n player.server, player.realm, player.player_id)", "def get_data(self):", "def getCurentData(self):\n if not self.labExperiment:\n super().getCurentData()\n else:\n return np.array(self.connection.query('get_actuator_data'))", "def get_player(self):\r\n return self.player_control.get_player()", "def get_latest_ts_data(self):\n\n from .timeseriesdata import TimeSeriesData\n\n try:\n data = TimeSeriesData.objects.filter(\n sensor=self,\n ).latest(\"ts\")\n except TimeSeriesData.DoesNotExist:\n # If the device hasn't made any timeseries data yet.\n return {}\n\n return data", "def get(self):\n\t\tif not self.threaded:\n\t\t\tself.record()\n\t\timg = self.Video[-1]\n\t\ttime = self.timestamps[-1]\n\t\tif self.newAvailable:\n\t\t\tnew = True\n\t\t\tself.newAvailable = False\n\t\t\treturn new, img, time\n\t\telse:\n\t\t\tnew = False\n\t\t\treturn new, img, time", "def _get_connected_player_list(self):\r\n if not zpgapi.is_zgp_api_enabled():\r\n # API is not configured, skip this.\r\n return []\r\n\r\n cache_key = 'api_connected_players'\r\n cache_val = cache.get(cache_key)\r\n\r\n if cache_val != None:\r\n return cache_val\r\n\r\n api = zpgapi.get_zpg_api_iface()\r\n try:\r\n api_response = api.cmd_list_connected()\r\n cache_val = api_response['player_list']\r\n except urllib2.URLError:\r\n # Error with zombiepygman.\r\n # This will get cached, but that's OK. It will prevent request\r\n # pileup on the gunicorn workers.\r\n cache_val = []\r\n\r\n cache.set(cache_key, cache_val, 60)\r\n return cache_val", "def get_data():\n return", "def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result", "def get_data(self):\n if self.data is None:\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT data FROM sessions WHERE id = ?;',\n (self.sid,))\n data = cursor.fetchone()\n if data is not None:\n data = data[0]\n if data is not None:\n self.data = pickle.loads(base64.decodestring(data))\n return self.data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_player(self):\n return self.player", "def get_player(self):\n return self.player", "def last(self):\n data = self._http_get(\"last\")\n return data.json()", "async def get_player_history(understat_id): \n async with aiohttp.ClientSession() as session:\n understat = Understat(session)\n player_matches = await understat.get_player_matches(understat_id)\n #print(json.dumps(player_matches))\n return player_matches", "def get_latest_data():\n try:\n print '\\nRequesting new data.....\\n'\n response = get(\"https://api.myjson.com/bins/2csub\")\n if response.status_code is 200:\n print '\\nSuccess (200) in downloading data\\n'\n current_json = response.json()\n set_backup_data(current_json)\n else: \n current_json = get_backup_data()\n except ConnectionError:\n current_json = get_backup_data()\n return current_json", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "async def 
get_next_launches(self):\n response = {}\n\n if self._key:\n request_url = f\"{BASE_URL}?key={self._key}\"\n else:\n request_url = BASE_URL\n\n async with await self._session.get(request_url) as resp:\n response = await resp.text()\n\n if response is not None:\n try:\n return json.loads(response)\n except json.decoder.JSONDecodeError as error:\n raise ValueError(f\"Error decoding data from rocketlaunch.live ({error}).\")\n except Exception as error:\n raise ValueError(f\"Unknown error in rocketlaunch.live data ({error})\")\n else:\n raise ConnectionError(\"Error getting data from rocketlaunch.live.\")", "def data():\n return volumes_fetchers.get_json_data()", "def last_update(self):\r\n request = http.Request('GET', '/metadata/last_update.json')\r\n return request, parsers.parse_json", "def get_data(self):\n return self._result", "def get_new_data(self):\n msgs = self.consumer.get_new_messages()\n return convert_messages(msgs, json.loads)", "def getData(self):\n return self.data", "def getData(self):\n return self.data", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. 
\")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def retrieveData():\n\n\t# My terribad first attempt at this based off of outflank example\n\t# I honestly have no idea what I was doing, but leaving it here just in case\n\t########\n\t# try:\n\t# \tdata = transSock.recv(4)\n\t# except:\n\t# \treturn(\"\")\n\t# if len(data) < 4:\n\t# \treturn()\n\t# slen = struct.unpack('<I', data)[0]\n\t# data = transSock.recv(slen)\n\t# while len(data) < slen:\n\t# \tdata = data + transSock.recv(slen - len(data))\n\t# return(data)\n\t########\n\n\t# Realizing that I have to unpack the buffer length first:\n\n\tframeSize = \"\"\n\twhile len(frameSize) != 4:\n\t\tframeSize = connSock.recv(4)\n\n\tdataSize = struct.unpack('<I', frameSize)[0]\n\tdata = connSock.recv(dataSize)\n\n\treturn data", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def getData(self, local_cache):", "def fetch_current():\n\n data = json.load(urllib2.urlopen(TICKER_URL))\n\n buy = float(data[\"ask\"])\n sell = float(data[\"bid\"])\n\n now = int(time.time()) # Get current unix time\n\n return now, buy, sell", "def current(self):\n\t\treturn self.reading_set.latest(field_name='time')", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = json.loads(data.decode()) \n print(\"DATA FROM BROKER : \", info)\n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def getLastData(self) -> ghidra.program.model.listing.Data:\n ...", "def getPlayer(self):\n return self.currentPlayer", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = pickle.loads(data) \n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def get(self):\n # Check if last_refreshed is saved in the storage\n if \"last_refreshed\" in storage:\n self.refresh_infos(storage[\"last_refreshed\"])\n else:\n # last_refreshed not found in the storage\n soccerInfo = SoccerInfo.objects.first()\n if soccerInfo and soccerInfo[\"last_refreshed\"]:\n self.refresh_infos(soccerInfo[\"last_refreshed\"])\n else:\n self.refresh_infos()\n return storage[\"cached_soccerInfos\"], 200", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "def getCurrentPlayer(self):\r\n return self.currentPlayer", "def getData(self):\r\n return self._data", "def getStockData():\n pass", "def getLatestMeasurement(self): \n return self.measurement[len(self.measurement)-1]", "def getData(self):\n return self.__data" ]
[ "0.6848046", "0.6371471", "0.63664883", "0.6316709", "0.6299198", "0.61774623", "0.61145097", "0.6105679", "0.6088084", "0.60660213", "0.6014737", "0.5998504", "0.5996155", "0.59247184", "0.5897208", "0.5877519", "0.58722764", "0.5852134", "0.5835476", "0.5832391", "0.5816574", "0.581122", "0.5807814", "0.5807814", "0.58070886", "0.5789616", "0.57723385", "0.5763754", "0.57516116", "0.57510155", "0.5744969", "0.57322174", "0.5715736", "0.5715736", "0.5715736", "0.57082814", "0.5706965", "0.5694673", "0.5691315", "0.56848884", "0.5679339", "0.56790257", "0.56712824", "0.56712824", "0.56676865", "0.56675553", "0.5663473", "0.5661624", "0.56591284", "0.56576955", "0.5657063", "0.565182", "0.56388956", "0.5631399", "0.56270623", "0.5614113", "0.5596352", "0.55923176", "0.55816394", "0.5576578", "0.5573159", "0.5565658", "0.5562279", "0.5543774", "0.55305356", "0.55305356", "0.55305356", "0.55305356", "0.5529678", "0.5529678", "0.5529589", "0.5514391", "0.5508844", "0.55073255", "0.54955596", "0.5489752", "0.54844797", "0.5478844", "0.54744375", "0.5463508", "0.5463508", "0.5452548", "0.54468507", "0.54461044", "0.54461044", "0.54461044", "0.5436899", "0.5426404", "0.54260296", "0.54258084", "0.54256684", "0.54219013", "0.5419342", "0.54187316", "0.5408752", "0.5406815", "0.540224", "0.5401433", "0.53875506", "0.5383839", "0.53583986" ]
0.0
-1
Fetch media image of current playing image.
async def async_get_media_image(self) -> tuple[bytes | None, str | None]: if self._client.current_track: image = bytes(self._client.current_track["art"]) return (image, "image/png") return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def media_image_url(self):\n return self._current_item[\"image\"]", "def media_image_url(self):\n return self.coordinator.data.nowplaying[self.zone.SourceID].CurrSong.ArtworkURI", "def media_image_url(self):\n if 'artwork_url' in self._status:\n return self._status['artwork_url']\n return 'http://{server}:{port}/music/current/cover.jpg?player={player}'\\\n .format(\n server=self._lms.host,\n port=self._lms.http_port,\n player=self._id)", "def media_image_url(self):\n return self._media_image_url", "def media_image_url(self):\n return self._media_image_url", "def media_image_url(self):\n if (media_status := self._media_status()[0]) is None:\n return None\n\n images = media_status.images\n\n return images[0].url if images and images[0].url else None", "def media_image_url(self):\n return self._imageUrl", "def get_image(self):\n if not hasattr(self, '_BasePublication__image_cache'):\n images = self.get_images()\n self.__image_cache = images[0].picture if images else None\n return self.__image_cache", "def image(self):\n image_id = self.attrs.get('ImageID', self.attrs['Image'])\n if image_id is None:\n return None\n return self.client.images.get(image_id.split(':')[1])", "def _get_current_media(self):\n key = int(self.status.content_id.split(\"/\")[-1])\n media_item = self.pms.fetchItem(key).reload()\n media_idx = self.status.media_custom_data.get(\"mediaIndex\", 0)\n part_idx = self.status.media_custom_data.get(\"partIndex\", 0)\n media = media_item.media[media_idx]\n part = media.parts[part_idx]\n\n return media_item, media, part", "def media_image_url(self) -> str:\n return self._device.movie.cover", "def get_image():\n return models.Image.objects.all()[0]", "async def async_get_browse_image(\n self,\n media_content_type: MediaType | str,\n media_content_id: str,\n media_image_id: str | None = None,\n ) -> tuple[bytes | None, str | None]:\n cached_url = self.thumbnail_cache.get(media_content_id)\n image_url = self._volumio.canonic_url(cached_url)\n return await self._async_fetch_image(image_url)", "def get_image(self):\n return self.image", "def get_image(self):\n return self.image", "def get_image(self):\n return self.image", "def get_image(self, image_id):\n url = self.get_url(image_id)\n return image_util.load_image_from_url(url) if url else None", "def media_image_url(self):\n url = self._state.get(\"albumart\", None)\n return self._volumio.canonic_url(url)", "def cover_image(self):\n try:\n return self.images.all()[0].image\n except IndexError:\n return None", "def get_image(self, image_id):\r\n try:\r\n return self.get_all_images(image_ids=[image_id])[0]\r\n except IndexError: # None of those images available\r\n return None", "def media_image_url(self):\n\n if self._table.active_track:\n return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)\n\n return super().media_image_url", "def image(self):\n if self.hasImage():\n return self._image.pixmap().toImage()\n return None", "def get_image(self, image):\n return self._get(_image.Image, image)", "def get_image(self):\n return self.camera.getImage()", "def GetImage(self):\r\n\r\n return self._image", "def image(self):\n return self.chunks.get('image')", "def get_image(self):\n try:\n self.image = self.soup.find(class_=\"recipe-header-image\").find(\"img\").get(\"src\", \"\")\n except Exception:\n current_app.logger.error(f\"Could not extract image: {traceback.format_exc()}\")\n self.image = \"\"", "def get_image ( self, object ):\n return self.image", "def self_media(self):\n\n url = 
\"https://api.instagram.com/v1/users/self/media/recent/?access_token={0}\".format(self.access_token)\n request = requests.get(url)\n return request.json()", "def get_current_image(self):\n raise NotImplementedError", "def getImage( self ):\n return self.__image;", "def image(self):\n if self.hasImage():\n return self._pixmapHandle.pixmap().toImage()\n return None", "def get_image(self, tweet):\n\n\t\t# iterate over all the entities associated with the tweet\n\t\tfor media in tweet.entities.get(\"media\",[{}]):\n\n\t\t\t# check if any og them contains a photo\n\t\t\tif media.get(\"type\", None) == \"photo\":\n\n\t\t\t\t# get the url of the photo\n\t\t\t\turl = media[\"media_url\"]\n\t \t\t\n\t \t\t# extract the filename\n\t\t\t\tfilename = url.split('/')[-1]\n\n\t\t\t\t# download the photo and load it into memory\n\t\t\t\tfd = urllib2.urlopen(url)\n\t\t\t\tfile = StringIO.StringIO(fd.read())\n\n\t\t\t\t# open as PIL image\n\t\t\t\timage = Image.open(file)\n\t\t\t\treturn image\n\n\t\t# if no image was found return None\n\t\treturn None", "async def fetch_image_by_id(\n image_uid: str\n):\n image_uid = int(image_uid)\n image = utils_com.get_com_image_by_uid(image_uid)\n return image", "def get_image(soup):\n image = soup.find(\"div\", {\"class\": \"specs-photo-main\"}).find('img')['src']\n return image", "def get_image(self, image_dir_root=None):\n image_dir_root = image_dir_root or image_dir\n return Image.open(os.path.join(image_dir_root, self.name))", "def image(self):\n return self._image", "def image(self):\n return self._image", "def image(self):\n return self._image", "def image(self):\n return self._image", "def image(self):\n return self._image", "def image(self):\n return self._image", "def get_image(self):\n shop = lfs_get_object_or_404(Shop, pk=1)\n return shop.image", "def image(self) -> object:\n return self._image", "def get_image(self, url):\n\n log(\"Getting image {}\".format(url))\n response = requests.get(url)\n if response.status_code == 200:\n image = self._pilimg.open(io.BytesIO(response.content))\n return image.convert('RGBA')\n return None", "async def async_camera_image(self) -> bytes:\n websession = async_get_clientsession(self.hass)\n\n with async_timeout.timeout(10):\n response = await websession.get(self._latest_url)\n\n image = await response.read()\n return image", "def fetch_cover(self) -> None:\n self.cover_path = None\n if self.cover_url is None:\n Logger.Logger.log('No cover picture found for this song.')\n return\n Logger.Logger.log('Retrieving cover picture from iTunes...')\n url_hash: str = md5(self.cover_url.encode('utf-8')).hexdigest()\n filename: str = tempfile.gettempdir() + url_hash + '.jpg'\n try:\n request.urlretrieve(self.cover_url, filename)\n self.cover_path = filename\n except (HTTPError, TimeoutError) as ex:\n Logger.Logger.log_error(str(ex))\n Logger.Logger.log_error('Request failed for URL: ' + Utils.Utils.str(self.cover_url))\n self.cover_path = None", "async def get_now_playing(self) -> models.MediaItem: # FIXME: Maybe want to abstract MediaItem out\n return await self._pytheos.api.player.get_now_playing_media(self.id)", "def get_image(article):\n image_url = None\n media = article.get('media', None)\n if media is not None:\n for m in media:\n media_type = m['media_type'].get('name', None) \n if media_type == 'image':\n image_url = m['url']\n break\n \n return image_url", "def get(self, img):\n\n return send_from_directory(\"images\", img)", "def fetch_image(img_url):\n\n r = requests.get(img_url)\n return r.content", "def 
_get_image(self, asset_id):\n try:\n return self.app.module_map.uploader.get(asset_id)\n except AssetNotFound:\n return None\n except Exception, e:\n return None\n return None", "def get_photo(self):\n photo_box = self.soup.find(attrs={\"class\": \"woocommerce-product-gallery__wrapper\"}).contents\n return list(filter(lambda x: isinstance(x, Tag), photo_box))[0].contents[0].contents[0].attrs[\"src\"]", "def get_image(self):\n return self.process_raw_image(self.get_raw_image())", "def get_local_image(self, src):\r\n local_image = ImageUtils.store_image(None,\r\n self.link_hash, src, self.config)\r\n return local_image", "def media(self, path):\n path = \"/media/%s%s\" % (self.session.root, format_path(path))\n\n url, params, headers = self.request(path, method='GET')\n\n return self.rest_client.GET(url, headers)", "def image(self):\n return self._image", "def fetch(self, tag):\n return fetch_image(self.collection.client, tag)", "def get_image(self, image_id):\n\n # see if we have an image\n image = self._get_from_redis(image_id)\n\n if not image:\n raise o.ImageNotFound('Could not get image', image_id)\n\n # pull the actual image data\n self._populate_image_data(image)\n\n return image", "def get_image(self, id):\n url = \"https://api.imgur.com/3/image/{0}\".format(id)\n resp = self._send_request(url)\n return Image(resp, self)", "def download_image(filename):\n return ImageApiHandler.image_handler.get(filename)", "def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)", "def get_image_url():", "def fetch_media(fname):\n uuid = request.args.get(\"id\")\n crop = request.args.get(\"crop\")\n # show_thumb for cropped image only\n show_thumb = request.args.get(\"full\", \"0\") == \"0\"\n fullname, mimetype = media.get_fullname(uuid)\n try:\n if crop:\n # crop dimensions are diescribed as % of width and height\n image = media.get_cropped_image(fullname, crop, show_thumb)\n logger.debug(\"-> bp.scene.routes.fetch_media cropped png\")\n # Create a png image in memery and display it\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n return Response(buffer.getvalue(), mimetype=\"image/png\")\n else:\n logger.debug(\"-> bp.scene.routes.fetch_media full\")\n return send_file(fullname, mimetype=mimetype)\n except FileNotFoundError:\n # Show default image\n ret = send_file(os.path.join(\"static\", \"image/noone.jpg\"), mimetype=mimetype)\n logger.debug(f\"-> bp.scene.routes.fetch_media none\")\n return ret", "def download(self):\n data = urllib.urlopen(self.remoteurl).read()\n s = StringIO.StringIO(data)\n return Image.open(s)", "def get_image(self, record_id):\n \n for img in self.img_lst:\n if img.get_recordId() == str(record_id):\n return img", "def get_current_pic(self):\n # Change the picture\n if clock() > self.next_frame:\n if self.exploding:\n if self.frame_num < self.num_of_explosion_frames:\n self.frame_num += 1\n else:\n self.away = True\n else:\n # Chose animation direction by the asteroid's horizontal direction\n self.frame_num += int(copysign(1, self.speed[0]+self.acceleration[0]))\n if self.frame_num >= self.num_of_images:\n self.frame_num = 0\n elif self.frame_num < 0:\n self.frame_num = self.num_of_images - 1\n self.next_frame = clock() + self.frame_time\n return self.current_image_set[self.frame_num]", "def grab_image(self):\n _, camera_image = self.camera.read()\n with self.lock:\n self.image = camera_image", "def get_image(self, record_id):\n for item in self.order_items:\n img = 
item.get_image()\n if img is None: return None\n if img.get_recordId() == record_id:\n return img", "def camera_image(self):\n return asyncio.run_coroutine_threadsafe(\n self.async_camera_image(), self.hass.loop\n ).result()", "def get_image(self):\n self.drawer.flush()\n return self.img", "async def fetch_media(self, community_id, media_id) -> Optional[Media]:\n media_url = self._api_communities_url + str(community_id) + \"/medias/\" + str(media_id)\n async with self.web_session.get(media_url, headers=self._headers) as resp:\n if self.check_status(resp.status, media_url):\n data = await resp.json()\n return create_media_object(data.get('media'))", "def image(image_id):\n\n found = False\n img = None\n \n try:\n for img in api.get_all_images():\n if img.id == image_id:\n found = True\n break\n except Exception:\n logging.error(\"Cannot make API connection to retrieve image info!\")\n\n if not found:\n return None\n\n return img", "def image(self):\n return self.__getattr__(\"image\", _returnType=\"value\", _location=\"remote\")", "def getMainImage(self):\n image = self.context.getField(\"image\").get(self.context)\n if len(image) != 0:\n return self.context\n else:\n try:\n return self.context.objectValues(\"EasyShopImage\")[0]\n except IndexError:\n return None", "def read(self):\n with self.lock:\n return self.image", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self):\n return self._img", "def get_image(self):\r\n try:\r\n\r\n message = self.communicator.get_dec_message(self.client_socket)\r\n output = StringIO.StringIO(message)\r\n image = pygame.image.load(output)\r\n return image\r\n\r\n except pygame.error:\r\n return None\r\n except socket.error:\r\n self.close_connection()" ]
[ "0.74042034", "0.731227", "0.71006846", "0.7026222", "0.7026222", "0.7007176", "0.6959414", "0.69316447", "0.67641294", "0.67636913", "0.6708021", "0.6683734", "0.66400856", "0.6608873", "0.6608873", "0.6608873", "0.6600421", "0.6589409", "0.6476685", "0.63501203", "0.6333794", "0.6241343", "0.623838", "0.61770475", "0.6175406", "0.61629987", "0.61587507", "0.6141327", "0.61095315", "0.6106911", "0.60604864", "0.60486287", "0.60468173", "0.603124", "0.6023322", "0.6018311", "0.600193", "0.600193", "0.600193", "0.600193", "0.600193", "0.600193", "0.59959596", "0.5983983", "0.59777105", "0.59696615", "0.5964985", "0.596416", "0.5947526", "0.5944664", "0.5943772", "0.59378767", "0.59326965", "0.59203494", "0.5913773", "0.59119", "0.5884889", "0.5875605", "0.58655316", "0.5865019", "0.5855568", "0.584858", "0.5848089", "0.5844206", "0.5840239", "0.5839517", "0.58393186", "0.58379704", "0.5814406", "0.5812272", "0.5809521", "0.5808348", "0.58074665", "0.58012325", "0.57872576", "0.5777334", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57761794", "0.57758605", "0.57708114" ]
0.7164615
2
Volume up the media player.
def volume_up(self) -> None: newvolume = min(self._client.volume + 4, 100) self._client.set_volume(newvolume)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def volume_up(self) -> None:\n self._state = self._player.volume_up()", "def volume_up(self):\n self._volume += settings.get(\"vlc\", \"volume\", \"step\")\n self._update_volume()\n # self.stdin_queue.put(\"volup\")", "def volume_up(self):\n self._remote.volume(int(self._volume * 60) + 2)", "def volume_up(self):\n return self._call_player_proxy('VolumeUp', GLib.Variant(\"(i)\", (10,)))", "def volume_up(self):\n self.handleCommand(24)", "def volume_up(self) -> None:\n self.volume = min(self.volume + self.config.volume_step, 100)", "def volume_up(self):\n if self.volume_level < 1:\n self.set_volume_level(min(1, self.volume_level + 0.1))", "def volume_down(self):\n self._volume -= settings.get(\"vlc\", \"volume\", \"step\")\n self._update_volume()\n # self.stdin_queue.put(\"voldown\")", "async def volume_up(self) -> None:\n return await self.relay(\"volume_up\")()", "async def volume_up(self) -> None:\n return await self.relay(\"volume_up\")()", "def volume_up(self) -> None:\n if self.volume_level is None:\n return\n volume = round(self.volume_level * MAX_VOLUME)\n self._monoprice.set_volume(self._zone_id, min(volume + 1, MAX_VOLUME))", "def volume_up():\n sonos.set_relative_volume(10)\n return \"Ok\"", "async def async_volume_up(self):\n if int(self._volume) == 100 and not self._muted:\n return\n\n volume = int(self._volume) + int(self._volume_step)\n if volume > 100:\n volume = 100\n\n if not (self._slave_mode and self._multiroom_wifidirect):\n\n if self._is_master:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:slave_vol:{0}\".format(str(volume)), None)\n else:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(volume)), None)\n\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_up. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n if self._snapshot_active:\n return\n value = await self._master.async_call_linkplay_httpapi(\"multiroom:SlaveVolume:{0}:{1}\".format(self._slave_ip, str(volume)), None)\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_up. 
Device: %s, Got response: %s\", self.entity_id, value)", "def volume_down(self) -> None:\n self.volume = max(self.volume - self.config.volume_step, 0)", "async def async_volume_up(self) -> None:\n await self._volumio.volume_up()", "def volume_down(self):\n self._remote.volume(int(self._volume * 60) - 2)", "def setVolume(self):\n\n self.mediaplayer.audio_set_volume(self.volumeslider.value())", "def volume_down(self) -> None:\n self._state = self._player.volume_down()", "def volume_down():\n sonos.set_relative_volume(-10)\n return \"Ok\"", "def volume_down(self) -> None:\n newvolume = max(self._client.volume - 4, 0)\n self._client.set_volume(newvolume)", "def volume_down(self):\n if self.volume_level > 0:\n self.set_volume_level(max(0, self.volume_level - 0.1))", "def volume_down(self) -> None:\n if self.volume_level is None:\n return\n volume = round(self.volume_level * MAX_VOLUME)\n self._monoprice.set_volume(self._zone_id, max(volume - 1, 0))", "def on_set_volume(self, event):\n self.currentVolume = self.volumeCtrl.GetValue()\n self.mplayer.SetProperty(\"volume\", self.currentVolume)", "def volume_down(self):\n return self._call_player_proxy('VolumeDown', GLib.Variant(\"(i)\", (10,)))", "def volume_up(self):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Volume Up',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.press_rc_key(self.rc.VOLUME_UP)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")", "def decrease_volume(self) -> None:\n for _ in range(10):\n self.media.volume_down()\n self.system.notify(f\"Jarvis::Decreased Volume: {self.media.get_volume()['volume']}%\")", "def increase_volume(self) -> None:\n for _ in range(10):\n self.media.volume_up()\n self.system.notify(f\"Jarvis::Increased Volume: {self.media.get_volume()['volume']}%\")", "def volume_down(self):\n self.handleCommand(25)", "def OnSetVolume(self):\r\n volume = self.volume_var.get()\r\n # vlc.MediaPlayer.audio_set_volume returns 0 if success, -1 otherwise\r\n if volume > 100:\r\n volume = 100\r\n if self.player.audio_set_volume(volume) == -1:\r\n self.errorDialog(\"Failed to set volume\")", "async def async_volume_up(self):\n if self._raumfeld.group_is_valid(self._rooms):\n await self._raumfeld.async_change_group_volume(\n self._rooms, CHANGE_STEP_VOLUME_UP\n )\n await self.async_update_volume_level()\n else:\n log_debug(\n \"Method was called although speaker group '%s' is invalid\" % self._rooms\n )", "def volume_increase():\n request_command(tv_command=TVCommand.volume_increase)", "def volume_decrease():\n request_command(tv_command=TVCommand.volume_decrease)", "def play(self, volume=100):\n # self.stdin_queue.put(\"play\")\n self._direct_stdin_writer(\"play\")\n self._media_volume = volume\n self._update_volume()", "async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to 
{}%\".format(volume),delete_after=15)", "async def async_volume_down(self):\n if int(self._volume) == 0:\n return\n\n volume = int(self._volume) - int(self._volume_step)\n if volume < 0:\n volume = 0\n\n if not (self._slave_mode and self._multiroom_wifidirect):\n\n if self._is_master:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:slave_vol:{0}\".format(str(volume)), None)\n else:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(volume)), None)\n\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n if self._snapshot_active:\n return\n value = await self._master.async_call_linkplay_httpapi(\"multiroom:SlaveVolume:{0}:{1}\".format(self._slave_ip, str(volume)), None)\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. Device: %s, Got response: %s\", self.entity_id, value)", "def volume(self, value):\n for i in range(self._volume, value):\n self.send('volume-up')\n time.sleep(0.05)\n for i in range(value, self._volume):\n self.send('volume-down')\n time.sleep(0.05)\n self._volume = value", "async def volume(self, ctx, volume: int):\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume))", "def signal_volume(self):\n pass", "def volume_up(self, delta=0.1):\n if delta <= 0:\n raise ValueError(\n \"volume delta must be greater than zero, not {}\".format(delta))\n return self.set_volume(self.status.volume_level + delta)", "def OnToggleVolume(self, evt):\r\n is_mute = self.player.audio_get_mute()\r\n\r\n self.player.audio_set_mute(not is_mute)\r\n # update the volume slider;\r\n # since vlc volume range is in [0, 200],\r\n # and our volume slider has range [0, 100], just divide by 2.\r\n self.volume_var.set(self.player.audio_get_volume())", "def volume_up(self, delta=0.1):\n if delta <= 0:\n raise ValueError(f\"volume delta must be greater than zero, not {delta}\")\n return self.set_volume(self.status.volume_level + delta)", "def setVolume2(self):\n\n self.mediaplayer2.audio_set_volume(self.volumeslider2.value())", "async def test_volume_up(player) -> None:\n assert player.volume_level == 0\n await player.async_set_volume_level(0.5)\n assert player.volume_level == 0.5\n await player.async_volume_up()\n assert player.volume_level == 0.6", "def _update_volume(self):\n self._log(\"raw\", \"self._volume : {0}, type : {1}\".format(self._volume, type(self._volume)))\n self._log(\"raw\", \"self._media_volume : {0}, type : {1}\".format(self._media_volume, type(self._media_volume)))\n self._log(\"debug\", \"setting volume : {0}, type : {1}\".format(settings.get(\"vlc\", \"volume\", \"master\"), type(settings.get(\"vlc\", \"volume\", \"master\"))))\n volume = float(int(self._volume) * int(self._media_volume) * int(settings.get(\"vlc\", \"volume\", \"master\")) / 10000)\n if volume > 100:\n volume = 100\n elif volume < 0:\n volume = 0\n self.stdin_queue.put_nowait(\"volume {0}\".format(int(volume)))", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.current.source.volume = volume / 100\n await ctx.send('Volume of the player set to 
{}%'.format(volume))", "def set_audio_volume(self, new_volume: float) -> None:\n self.powerup_collected_sound.set_volume(new_volume)", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n # if not ctx.voice_state.is_playing:\n # return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100.')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send(f\"Volume of the player set to {volume}%\\nThe volume will be applied in the next song.\")", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))\n await ctx.message.add_reaction('✅')", "async def async_volume_down(self) -> None:\n await self._volumio.volume_down()", "def set_volume(self, val):\n self.sound.volume = val", "def press_volume_up(self,ignore_error_handle = False):\n message = {};\n step = 'press VOLUME UP key'\n try:\n self.driver.press_keycode(24);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def set_volume(self, target: int) -> None:\n self.media.set_volume(target)\n self.system.notify(f\"Jarvis::Volume has been set to: {self.media.get_volume()['volume']}%\")", "async def volume_down(self) -> None:\n return await self.relay(\"volume_down\")()", "async def volume_down(self) -> None:\n return await self.relay(\"volume_down\")()", "def on_volume(self, _instance, volume):\n self._set_volume(volume)", "def _set_volume(self, value):\n if self.player:\n vol = 100 if abs(value) >= 1.0 else 100 * abs(value)\n self.player.audio_set_volume(int(vol))", "def volup(self, raiseby=1):\n command + 'volup ' + str(raiseby)\n self.run_command(command)", "def movePlayerUp(self):\r\n self.player.moveUp()", "def set_volume_level(self, volume):\n # self._vlc.audio_set_volume(int(volume * 100))\n self._volume = volume", "async def volume(self, ctx, level:int):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n if 0 <= level <= 200:\n voice.source.volume = level / 100\n await ctx.send(f\"Adjusted volume to {level}%.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "def _volume(call: ServiceCall) -> None:\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n _process_volume(KEY_VOLUME_UP, att)\n elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown command %s\", cmd)", "async def volume(self, ctx: commands.Context, volume: int):\n if not 0 <= volume <= 100:\n raise InvalidVolume()\n\n player = ctx.bot.lavalink.player_manager.get(ctx.guild.id)\n \n await player.set_volume(volume)\n await ctx.send(f'Volume alterado para 
{volume}%.')", "def set_volume_level(self, volume: float) -> None:\n self._get_chromecast().set_volume(volume)", "def volume_up(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)", "def set_volume(self):\n import fcntl\n import struct\n try:\n knob = struct.pack(\"III\", 0, 0, self.volume) # VOLUME_DEVICE_ID, VOLUME_KNOB_ID, volume_level\n fcntl.ioctl(self.mixer_fd, 3, knob)\n except:\n pass", "async def volume(\n self, ctx: commands.Context, volume: int = None\n ) -> Optional[float]:\n\n if volume is None:\n return ctx.voice_client.source.volume * 100\n\n ctx.voice_client.source.volume = volume / 100\n self.queue[ctx.guild.id].volume = volume / 100\n return ctx.voice_client.source.volume * 100", "async def async_media_pause(self):\n self._pause_duration = self._meural_device[\"imageDuration\"]\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": 0})", "def set_volume_level(self, volume):\n self._device.set_volume(mute=False, volume=int(volume * 100))\n self._volume = volume", "async def async_media_pause(self) -> None:\n if self._state.get(\"trackType\") == \"webradio\":\n await self._volumio.stop()\n else:\n await self._volumio.pause()", "def _control_pause(self):\n self.player.pause()", "async def test_volume_down(player) -> None:\n assert player.volume_level == 0\n await player.async_set_volume_level(0.5)\n assert player.volume_level == 0.5\n await player.async_volume_down()\n assert player.volume_level == 0.4", "async def volume_up(self, group_id: int, step_level: int=5) -> None:\n if not self.VOLUME_STEP_MIN < step_level <= self.VOLUME_STEP_MAX:\n raise ValueError(f'Step level must be between {self.VOLUME_STEP_MIN} and {self.VOLUME_STEP_MAX}')\n\n await self._api.call('group', 'volume_up', gid=group_id, step=step_level)", "def voldown(self, raiseby=1):\n command + 'voldown ' + str(raiseby)\n self.run_command(command)", "async def async_media_pause(self) -> None:\n await self._projector.send_command(PAUSE)", "def volume(self, value):\n self._volume = value\n self._sendCommand('%03dVL' % value)", "def vol_up_and_validate(self):\n self.logger.info('Increasing volume')\n before_vol = self.dut.volume('Up', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Up', 1)\n if not after_vol or not before_vol or after_vol <= before_vol:\n self.logger.error(\n 'Unable to increase the volume. Before: %s. 
After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error increasing volume')", "async def set_volume(self, value: int):\n if value < self._pytheos.api.player.VOLUME_MIN:\n value = self._pytheos.api.player.VOLUME_MIN\n elif value > self._pytheos.api.player.VOLUME_MAX:\n value = self._pytheos.api.player.VOLUME_MAX\n\n await self._pytheos.api.player.set_volume(self.id, value)", "def set_volume_level(self, level):\n self._remote.volume(int(level * 60))", "def quieter(self):\n self._prepare()\n vol = self._eng.getProperty(\"volume\")\n newvol = vol - 0.25\n logging.debug(\"quieter %f => %f\" %(vol, newvol))\n self._eng.setProperty(\"volume\", newvol)\n self._eng.runAndWait()\n self.say(\"quieter\")", "def set_volume(self, volume):\n self.get(COMMAND_UIC, 'SetVolume', [('volume', int(volume))])", "def set_volume(self, percent):\n self._socket_client.receiver_controller.set_volume(float(percent / 100))", "def destroy(self):\r\n\r\n return self.driver.destroy_volume(volume=self)", "def set_volume_level(self, volume: float) -> None:\n self._monoprice.set_volume(self._zone_id, round(volume * MAX_VOLUME))", "async def set_volume(self, vol: int):\n self.volume = max(min(vol, 150), 0)\n await self._bot.lavalink.ws.send(op='volume', guildId=self.guild_id, volume=self.volume)", "def vol_push_callback(channel):\n \n global volume, mute\n \n if mute:\n subprocess.run([\"mpc\", \"volume\", str(volume)],stdout=subprocess.DEVNULL)\n else:\n print(\"mute\")\n subprocess.run([\"mpc\", \"volume\", \"0\"],stdout=subprocess.DEVNULL)\n mute = not mute", "def set_volume_level(self, volume):\n volume_percent = str(int(volume*100))\n self._lms.query(self._id, 'mixer', 'volume', volume_percent)\n self.update_ha_state()", "async def volume(self, ctx, vol=-1):\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n vol = int(vol)\n if self.user_in_channel(server_id, ctx.message.author) and vol <= 200 and vol >= 0:\n srv['volume'] = vol/100\n if srv['player']:\n srv['player'].volume = srv['volume']\n await ctx.bot.send_message(ctx.message.channel, self.format_volume_bar(srv['volume']))", "def volume_level(self) -> float:\n return int(self._state.get(\"playback_volume\", 0)) / 100", "async def volume(self, msg, vol: int):\n\n if vol > 200:\n vol = 200\n vol = vol/100\n if msg.author.voice is not None:\n if msg.voice_client is not None:\n if msg.voice_client.channel == msg.author.voice.channel and msg.voice_client.is_playing() is True:\n msg.voice_client.source.volume = vol\n self.player[msg.guild.id]['volume'] = vol\n # if (msg.guild.id) in self.music:\n # self.music[str(msg.guild.id)]['vol']=vol\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(\"**Please join the same voice channel as the bot to use the command**\".title(), delete_after=30)", "def mute_volume(self, mute):\n mute_numeric = '1' if mute else '0'\n self._lms.query(self._id, 'mixer', 'muting', mute_numeric)\n self.update_ha_state()", "def set_volume_level(self, volume):\n targetVolume = volume * 100\n tempVolume = -1\n oldVolume = -2\n i = 0\n while int(targetVolume) != tempVolume:\n self.getVolume()\n tempVolume = self._volumeLevel\n i = i + 1\n if tempVolume != oldVolume or i >= 10:\n i = 0\n if tempVolume > targetVolume:\n self.volume_down()\n else:\n self.volume_up()\n oldVolume = tempVolume", "def getVolume(self):\n return self.__volume", "async def get_volume(self) -> int:\n return await self._pytheos.api.player.get_volume(self.id)", "def louder(self):\n self._prepare()\n vol = 
self._eng.getProperty(\"volume\")\n newvol = vol + 0.25\n logging.debug(\"louder %f => %f\" %(vol, newvol))\n self._eng.setProperty(\"volume\", newvol)\n self._eng.runAndWait()\n self.say(\"louder\")", "async def async_media_play(self) -> None:\n await self._volumio.play()", "def volume_down(self, delta=0.1):\n if delta <= 0:\n raise ValueError(\n \"volume delta must be greater than zero, not {}\".format(delta))\n return self.set_volume(self.status.volume_level - delta)", "def setVoiceVolume(self, volume):\n\n try:\n assert volume >= 0 and volume <= 1.0\n\n except AssertionError:\n self.logger.warning(\"Incorrect volume, 0.5 taken into account\")\n volume = 0.5\n\n self.tts.setVolume(volume)", "def volume_down(self):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Volume Down',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.press_rc_key(self.rc.VOLUME_DOWN)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")", "def volume_down(self, delta=0.1):\n if delta <= 0:\n raise ValueError(f\"volume delta must be greater than zero, not {delta}\")\n return self.set_volume(self.status.volume_level - delta)", "async def async_volume_down(self):\n if self._raumfeld.group_is_valid(self._rooms):\n await self._raumfeld.async_change_group_volume(\n self._rooms, CHANGE_STEP_VOLUME_DOWN\n )\n await self.async_update_volume_level()\n else:\n log_debug(\n \"Method was called although speaker group '%s' is invalid\" % self._rooms\n )" ]
[ "0.83150697", "0.8157221", "0.79739726", "0.79416966", "0.7691732", "0.7642095", "0.7590928", "0.7454616", "0.74177897", "0.74177897", "0.7358487", "0.7341644", "0.7250314", "0.72229487", "0.7213308", "0.7209929", "0.71201", "0.70324516", "0.7030003", "0.7005662", "0.6947025", "0.68113977", "0.67675316", "0.67477775", "0.6722698", "0.66873425", "0.6662563", "0.6614791", "0.656081", "0.6552754", "0.6543311", "0.6535803", "0.6480051", "0.64699507", "0.646397", "0.6451156", "0.6433012", "0.64144534", "0.6411117", "0.639038", "0.63896465", "0.63501513", "0.6349075", "0.6312488", "0.6305558", "0.62720245", "0.62716657", "0.626575", "0.6248344", "0.6242852", "0.6203971", "0.6203288", "0.6181139", "0.6181139", "0.61352956", "0.6096697", "0.60300475", "0.60156393", "0.6000966", "0.5975957", "0.5971663", "0.59715176", "0.5958627", "0.5952931", "0.59502804", "0.59199864", "0.59145087", "0.5908862", "0.5895612", "0.58877826", "0.58877736", "0.58871776", "0.5883201", "0.5874137", "0.58514947", "0.5839762", "0.580189", "0.5763651", "0.5730763", "0.57297665", "0.57135636", "0.57043105", "0.5681377", "0.5674154", "0.56705177", "0.5669683", "0.5655388", "0.5648691", "0.5636904", "0.56364584", "0.5626887", "0.5625815", "0.562346", "0.56212735", "0.5617597", "0.5616017", "0.5608438", "0.5604441", "0.56016433", "0.5598048" ]
0.7521488
7
Volume down media player.
def volume_down(self) -> None: newvolume = max(self._client.volume - 4, 0) self._client.set_volume(newvolume)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def volume_down(self):\n self._volume -= settings.get(\"vlc\", \"volume\", \"step\")\n self._update_volume()\n # self.stdin_queue.put(\"voldown\")", "def volume_down(self) -> None:\n self._state = self._player.volume_down()", "def volume_down(self):\n self._remote.volume(int(self._volume * 60) - 2)", "def volume_down(self):\n return self._call_player_proxy('VolumeDown', GLib.Variant(\"(i)\", (10,)))", "def volume_down():\n sonos.set_relative_volume(-10)\n return \"Ok\"", "def volume_down(self) -> None:\n self.volume = max(self.volume - self.config.volume_step, 0)", "async def volume_down(self) -> None:\n return await self.relay(\"volume_down\")()", "async def volume_down(self) -> None:\n return await self.relay(\"volume_down\")()", "def volume_down(self):\n if self.volume_level > 0:\n self.set_volume_level(max(0, self.volume_level - 0.1))", "def volume_decrease():\n request_command(tv_command=TVCommand.volume_decrease)", "def volume_down(self):\n self.handleCommand(25)", "async def async_volume_down(self):\n if int(self._volume) == 0:\n return\n\n volume = int(self._volume) - int(self._volume_step)\n if volume < 0:\n volume = 0\n\n if not (self._slave_mode and self._multiroom_wifidirect):\n\n if self._is_master:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:slave_vol:{0}\".format(str(volume)), None)\n else:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(volume)), None)\n\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n if self._snapshot_active:\n return\n value = await self._master.async_call_linkplay_httpapi(\"multiroom:SlaveVolume:{0}:{1}\".format(self._slave_ip, str(volume)), None)\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. 
Device: %s, Got response: %s\", self.entity_id, value)", "def decrease_volume(self) -> None:\n for _ in range(10):\n self.media.volume_down()\n self.system.notify(f\"Jarvis::Decreased Volume: {self.media.get_volume()['volume']}%\")", "def volume_down(self) -> None:\n if self.volume_level is None:\n return\n volume = round(self.volume_level * MAX_VOLUME)\n self._monoprice.set_volume(self._zone_id, max(volume - 1, 0))", "async def async_volume_down(self) -> None:\n await self._volumio.volume_down()", "def OnToggleVolume(self, evt):\r\n is_mute = self.player.audio_get_mute()\r\n\r\n self.player.audio_set_mute(not is_mute)\r\n # update the volume slider;\r\n # since vlc volume range is in [0, 200],\r\n # and our volume slider has range [0, 100], just divide by 2.\r\n self.volume_var.set(self.player.audio_get_volume())", "def volume_up(self) -> None:\n self._state = self._player.volume_up()", "def setVolume(self):\n\n self.mediaplayer.audio_set_volume(self.volumeslider.value())", "async def async_turn_off(self):\n await self.async_mute_volume(True)", "async def test_volume_down(player) -> None:\n assert player.volume_level == 0\n await player.async_set_volume_level(0.5)\n assert player.volume_level == 0.5\n await player.async_volume_down()\n assert player.volume_level == 0.4", "async def async_media_stop(self) -> None:\n await self._volumio.stop()", "def on_set_volume(self, event):\n self.currentVolume = self.volumeCtrl.GetValue()\n self.mplayer.SetProperty(\"volume\", self.currentVolume)", "async def async_volume_down(self):\n if self._raumfeld.group_is_valid(self._rooms):\n await self._raumfeld.async_change_group_volume(\n self._rooms, CHANGE_STEP_VOLUME_DOWN\n )\n await self.async_update_volume_level()\n else:\n log_debug(\n \"Method was called although speaker group '%s' is invalid\" % self._rooms\n )", "def volume_up(self):\n self._volume += settings.get(\"vlc\", \"volume\", \"step\")\n self._update_volume()\n # self.stdin_queue.put(\"volup\")", "def OnSetVolume(self):\r\n volume = self.volume_var.get()\r\n # vlc.MediaPlayer.audio_set_volume returns 0 if success, -1 otherwise\r\n if volume > 100:\r\n volume = 100\r\n if self.player.audio_set_volume(volume) == -1:\r\n self.errorDialog(\"Failed to set volume\")", "async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume),delete_after=15)", "def volume_up(self):\n self.handleCommand(24)", "def volume_down(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)", "def volume_up(self):\n self._remote.volume(int(self._volume * 60) + 2)", "def volume_down(self):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Volume Down',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.press_rc_key(self.rc.VOLUME_DOWN)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n 
else:\n print(\"stopping test\")", "def _control_stop(self):\n self.player.stop()", "def volume_up(self):\n return self._call_player_proxy('VolumeUp', GLib.Variant(\"(i)\", (10,)))", "async def volume(self, ctx, volume: int):\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume))", "def signal_volume(self):\n pass", "def volume(self, value):\n for i in range(self._volume, value):\n self.send('volume-up')\n time.sleep(0.05)\n for i in range(value, self._volume):\n self.send('volume-down')\n time.sleep(0.05)\n self._volume = value", "async def async_media_pause(self) -> None:\n if self._state.get(\"trackType\") == \"webradio\":\n await self._volumio.stop()\n else:\n await self._volumio.pause()", "def volume_down(self, delta=0.1):\n if delta <= 0:\n raise ValueError(\n \"volume delta must be greater than zero, not {}\".format(delta))\n return self.set_volume(self.status.volume_level - delta)", "def destroy(self):\r\n\r\n return self.driver.destroy_volume(volume=self)", "def volume_down(self, delta=0.1):\n if delta <= 0:\n raise ValueError(f\"volume delta must be greater than zero, not {delta}\")\n return self.set_volume(self.status.volume_level - delta)", "def setVolume2(self):\n\n self.mediaplayer2.audio_set_volume(self.volumeslider2.value())", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))\n await ctx.message.add_reaction('✅')", "def set_volume(self, val):\n self.sound.volume = val", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n # if not ctx.voice_state.is_playing:\n # return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100.')\n\n ctx.voice_state.volume = volume / 100\n await ctx.send(f\"Volume of the player set to {volume}%\\nThe volume will be applied in the next song.\")", "async def volume_up(self) -> None:\n return await self.relay(\"volume_up\")()", "async def volume_up(self) -> None:\n return await self.relay(\"volume_up\")()", "async def _volume(self, ctx: commands.Context, *, volume: int):\n\n if not ctx.voice_state.is_playing:\n return await ctx.send('Nothing being played at the moment.')\n\n if 0 > volume > 100:\n return await ctx.send('Volume must be between 0 and 100')\n\n ctx.voice_state.current.source.volume = volume / 100\n await ctx.send('Volume of the player set to {}%'.format(volume))", "def volume_up():\n sonos.set_relative_volume(10)\n return \"Ok\"", "def volume_up(self) -> None:\n self.volume = min(self.volume + self.config.volume_step, 100)", "def detach(self):\r\n\r\n return self.driver.detach_volume(volume=self)", "def press_volume_down(self,ignore_error_handle = False):\n message = {};\n step = 'press VOLUME DOWN key';\n try:\n self.driver.press_keycode(25);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def play(self, volume=100):\n # self.stdin_queue.put(\"play\")\n self._direct_stdin_writer(\"play\")\n self._media_volume = volume\n self._update_volume()", "def 
volume_up(self):\n if self.volume_level < 1:\n self.set_volume_level(min(1, self.volume_level + 0.1))", "async def volume(self, ctx, level:int):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n if 0 <= level <= 200:\n voice.source.volume = level / 100\n await ctx.send(f\"Adjusted volume to {level}%.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "async def volume_down(self, group_id: int, step_level: int = 5) -> None:\n if not self.VOLUME_STEP_MIN < step_level <= self.VOLUME_STEP_MAX:\n raise ValueError(f'Step level must be between {self.VOLUME_STEP_MIN} and {self.VOLUME_STEP_MAX}')\n\n await self._api.call('group', 'volume_down', gid=group_id, step=step_level)", "def stop(self):\n if self.player and self.player.is_playing():\n self.player.pause()\n super().stop()", "async def async_volume_up(self):\n if int(self._volume) == 100 and not self._muted:\n return\n\n volume = int(self._volume) + int(self._volume_step)\n if volume > 100:\n volume = 100\n\n if not (self._slave_mode and self._multiroom_wifidirect):\n\n if self._is_master:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:slave_vol:{0}\".format(str(volume)), None)\n else:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(volume)), None)\n\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_up. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n if self._snapshot_active:\n return\n value = await self._master.async_call_linkplay_httpapi(\"multiroom:SlaveVolume:{0}:{1}\".format(self._slave_ip, str(volume)), None)\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_up. 
Device: %s, Got response: %s\", self.entity_id, value)", "def mute(self, status=None):\n if status is None:\n status = not self.status.volume_muted\n\n self._socket_client.receiver_controller.set_volume_muted(status)", "def mute(self, status=None):\n if status is not None:\n st = status\n else:\n st = not status.volume_muted\n\n self._socket_client.receiver_controller.set_volume_muted(st)", "def volume_up(self) -> None:\n newvolume = min(self._client.volume + 4, 100)\n self._client.set_volume(newvolume)", "def movePlayerDown(self):\r\n self.player.moveDown()", "async def volume(self, msg, vol: int):\n\n if vol > 200:\n vol = 200\n vol = vol/100\n if msg.author.voice is not None:\n if msg.voice_client is not None:\n if msg.voice_client.channel == msg.author.voice.channel and msg.voice_client.is_playing() is True:\n msg.voice_client.source.volume = vol\n self.player[msg.guild.id]['volume'] = vol\n # if (msg.guild.id) in self.music:\n # self.music[str(msg.guild.id)]['vol']=vol\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(\"**Please join the same voice channel as the bot to use the command**\".title(), delete_after=30)", "def _control_pause(self):\n self.player.pause()", "async def async_toggle(self):\n await self.async_mute_volume(not self._muted)", "def voldown(self, raiseby=1):\n command + 'voldown ' + str(raiseby)\n self.run_command(command)", "def _set_volume(self, value):\n if self.player:\n vol = 100 if abs(value) >= 1.0 else 100 * abs(value)\n self.player.audio_set_volume(int(vol))", "def stop_video(self):\n\n # Enabling all the buttons, the speedCombo and the checkbox\n self.enable_btns()\n\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState or self.mediaPlayer.state() == QMediaPlayer.PausedState:\n self.mediaPlayer.stop()\n else:\n pass", "def _volume(call: ServiceCall) -> None:\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n _process_volume(KEY_VOLUME_UP, att)\n elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown command %s\", cmd)", "def detach_volume(self):\n\n # Choose the volume\n volume_id = self._choose_among_used_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Detach the volume\n print '# Detaching volume \"%s\"!' 
% volume_id\n if self.compute.detach_volume(volume_id):\n print 'The volume has been detached!'\n else:\n print 'The volume could not been detached'", "def volume_up(self) -> None:\n if self.volume_level is None:\n return\n volume = round(self.volume_level * MAX_VOLUME)\n self._monoprice.set_volume(self._zone_id, min(volume + 1, MAX_VOLUME))", "async def volume(self, ctx: commands.Context, volume: int):\n if not 0 <= volume <= 100:\n raise InvalidVolume()\n\n player = ctx.bot.lavalink.player_manager.get(ctx.guild.id)\n \n await player.set_volume(volume)\n await ctx.send(f'Volume alterado para {volume}%.')", "def volume_increase():\n request_command(tv_command=TVCommand.volume_increase)", "async def async_media_pause(self):\n self._pause_duration = self._meural_device[\"imageDuration\"]\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": 0})", "def stop_music(self):\n self.load_music(None)", "def voice_decrease():\n request_command(tv_command=TVCommand.voice_decrease)", "async def async_media_pause(self) -> None:\n await self._projector.send_command(PAUSE)", "def mute_volume(self, mute):\n mute_numeric = '1' if mute else '0'\n self._lms.query(self._id, 'mixer', 'muting', mute_numeric)\n self.update_ha_state()", "def set_volume(self):\n import fcntl\n import struct\n try:\n knob = struct.pack(\"III\", 0, 0, self.volume) # VOLUME_DEVICE_ID, VOLUME_KNOB_ID, volume_level\n fcntl.ioctl(self.mixer_fd, 3, knob)\n except:\n pass", "def set_volume_level(self, volume):\n self._device.set_volume(mute=False, volume=int(volume * 100))\n self._volume = volume", "def set_volume(self, target: int) -> None:\n self.media.set_volume(target)\n self.system.notify(f\"Jarvis::Volume has been set to: {self.media.get_volume()['volume']}%\")", "def on_volume(self, _instance, volume):\n self._set_volume(volume)", "async def async_volume_up(self) -> None:\n await self._volumio.volume_up()", "def vol_down_and_validate(self):\n self.logger.info('Decreasing volume')\n before_vol = self.dut.volume('Down', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Down', 1)\n if not after_vol or not before_vol or after_vol >= before_vol:\n self.logger.error(\n 'Unable to decrease the volume. Before: %s. 
After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error decreasing volume')", "def change_volume(self, sound_type, volume):\n if sound_type == \"background\":\n self.bg_volume = volume\n pygame.mixer.music.set_volume(self.bg_volume)\n elif sound_type == \"sound\":\n self.se_volume = volume\n for sound in self.sound_lib:\n self.sound_lib[sound].set_volume(volume)", "def unsetVolume(self):\n return _libsbml.Compartment_unsetVolume(self)", "async def test_volume_up(player) -> None:\n assert player.volume_level == 0\n await player.async_set_volume_level(0.5)\n assert player.volume_level == 0.5\n await player.async_volume_up()\n assert player.volume_level == 0.6", "def stop_soundtrack(self):\n SoundManager.remove_music(self.source, self)", "def stop_soundtrack(self):\n SoundManager.remove_music(self.source, self)", "def unassign_volume(VolumeId=None):\n pass", "async def async_turn_on(self):\n await self.async_mute_volume(False)", "def test_sound_volume(self):\n return self.send(\"test_sound_volume\")", "def mute():\n request_command(tv_command=TVCommand.mute)", "async def volume(\n self, ctx: commands.Context, volume: int = None\n ) -> Optional[float]:\n\n if volume is None:\n return ctx.voice_client.source.volume * 100\n\n ctx.voice_client.source.volume = volume / 100\n self.queue[ctx.guild.id].volume = volume / 100\n return ctx.voice_client.source.volume * 100", "async def set_volume(self, value: int):\n if value < self._pytheos.api.player.VOLUME_MIN:\n value = self._pytheos.api.player.VOLUME_MIN\n elif value > self._pytheos.api.player.VOLUME_MAX:\n value = self._pytheos.api.player.VOLUME_MAX\n\n await self._pytheos.api.player.set_volume(self.id, value)", "def mute_volume(self, mute: bool) -> None:\n self._monoprice.set_mute(self._zone_id, mute)", "def stopping_sound(self):\n logger.warning('current sound play is being stopped')\n self.stop_sound = True", "def _update_volume(self):\n self._log(\"raw\", \"self._volume : {0}, type : {1}\".format(self._volume, type(self._volume)))\n self._log(\"raw\", \"self._media_volume : {0}, type : {1}\".format(self._media_volume, type(self._media_volume)))\n self._log(\"debug\", \"setting volume : {0}, type : {1}\".format(settings.get(\"vlc\", \"volume\", \"master\"), type(settings.get(\"vlc\", \"volume\", \"master\"))))\n volume = float(int(self._volume) * int(self._media_volume) * int(settings.get(\"vlc\", \"volume\", \"master\")) / 10000)\n if volume > 100:\n volume = 100\n elif volume < 0:\n volume = 0\n self.stdin_queue.put_nowait(\"volume {0}\".format(int(volume)))", "def stop_media(self):\n self.stdin_queue.put(\"stop\")", "async def async_mute_volume(self, mute: bool) -> None:\n if mute:\n await self._volumio.mute()\n else:\n await self._volumio.unmute()", "def stop(self,event=None):\r\n if self.controlLock.locked():\r\n return\r\n self.controlLock.acquire()\r\n self.videoPlayer.stop()\r\n self.videoPlayer.updateDataplayers()\r\n self.controlLock.release()", "def force_unpause(self):\n self.timer.start(self.timer_interval)\n QMetaObject.invokeMethod(self.video_player, \"play\", Qt.QueuedConnection)\n self.paused = False\n\n # Re-enable video buttons\n self.update.workerUnpaused.emit()" ]
[ "0.8112854", "0.804127", "0.79031795", "0.7766054", "0.7751566", "0.7663452", "0.76481754", "0.76481754", "0.7636538", "0.7614832", "0.7582992", "0.745221", "0.7447365", "0.7420914", "0.72983295", "0.68772507", "0.68442637", "0.6788666", "0.67060065", "0.6633467", "0.66051424", "0.6590486", "0.64844817", "0.6465973", "0.6447868", "0.64441574", "0.6429904", "0.6409437", "0.64092535", "0.64024544", "0.63484937", "0.6339433", "0.6336128", "0.6296658", "0.6269261", "0.6254758", "0.62508345", "0.62471694", "0.6237986", "0.62360835", "0.62146074", "0.62117", "0.62000597", "0.6199636", "0.6199636", "0.6194399", "0.6182112", "0.6181386", "0.6174301", "0.6138162", "0.6130729", "0.611975", "0.611012", "0.61003643", "0.60876936", "0.6080675", "0.6045119", "0.5991565", "0.5964997", "0.5957785", "0.5944277", "0.5943653", "0.59415996", "0.5927205", "0.5926716", "0.5921892", "0.5919033", "0.5906643", "0.5890346", "0.5889328", "0.5881042", "0.58619374", "0.58562124", "0.58477354", "0.5847712", "0.58474445", "0.5830151", "0.58194363", "0.5816486", "0.58148026", "0.5812921", "0.58096635", "0.58059436", "0.5783395", "0.57249284", "0.572144", "0.572144", "0.5720585", "0.5717904", "0.57093745", "0.5699896", "0.5694095", "0.5693401", "0.56863153", "0.56716037", "0.5668289", "0.5663519", "0.5657652", "0.5650769", "0.56493634" ]
0.7441462
13
Simulate play pause media player.
def media_play_pause(self) -> None: if self.state == MediaPlayerState.PLAYING: self.media_pause() else: self.media_play()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_media_play_pause(player) -> None:\n assert player.state == STATE_OFF\n await player.async_media_play_pause()\n assert player.state == STATE_PLAYING\n await player.async_media_play_pause()\n assert player.state == STATE_PAUSED", "def _control_pause(self):\n self.player.pause()", "async def async_media_pause(self) -> None:\n await self._projector.send_command(PAUSE)", "async def play_pause(self) -> None:\n return await self.relay(\"play_pause\")()", "def test_pause(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.queue(source)\n player.play()\n start_time = time.time()\n\n stage = 0\n while player.source:\n if stage == 0 and time.time() - start_time > 0.25:\n player.pause()\n stage = 1\n if stage == 1 and time.time() - start_time > 0.75:\n player.play()\n stage = 2\n player.dispatch_events()", "def pause_song(self):\r\n if self.isPlaying:\r\n self.playSong[0].pause()\r\n print(\"Song paused. To continue type Play.\")\r\n else:\r\n print(\"Play a song first...\")", "def media_pause(self) -> None:\n self._attr_state = MediaPlayerState.PAUSED\n self._client.pause()", "def play_pause(self):\n return self._call_player_proxy('PlayPause', None)", "def media_play_pause(self):\n if self._state == STATE_PLAYING:\n self._state = STATE_PAUSED\n else:\n self._state = STATE_PLAYING", "async def async_media_pause(self):\n if not self._slave_mode:\n if self._playing_stream and not (self._playing_mediabrowser or self._playing_mass):\n # Pausing a live stream will cause a buffer overrun in hardware. Stop is the correct procedure in this case.\n # If the stream is configured as an input source, when pressing Play after this, it will be started again (using self._prev_source).\n await self.async_media_stop()\n return\n\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:pause\", None)\n if value == \"OK\":\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if self._playing_spotify:\n self._spotify_paused_at = utcnow()\n self._state = STATE_PAUSED\n if self._slave_list is not None:\n for slave in self._slave_list:\n await slave.async_set_state(self._state)\n await slave.async_set_position_updated_at(self.media_position_updated_at)\n# self.async_schedule_update_ha_state(True)\n else:\n _LOGGER.warning(\"Failed to pause playback. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_pause()", "def media_pause(self):\n self._state = STATE_PAUSED", "def media_pause(self):\n self._state = STATE_PAUSED", "def pause(self):\n self.paused = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "async def async_media_pause(self) -> None:\n if self._state.get(\"trackType\") == \"webradio\":\n await self._volumio.stop()\n else:\n await self._volumio.pause()", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def media_play_pause(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, data)", "async def _pause(self, ctx: commands.Context):\n try:\n if ctx.voice_state.voice.is_playing:\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')\n\n except AttributeError:\n await ctx.send(\"Can't pause. 
No song is being played!\")", "def pause_play(self):\n\n try:\n if self.entrada:\n if self.estado == \"playing\": # pausa\n self.__pause()\n\n elif self.estado == \"paused\":\n self.__pause(True)\n self.estado = \"playing\"\n self.emit(\"estado\", \"playing\")\n\n else:\n #if self.uri: self.load(self.uri)\n pass\n\n except Exception, e:\n print \"HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR\", e", "def pause_play(self):\n\n if self.estado == gst.STATE_PAUSED \\\n or self.estado == gst.STATE_NULL \\\n or self.estado == gst.STATE_READY:\n self.__play()\n\n elif self.estado == gst.STATE_PLAYING:\n self.__pause()", "def pause(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPause())", "def test_pause_queue(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.pause()\n player.queue(source)\n\n while player.source:\n player.dispatch_events()\n player.play()", "def pause(self):\n self.sendCommand(\"pause\")", "async def async_media_pause(hass: HomeAssistant, entity_id: str | None = None) -> None:\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PAUSE, data)", "async def async_media_pause(self):\n self._pause_duration = self._meural_device[\"imageDuration\"]\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": 0})", "async def _pause(self, ctx: commands.Context):\n\n if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "async def _pause(self, ctx: commands.Context):\n\n if not ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "def media_pause(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, data)", "def pause(self):\n pass", "def pause(self):\n pass", "def pause(self,event=None):\r\n # If pause -> pause or stop -> pause, ignore, or if no video\r\n if not self.isPlaying():\r\n return\r\n # If play -> pause\r\n self.progress = time.time() - self.startTimestamp\r\n if self.hasAudio:\r\n mixer.music.pause()\r\n self.state = VideoPlayer.State.PAUSED", "async def toggle_play_pause(self):\n _LOGGER.debug(\"[Foobar2k] In Play / Pause\")\n if (self._power == POWER_ON):\n if (self._state == STATE_STOPPED):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PLAY_PLAYLIST.format(self._current_playlist_id, self._current_index), data=None)\n else: \n await self.prep_fetch(HTTP_POST, POST_PLAYER_PAUSE_TOGGLE, data=None)", "def pause(self) :\n raise NotImplementedError(\"pause not implemented\")", "def pause(self):\n raise NotImplementedError()", "def pause(self):\n raise NotImplementedError()", "def pause(self):\n self.block.mobile = not self.block.mobile\n if not self.paused:\n self.paused = True\n # Also print paused message\n self.screen.print(\"PAUSED\")\n else:\n self.paused = False\n self.screen.print(\"\")\n # Also reset tick time\n self.t = time.time()", "def execute_pause(self):\n pass", "def pause(self):\n if not self.paused:\n pygame.mixer.music.pause()\n self.paused = True\n else:\n pygame.mixer.music.unpause()\n self.paused = False", "def pause(self):\n\t\tpass", "async def async_media_play(self) -> None:\n await self._projector.send_command(PLAY)", "async def pause(self, ctx):\n if not await self.control_checks(ctx):\n return\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n if 
self.is_playing(server_id):\n srv['player'].pause()\n else:\n srv['player'].resume()", "def pause(): # real signature unknown; restored from __doc__\n pass", "def pause(self):\n \n self.pause = True", "def pause_video(self):\n\n if self.is_playing and self.is_paused is False:\n print(f\"Pausing video: {self.playing_now}\")\n self.is_paused = True\n elif self.is_paused:\n print(f\"Video already paused: {self.playing_now}\")\n elif self.is_playing is False:\n print(\"Cannot pause video: No video is currently playing\")", "async def pause(self, msg):\n if msg.author.voice is not None and msg.voice_client is not None:\n if msg.voice_client.is_paused() is True:\n return await msg.send(\"Song is already paused\")\n\n if msg.voice_client.is_paused() is False:\n msg.voice_client.pause()\n await msg.message.add_reaction(emoji='✅')", "def force_pause(self):\n self.timer.stop()\n QMetaObject.invokeMethod(self.video_player, \"pause\", Qt.QueuedConnection)\n self.paused = True\n\n # Re-enable video buttons\n self.update.workerPaused.emit()", "async def async_media_play(self):\n if self._pause_duration != 0:\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": self._pause_duration})\n else:\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": 300})", "def pause(self):\n self.paused_time = time.time()\n self.paused = True", "def __pause(self, reset=False):\n\n self.entrada.write('pause 0\\n')\n self.entrada.flush()\n self.__new_handle(reset)\n self.estado = \"paused\"\n self.emit(\"estado\", \"paused\")", "def pause():\n click.pause()", "def pause(self) -> None:\n self.system.notify(\"Jarvis::Paused\")\n self.media.pause()", "def auto_play_pause(self):\r\n if ActiveCheck.not_active():\r\n return\r\n \r\n if not self.playing:\r\n return # Suppress activity\r\n\r\n player = self.get_player()\r\n if not player.auto:\r\n return\r\n self.auto_delay_waiting = True\r\n pause = player.pause\r\n if self.speed_step >= 0:\r\n pause = self.speed_step\r\n delay_ms = int(pause*1000)\r\n self.mw.after(delay_ms)\r\n return", "def pause_video(self):\n if self.now_playing_videoid:\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n if self.pause == True:\n print(f\"Video already paused: {video_playing.title}\")\n else:\n print(f\"Pausing video: {video_playing.title}\")\n self.pause = True\n \n else: \n print(f\"Cannot pause video: No video is currently playing\")\n\n # print(\"pause_video needs implementation\")", "def on_pause(self):\n pass", "def hw_pause(self):\n self.logger.debug(\"Pause called (no-op)\")", "async def pause(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if player.paused:\n await player.set_pause(False)\n await ctx.send(\"Resumed.\")\n else:\n await player.set_pause(True)\n await ctx.send(\"Paused.\")", "def on_pause(self):\r\n store = get_store()\r\n store.put(\"pause\", value=self.sm.current)\r\n return True", "def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break", "async def skip(self):\n await self.play()", "def pause(ms=None): #py:pause\n if ms is None:\n RUR._pause_()\n else:\n RUR._pause_(ms)", "def media_play(self):\n self._state = STATE_PLAYING", "def media_play(self):\n self._state = STATE_PLAYING", "def pause_video(self):\n\n if self.playerType == VLC:\n if self.playMode 
== FFMPEG:\n self.FFmpegTimer.stop()\n else:\n if self.mediaListPlayer.get_state() != vlc.State.Paused:\n\n self.timer.stop()\n self.timer_spectro.stop()\n\n self.mediaListPlayer.pause()\n # wait for pause\n\n # wait until video is paused or ended\n while True:\n if self.mediaListPlayer.get_state() in [vlc.State.Paused, vlc.State.Ended]:\n break\n\n # second video together\n if self.simultaneousMedia:\n if self.mediaListPlayer2.get_state() != vlc.State.Paused:\n self.mediaListPlayer2.pause()\n\n logging.debug(\"pause_video: player #1 state: {}\".format(self.mediaListPlayer.get_state()))\n if self.simultaneousMedia:\n logging.debug('pause_video: player #2 state {}'.format(self.mediaListPlayer2.get_state()))\n pass\n\n time.sleep(1)\n self.timer_out()\n self.timer_spectro_out()", "def force_unpause(self):\n self.timer.start(self.timer_interval)\n QMetaObject.invokeMethod(self.video_player, \"play\", Qt.QueuedConnection)\n self.paused = False\n\n # Re-enable video buttons\n self.update.workerUnpaused.emit()", "def pause(self):\r\n cmd = MsgHelper.createMessage(Messages.CMD_PAUSE)\r\n self.mailbox.push( cmd, high_priority = True )", "def pause_button(self):\r\n self.is_action = True\r\n self.update_settings()\r\n self.is_pause = True\r\n if self.pause_call is not None:\r\n self.wm.after(1, self.pause_call)", "def testPlayback(self):\n \n pass", "async def pause(self, ctx):\n state = self.get_voice_state(ctx.message.server)\n if state.is_playing():\n player = state.player\n player.pause()\n await self.bot.say('Paused.')", "def __pause(self):\n\n self.set_state(gst.STATE_PAUSED)", "def test_pause(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n message: Optional[str],\n) -> None:\n subject.pause(msg=message)\n decoy.verify(mock_engine_client.wait_for_resume(message=message))", "async def pause(self, ctx):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.pause()\n await ctx.send(\"Playing paused.\")\n else:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "def pause_video(self):\n if self.is_paused() is not None:\n print(\"Video already paused: {}\".format(self.is_paused()._title))\n elif self.is_playing() is not None:\n print(\"Pausing video: {}\".format(self.is_playing()._title))\n self.is_playing()._status = 2\n else:\n print(\"Cannot pause video: No video is currently playing\")", "def pause(self, _):\n if not self.is_ended:\n self.canvas.create_text(self.game.width // 2,\n self.game.height // 2,\n text=\"Paused\",\n font=(Game.FONT, 50),\n fill=Game.TEXT_COLOUR,\n tag='pause_text')\n self.game.running = not self.game.running\n if self.game.running:\n self.canvas.delete('pause_text')", "def pause_video(self):\n global value\n if value==1:\n value=2\n print(f\"Pausing video: {name}\")\n elif value==0:\n print(f\"Cannot pause video: No video is currently playing\")\n else:\n print(f\"Video already paused: {name}\")", "def pause_video(self):\n if self.current_video is None:\n print(\"Cannot pause video: No video is currently playing\")\n elif self.current_paused is False:\n print(\"Pausing video:\", self.current_video.title)\n self.current_paused = True\n elif self.current_paused:\n print(\"Video already paused:\", self.current_video.title)", "def do_pause(self, args) :\r\n self.__Logger.warn(\"pausing all simulations\")\r\n\r\n 
self.cmds[\"SimulatorPaused\"] = True", "def set_pause(self, pause):\n\n game_status = self.game.get_game_status();\n if(game_status == GameStatus.NotStarted or game_status == GameStatus.Finished):\n return;\n\n if(pause == True):\n self.game.set_game_status(GameStatus.Paused);\n self.bttn_pause.set_text(\"Reprendre la partie\");\n\n self.game.stop_timer();\n\n elif(pause == False):\n self.game.set_game_status(GameStatus.InProgress);\n self.bttn_pause.set_text(\"Mettre en pause\");\n\n self.game.start_timer();", "def pause(self):\n return self.client.api.pause(self.id)", "def pause_music():\n from Functions import Menu\n if Menu.music == False:\n pygame.mixer.music.unpause()\n Menu.music = True\n else:\n pygame.mixer.music.pause()\n Menu.music = False", "def pause(self):\n if self._pause:\n self._pause = False\n else:\n self._pause = True\n self.step() # trigger the next step", "def pause(self):\n self._cleanup()\n self._paused = True", "def pause_video(self):\n if self._paused:\n print(f\"Video already paused: {self._current_video.title}\")\n return\n elif self._current_video is None:\n print(\"Cannot pause video: No video is currently playing\")\n return\n print(f\"Pausing video: {self._current_video.title}\")\n self._paused = True", "def _toggle_paused(self, paused=None):\n #automatically start the first wave\n if self._wave == 0:\n self.next_wave()\n\n if paused is None:\n paused = not self._paused\n\n #Task 1.5 (Play Controls): Reconfigure the pause button here\n \n if paused:\n self.pause()\n self._play_button_text.set(\"play\")\n else:\n self.start()\n self._play_button_text.set(\"pause\")\n\n self._paused = paused", "def pause(self,event=None):\r\n if self.controlLock.locked():\r\n return\r\n self.controlLock.acquire()\r\n self.videoPlayer.pause()\r\n for dp in self.dataPlayers:\r\n dp.update(self.videoPlayer.startTimestamp)\r\n self.controlLock.release()", "def start_pause(self, **kwargs):\n if self.is_on:\n self.turn_off()\n else:\n self.turn_on()", "async def set_pause(self, pause: bool):\n await self._bot.lavalink.ws.send(op='pause', guildId=self.guild_id, pause=pause)\n self.paused = pause", "def on_pause(self, event):\n self.pre_check(event)\n if not self.get_player(event.guild.id).paused:\n self.get_player(event.guild.id).pause()", "async def pause_behaviors(self) -> None:", "def pause_videos(self):\n if (not self.playing) or (self.pausing) or (self.shutdown):\n return\n self.enable_video_buttons(False, False, False)\n self.pausing = True\n\n # Pause the background worker\n self.worker.force_pause()", "def on_worker_paused(self):\n self.playing = False\n self.pausing = False\n self.enable_video_buttons(True, False, True)", "def toggle_pause(self):\n self.m_btn_pause = not self.m_btn_pause", "def pause(self, state):\n resp = yield from self.command('pause '+str(state))\n return True", "def pause_video(self):\n current_video = self._video_library.get_video()\n if self.title == current_video.title:\n print(f\"Pausing video: {self.title}\")\n elif current_video == None:\n print(f\"Cannot pause video: No video is currently playing\")\n else:\n print(f\"Video already paused: {self.title}\")", "def play(self): \n if(self.isLoaded() == False):\n return\n self.isPlaying = True\n #if the video is end, we restart\n if(self.ret == False):\n self.video.set(1, 0)", "async def pause(self):\n return await self.send_command_and_read_reply(\n Protocol1Command(command=\"\", execution_command=\"K\")\n )", "def toggle_pause(self):\n message = None\n while self.model.paused:\n for pygame_event in 
pygame.event.get():\n print('event: ', pygame_event)\n if pygame_event.type == pygame.KEYDOWN:\n message = self.down_keys(message, pygame_event)\n if isinstance(message, events.TogglePause):\n self.ev_manager.post(message)", "def play(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPlay())", "def handle_play(self, message):\n self.audio_service.resume()", "def play(self):\n pass", "def pause(self, instance, callback):\n pass", "async def set_paused(self, value: bool):\n await self._pytheos.api.player.set_play_state(self.id, models.player.PlayState.Paused if value else models.player.PlayState.Playing)" ]
[ "0.8116942", "0.80330086", "0.8000475", "0.7805966", "0.7696691", "0.76267785", "0.7618228", "0.7533583", "0.7516676", "0.7498765", "0.74963427", "0.74963427", "0.7420536", "0.7380519", "0.736805", "0.73425657", "0.73408324", "0.72820014", "0.7275913", "0.7251658", "0.72471094", "0.7186009", "0.7133423", "0.7130647", "0.71142995", "0.70999706", "0.7045512", "0.69505596", "0.69505596", "0.69463485", "0.6922026", "0.6903543", "0.69030786", "0.69030786", "0.6897311", "0.6851986", "0.6839096", "0.682573", "0.6819568", "0.67864853", "0.6747966", "0.6737366", "0.6711889", "0.6709177", "0.67090464", "0.66892487", "0.6675648", "0.667555", "0.6668794", "0.66673577", "0.6661762", "0.66430664", "0.6639714", "0.661764", "0.66174006", "0.65984905", "0.65888387", "0.65851784", "0.6565193", "0.6541972", "0.6541972", "0.6540115", "0.6538793", "0.6537291", "0.65333825", "0.6528218", "0.6513513", "0.65103734", "0.6484871", "0.64701307", "0.6464033", "0.6452751", "0.6438799", "0.6435629", "0.6394778", "0.6385955", "0.6382713", "0.63817495", "0.63794756", "0.63603383", "0.6345981", "0.6329399", "0.6320516", "0.63197297", "0.63087624", "0.6305357", "0.62770116", "0.6268723", "0.6257461", "0.6252011", "0.6251526", "0.6231405", "0.62270653", "0.6217516", "0.62168765", "0.6209853", "0.62070626", "0.6200355", "0.619592", "0.61910796" ]
0.8151165
0
Send media pause command to media player.
def media_pause(self) -> None: self._attr_state = MediaPlayerState.PAUSED self._client.pause()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_media_pause(self) -> None:\n await self._projector.send_command(PAUSE)", "def pause(self):\n self.sendCommand(\"pause\")", "def media_play_pause(self) -> None:\n if self.state == MediaPlayerState.PLAYING:\n self.media_pause()\n else:\n self.media_play()", "async def async_media_pause(self):\n if not self._slave_mode:\n if self._playing_stream and not (self._playing_mediabrowser or self._playing_mass):\n # Pausing a live stream will cause a buffer overrun in hardware. Stop is the correct procedure in this case.\n # If the stream is configured as an input source, when pressing Play after this, it will be started again (using self._prev_source).\n await self.async_media_stop()\n return\n\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:pause\", None)\n if value == \"OK\":\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if self._playing_spotify:\n self._spotify_paused_at = utcnow()\n self._state = STATE_PAUSED\n if self._slave_list is not None:\n for slave in self._slave_list:\n await slave.async_set_state(self._state)\n await slave.async_set_position_updated_at(self.media_position_updated_at)\n# self.async_schedule_update_ha_state(True)\n else:\n _LOGGER.warning(\"Failed to pause playback. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_pause()", "def media_pause(self):\n self._state = STATE_PAUSED", "def media_pause(self):\n self._state = STATE_PAUSED", "def _control_pause(self):\n self.player.pause()", "def media_pause(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, data)", "async def _pause(self, ctx: commands.Context):\n try:\n if ctx.voice_state.voice.is_playing:\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')\n\n except AttributeError:\n await ctx.send(\"Can't pause. No song is being played!\")", "async def play_pause(self) -> None:\n return await self.relay(\"play_pause\")()", "def pause(self):\n self.paused = True\n # FIXME?: Why is this not doing anything? 
Shouldn't it be calling into the player API?", "async def async_media_pause(hass: HomeAssistant, entity_id: str | None = None) -> None:\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PAUSE, data)", "async def _pause(self, ctx: commands.Context):\n\n if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "def pause(self):\r\n cmd = MsgHelper.createMessage(Messages.CMD_PAUSE)\r\n self.mailbox.push( cmd, high_priority = True )", "def media_play_pause(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, data)", "async def _pause(self, ctx: commands.Context):\n\n if not ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "async def async_media_pause(self):\n self._pause_duration = self._meural_device[\"imageDuration\"]\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": 0})", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def media_play_pause(self):\n if self._state == STATE_PLAYING:\n self._state = STATE_PAUSED\n else:\n self._state = STATE_PLAYING", "async def async_media_pause(self) -> None:\n if self._state.get(\"trackType\") == \"webradio\":\n await self._volumio.stop()\n else:\n await self._volumio.pause()", "async def pause(self, ctx):\n if not await self.control_checks(ctx):\n return\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n if self.is_playing(server_id):\n srv['player'].pause()\n else:\n srv['player'].resume()", "def pause_song(self):\r\n if self.isPlaying:\r\n self.playSong[0].pause()\r\n print(\"Song paused. 
To continue type Play.\")\r\n else:\r\n print(\"Play a song first...\")", "async def pause(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if player.paused:\n await player.set_pause(False)\n await ctx.send(\"Resumed.\")\n else:\n await player.set_pause(True)\n await ctx.send(\"Paused.\")", "def play_pause(self):\n return self._call_player_proxy('PlayPause', None)", "def pause(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPause())", "def pause(self,event=None):\r\n # If pause -> pause or stop -> pause, ignore, or if no video\r\n if not self.isPlaying():\r\n return\r\n # If play -> pause\r\n self.progress = time.time() - self.startTimestamp\r\n if self.hasAudio:\r\n mixer.music.pause()\r\n self.state = VideoPlayer.State.PAUSED", "async def pause(self, ctx):\n state = self.get_voice_state(ctx.message.server)\n if state.is_playing():\n player = state.player\n player.pause()\n await self.bot.say('Paused.')", "async def pause(self):\n return await self.send_command_and_read_reply(\n Protocol1Command(command=\"\", execution_command=\"K\")\n )", "async def set_pause(self, pause: bool):\n await self._bot.lavalink.ws.send(op='pause', guildId=self.guild_id, pause=pause)\n self.paused = pause", "async def pause(self, msg):\n if msg.author.voice is not None and msg.voice_client is not None:\n if msg.voice_client.is_paused() is True:\n return await msg.send(\"Song is already paused\")\n\n if msg.voice_client.is_paused() is False:\n msg.voice_client.pause()\n await msg.message.add_reaction(emoji='✅')", "def pause(self):\n self.block.mobile = not self.block.mobile\n if not self.paused:\n self.paused = True\n # Also print paused message\n self.screen.print(\"PAUSED\")\n else:\n self.paused = False\n self.screen.print(\"\")\n # Also reset tick time\n self.t = time.time()", "def pause(self) -> None:\n self.system.notify(\"Jarvis::Paused\")\n self.media.pause()", "def pause_video(self):\n\n if self.is_playing and self.is_paused is False:\n print(f\"Pausing video: {self.playing_now}\")\n self.is_paused = True\n elif self.is_paused:\n print(f\"Video already paused: {self.playing_now}\")\n elif self.is_playing is False:\n print(\"Cannot pause video: No video is currently playing\")", "async def pause(self, ctx):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.pause()\n await ctx.send(\"Playing paused.\")\n else:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "def pause_video(self):\n if self.current_video is None:\n print(\"Cannot pause video: No video is currently playing\")\n elif self.current_paused is False:\n print(\"Pausing video:\", self.current_video.title)\n self.current_paused = True\n elif self.current_paused:\n print(\"Video already paused:\", self.current_video.title)", "def pause_video(self):\n if self.is_paused() is not None:\n print(\"Video already paused: {}\".format(self.is_paused()._title))\n elif self.is_playing() is not None:\n print(\"Pausing video: {}\".format(self.is_playing()._title))\n self.is_playing()._status = 2\n else:\n print(\"Cannot pause video: No video is currently playing\")", "def pause(self,event=None):\r\n if 
self.controlLock.locked():\r\n return\r\n self.controlLock.acquire()\r\n self.videoPlayer.pause()\r\n for dp in self.dataPlayers:\r\n dp.update(self.videoPlayer.startTimestamp)\r\n self.controlLock.release()", "def pause_video(self):\n if self.now_playing_videoid:\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n if self.pause == True:\n print(f\"Video already paused: {video_playing.title}\")\n else:\n print(f\"Pausing video: {video_playing.title}\")\n self.pause = True\n \n else: \n print(f\"Cannot pause video: No video is currently playing\")\n\n # print(\"pause_video needs implementation\")", "def pause(self) :\n raise NotImplementedError(\"pause not implemented\")", "def pause_play(self):\n\n try:\n if self.entrada:\n if self.estado == \"playing\": # pausa\n self.__pause()\n\n elif self.estado == \"paused\":\n self.__pause(True)\n self.estado = \"playing\"\n self.emit(\"estado\", \"playing\")\n\n else:\n #if self.uri: self.load(self.uri)\n pass\n\n except Exception, e:\n print \"HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR\", e", "def pause_video(self):\n if self._paused:\n print(f\"Video already paused: {self._current_video.title}\")\n return\n elif self._current_video is None:\n print(\"Cannot pause video: No video is currently playing\")\n return\n print(f\"Pausing video: {self._current_video.title}\")\n self._paused = True", "def execute_pause(self):\n pass", "def __pause(self, reset=False):\n\n self.entrada.write('pause 0\\n')\n self.entrada.flush()\n self.__new_handle(reset)\n self.estado = \"paused\"\n self.emit(\"estado\", \"paused\")", "def pause_play(self):\n\n if self.estado == gst.STATE_PAUSED \\\n or self.estado == gst.STATE_NULL \\\n or self.estado == gst.STATE_READY:\n self.__play()\n\n elif self.estado == gst.STATE_PLAYING:\n self.__pause()", "async def test_media_play_pause(player) -> None:\n assert player.state == STATE_OFF\n await player.async_media_play_pause()\n assert player.state == STATE_PLAYING\n await player.async_media_play_pause()\n assert player.state == STATE_PAUSED", "async def toggle_play_pause(self):\n _LOGGER.debug(\"[Foobar2k] In Play / Pause\")\n if (self._power == POWER_ON):\n if (self._state == STATE_STOPPED):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PLAY_PLAYLIST.format(self._current_playlist_id, self._current_index), data=None)\n else: \n await self.prep_fetch(HTTP_POST, POST_PLAYER_PAUSE_TOGGLE, data=None)", "def pause_video(self):\n\n if self.playerType == VLC:\n if self.playMode == FFMPEG:\n self.FFmpegTimer.stop()\n else:\n if self.mediaListPlayer.get_state() != vlc.State.Paused:\n\n self.timer.stop()\n self.timer_spectro.stop()\n\n self.mediaListPlayer.pause()\n # wait for pause\n\n # wait until video is paused or ended\n while True:\n if self.mediaListPlayer.get_state() in [vlc.State.Paused, vlc.State.Ended]:\n break\n\n # second video together\n if self.simultaneousMedia:\n if self.mediaListPlayer2.get_state() != vlc.State.Paused:\n self.mediaListPlayer2.pause()\n\n logging.debug(\"pause_video: player #1 state: {}\".format(self.mediaListPlayer.get_state()))\n if self.simultaneousMedia:\n logging.debug('pause_video: player #2 state {}'.format(self.mediaListPlayer2.get_state()))\n pass\n\n time.sleep(1)\n self.timer_out()\n self.timer_spectro_out()", "def pause(self):\n if not self.paused:\n pygame.mixer.music.pause()\n self.paused = True\n else:\n pygame.mixer.music.unpause()\n self.paused = False", "def force_pause(self):\n self.timer.stop()\n QMetaObject.invokeMethod(self.video_player, \"pause\", 
Qt.QueuedConnection)\n self.paused = True\n\n # Re-enable video buttons\n self.update.workerPaused.emit()", "def test_pause(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.queue(source)\n player.play()\n start_time = time.time()\n\n stage = 0\n while player.source:\n if stage == 0 and time.time() - start_time > 0.25:\n player.pause()\n stage = 1\n if stage == 1 and time.time() - start_time > 0.75:\n player.play()\n stage = 2\n player.dispatch_events()", "def pause(self):\n return self.client.api.pause(self.id)", "def pause(self):\n pass", "def pause(self):\n pass", "def on_pause(self, event):\n self.pre_check(event)\n if not self.get_player(event.guild.id).paused:\n self.get_player(event.guild.id).pause()", "def on_pause(self):\r\n store = get_store()\r\n store.put(\"pause\", value=self.sm.current)\r\n return True", "def pause(self, _):\n if not self.is_ended:\n self.canvas.create_text(self.game.width // 2,\n self.game.height // 2,\n text=\"Paused\",\n font=(Game.FONT, 50),\n fill=Game.TEXT_COLOUR,\n tag='pause_text')\n self.game.running = not self.game.running\n if self.game.running:\n self.canvas.delete('pause_text')", "def pause(self):\n self.paused_time = time.time()\n self.paused = True", "def __pause(self):\n\n self.set_state(gst.STATE_PAUSED)", "def pause(self):\n raise NotImplementedError()", "def pause(self):\n raise NotImplementedError()", "def pause(self):\n \n self.pause = True", "def pause_videos(self):\n if (not self.playing) or (self.pausing) or (self.shutdown):\n return\n self.enable_video_buttons(False, False, False)\n self.pausing = True\n\n # Pause the background worker\n self.worker.force_pause()", "def pause(self, state):\n resp = yield from self.command('pause '+str(state))\n return True", "def pause_video(self):\n global value\n if value==1:\n value=2\n print(f\"Pausing video: {name}\")\n elif value==0:\n print(f\"Cannot pause video: No video is currently playing\")\n else:\n print(f\"Video already paused: {name}\")", "def pause(self, enabled=True):\n if self._dev_name is None:\n raise SpeakerError\n\n #self._device.pause(enabled) that statement throws an exception\n self._paused = enabled", "async def async_pause(self) -> None:\n await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))", "def pause_button(self):\r\n self.is_action = True\r\n self.update_settings()\r\n self.is_pause = True\r\n if self.pause_call is not None:\r\n self.wm.after(1, self.pause_call)", "def test_pause_queue(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.pause()\n player.queue(source)\n\n while player.source:\n player.dispatch_events()\n player.play()", "async def async_media_play(self) -> None:\n await self._projector.send_command(PLAY)", "def pause(self, waitMs):\n return self.sendCommand(\"_\" + str(int(waitMs)))", "def pause_video(self):\n current_video = self._video_library.get_video()\n if self.title == current_video.title:\n print(f\"Pausing video: {self.title}\")\n elif current_video == None:\n print(f\"Cannot pause video: No video is currently playing\")\n else:\n print(f\"Video already paused: {self.title}\")", "def pause(self):\n\t\tpass", "def on_pause(self):\n pass", "def send_worker_pause(self, worker_id):\n pass", "def set_pause(self, pause):\n\n game_status = self.game.get_game_status();\n if(game_status == GameStatus.NotStarted or game_status == GameStatus.Finished):\n return;\n\n if(pause == True):\n self.game.set_game_status(GameStatus.Paused);\n self.bttn_pause.set_text(\"Reprendre la 
partie\");\n\n self.game.stop_timer();\n\n elif(pause == False):\n self.game.set_game_status(GameStatus.InProgress);\n self.bttn_pause.set_text(\"Mettre en pause\");\n\n self.game.start_timer();", "def pause(self, id=None):\n self._select_interface(self._rc_pause, self._http_pause, id)", "def toggle_pause(self):\n message = None\n while self.model.paused:\n for pygame_event in pygame.event.get():\n print('event: ', pygame_event)\n if pygame_event.type == pygame.KEYDOWN:\n message = self.down_keys(message, pygame_event)\n if isinstance(message, events.TogglePause):\n self.ev_manager.post(message)", "def test_pause(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n message: Optional[str],\n) -> None:\n subject.pause(msg=message)\n decoy.verify(mock_engine_client.wait_for_resume(message=message))", "def pause(ms=None): #py:pause\n if ms is None:\n RUR._pause_()\n else:\n RUR._pause_(ms)", "def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break", "def control_pause(self, wait_for_paused: bool = True) -> None:\n self.__logger.debug('Eva.control_pause called')\n return self.__http_client.control_pause(wait_for_paused=wait_for_paused)", "def do_pause(self, args) :\r\n self.__Logger.warn(\"pausing all simulations\")\r\n\r\n self.cmds[\"SimulatorPaused\"] = True", "async def async_media_play(self):\n if self._pause_duration != 0:\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": self._pause_duration})\n else:\n await self.meural.update_device(self.meural_device_id, {\"imageDuration\": 300})", "def send_music_pause_event_and_validate(self):\n paused_detection_timeout = 10\n if not self.dut.is_streaming():\n self.logger.info('Music not streaming. Skipping pause event..')\n return\n self.logger.info(\"Pausing video...\")\n is_paused = self.dut.music_control_events(\n AVRCPSTATUS, self.dut.apollo_log_regex.AVRCP_PAUSE_REGEX)\n if not is_paused:\n self.logger.error('AVRCP Paused statue not found')\n raise TestActsError('AVRCP Paused status not found.')\n wait_until(\n lambda: not self.dut.is_streaming(),\n paused_detection_timeout,\n sleep_s=0.25)\n if self.dut.is_streaming():\n self.logger.error('Device is still in deviceA2DPStreaming state')\n raise TestActsError(\n 'Device is still in deviceA2DPStreaming state.')", "def _pause(self):\n data_paused = None\n while self.target.is_active and data_paused != '01':\n data_paused = self._mem_read(self.data['paused'][0], 1)\n time.sleep(self.pause_time)\n self.data['paused'][1] = data_paused\n return", "def hw_pause(self):\n self.logger.debug(\"Pause called (no-op)\")", "async def pause(self, ctx: commands.Context) -> Optional[bool]:\n\n if ctx.voice_client.is_paused():\n await self.call_event(\n \"on_music_error\", ctx, AlreadyPaused(\"Player is already paused.\")\n )\n return\n\n if self.type == ManagerType.LAVALINK:\n await ctx.voice_client.set_pause(pause=True)\n else:\n (await self.now_playing(ctx)).last_pause_timestamp = time.time()\n ctx.voice_client.pause()\n\n create_task(self.bot.loop, self.ensure_activity(ctx))\n return True", "def video_playing(self, paused, title, total_time, position):\n self.device.responses['playing'] = PlayingResponse(\n paused=paused, title=title,\n total_time=total_time, position=position,\n mediakind=3)", "def do_pause(self, text):\n sm.input(text + '\\n')", "def pause(self):\n self._cleanup()\n self._paused = True", "def pause(instance):\n if instance.state == STOPPED:\n return\n\n Queue.objects.add(function=\"pause\", instance=instance)", 
"def name(self) -> Text:\n\n return \"pause_conversation\"", "def paused():\n pause_time = time()\n cache.set('paused', pause_time)\n socketio.emit('paused', pause_time)", "def pause():\n click.pause()", "def pause(): # real signature unknown; restored from __doc__\n pass", "def pause_update(self):\n if self.pause_event:\n self.pause_event.activate()", "def force_unpause(self):\n self.timer.start(self.timer_interval)\n QMetaObject.invokeMethod(self.video_player, \"play\", Qt.QueuedConnection)\n self.paused = False\n\n # Re-enable video buttons\n self.update.workerUnpaused.emit()", "def pause(self):\n self._event.clear()", "def togglePause(self):\n self.model.paused = not self.model.paused\n self.proc.send_signal(signal.SIGUSR1)", "def pause(self, instance, callback):\n pass" ]
[ "0.809023", "0.759442", "0.7524616", "0.75083613", "0.7491196", "0.7491196", "0.7414291", "0.7250745", "0.724168", "0.72367144", "0.71571475", "0.71521604", "0.71426934", "0.71061397", "0.70989716", "0.7085035", "0.70630133", "0.6954041", "0.6925387", "0.68717307", "0.6850086", "0.6817418", "0.68085146", "0.6805329", "0.6804825", "0.68026525", "0.67627025", "0.67408997", "0.67057854", "0.66973066", "0.6651319", "0.66085964", "0.6579777", "0.6574487", "0.65474284", "0.6519089", "0.6504719", "0.6504257", "0.64810145", "0.64757967", "0.64634836", "0.6457215", "0.6451056", "0.6436954", "0.6386968", "0.6378572", "0.6377863", "0.6370631", "0.63573253", "0.63571495", "0.6353249", "0.6325762", "0.6325762", "0.6320812", "0.63174653", "0.6302726", "0.6295444", "0.62899953", "0.626861", "0.626861", "0.6263197", "0.62341565", "0.6223127", "0.6222381", "0.62183106", "0.62112874", "0.620586", "0.61933625", "0.6192836", "0.617278", "0.61599153", "0.6148293", "0.6124835", "0.6106747", "0.60923916", "0.60830873", "0.6082949", "0.6065203", "0.6064828", "0.60103905", "0.6005227", "0.59869176", "0.59829706", "0.5953798", "0.5926067", "0.59109426", "0.59025675", "0.58928263", "0.58895093", "0.588628", "0.5850517", "0.5844092", "0.58235395", "0.5819357", "0.5805813", "0.57956696", "0.5790981", "0.57865816", "0.5782445", "0.5777316" ]
0.77531683
1
Send next track command.
def media_next_track(self) -> None: self._client.next()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def media_next_track(self):\n self.handleCommand(27)", "async def async_media_next_track(self):\n await self.local_meural.send_key_right()", "async def async_media_next_track(self):\n if not self._slave_mode:\n if not self._playing_mass:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:next\", None)\n self._playhead_position = 0\n self._duration = 0\n self._position_updated_at = utcnow()\n self._trackc = None\n if value != \"OK\":\n _LOGGER.warning(\"Failed skip to next track. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self.hass.services.async_call(\"mass\",\"queue_command\", service_data = {\"entity_id\": self.entity_id, \"command\": \"next\"})\n else:\n await self._master.async_media_next_track()", "def media_next_track(self):\n self._device.next_track()", "async def async_media_next_track(self):\n await self.coordinator.data.player_action(self.zone_id, \"next\")\n await self.coordinator.async_refresh()", "def media_next_track(self):\n self._lms.query(self._id, 'playlist', 'index', '+1')\n self.update_ha_state()", "def _next(verbose=0, quiet=False):\n Spotify.request('me/player/next', method='POST')\n if not quiet:\n from cli.commands.status import status\n status.callback(verbose=verbose)\n\n return", "def media_next_track(self) -> None:\n self._state = self._player.next_track()", "def on_next_command(self, event):\n self.pre_check(event)\n if not self.get_player(event.guild.id).queue:\n return event.channel.send_message(\"There aren't any songs queued.\")\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[0].metadata,\n )\n event.channel.send_message(\n \"Next in queue is ``{}`` by ``{}`` with length ``{}`` minutes using ``{}``.\".format(\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )", "def media_next_track(self) -> None:\n media_controller = self._media_controller()\n media_controller.queue_next()", "async def async_media_next_track(self) -> None:\n await self._device.next()", "async def async_media_next_track(self) -> None:\n cur_track_index = self._get_current_track_index()\n\n await self._table.active_playlist.play(\n self._table.active_playlist.tracks[cur_track_index + 1]\n )", "def next_track(self, device=None, **kwargs):\n return self._post(API.NEXT.value, device_id=device, check_202=True, **kwargs)", "async def play_next(self):\n _LOGGER.debug(\"[Foobar2k] In Next\")\n if (self._power == POWER_ON):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_NEXT, data=None)\n time.sleep(0.2)\n await self.async_update()", "async def async_media_next_track(self) -> None:\n await self._volumio.next()", "def send_next(self):\n event = next(self)\n self.send(event)\n return event", "async def async_media_next_track(self):\n if self._raumfeld.group_is_valid(self._rooms):\n await self._raumfeld.async_group_next_track(self._rooms)\n elif self._is_spotify_sroom:\n await self._raumfeld.async_room_next_track(self._room)\n else:\n log_debug(\n \"Method was called although speaker group '%s' is invalid\" % self._rooms\n )\n await self.async_update_track_info()", "def sendNextPage(self):\n self.collector.callRemote(\"gotPage\", self.nextPage(), pbanswer=False)", "def add_next(self, requester: int, track: dict):\n self.queue.insert(0, AudioTrack().build(track, requester))", "def track(self,name):\n self.sendCommand(\"global /track\",name)", "async def next_player_message(ctx):\n message = \"It is now {}'s turn.\".format(NNB.next_player(ctx.message.author.id).mention)\n await 
ctx.send(message)", "def media_next_track(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)", "def sendNextPage(self):\n if not self.chunks:\n return\n val = self.chunks.pop(0)\n self.producer.resumeProducing()\n self.collector.callRemote(\"gotPage\", val, pbanswer=False)", "def toggle_next(self):\n self.bot.loop.call_soon_threadsafe(self.play_next_song.set)", "async def async_media_next_track(\n hass: HomeAssistant, entity_id: str | None = None\n) -> None:\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)", "def user_next_command(self, tracer):\n try:\n self.queue.get_nowait()(tracer)\n except Empty:\n return", "def play_next(self):\n\t\tprint(\"plext_next() args:\")\n\t\tprint(args)\n\t\tself.next()\n\t\tself.play()", "async def next(self):\n await self._pytheos.api.player.play_next(self.id)", "def send(event):\r\n dog_stats_api.increment('track.send.count')\r\n\r\n for name, backend in backends.iteritems():\r\n with dog_stats_api.timer('track.send.backend.{0}'.format(name)):\r\n backend.send(event)", "def media_previous_track(self):\n self.handleCommand(28)", "def on_limit(self, track):\n print(track)", "def go_to_next_spike(self, ):\n self._jump_to_spike(+1)", "def next(self, event):\n self.result = 1", "def on_queue_next_command(self, event, index):\n self.pre_check(event)\n self.same_channel_check(event)\n if 1 < index <= len(self.get_player(event.guild.id).queue):\n index -= 1\n self.get_player(event.guild.id).queue.insert(\n 0,\n self.get_player(event.guild.id).queue.pop(index),\n )\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[0].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"Moved ``{}`` to the front of the queue.\".format(\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n else:\n api_loop(event.channel.send_message, \"Invalid index input.\")", "def quick_play(self, index=0):\n self.play(self.download(self.results[index]))", "def next(self):\n resp = yield from self.command('next')\n return True", "async def skip(self, ctx: commands.Context) -> None:\n player = self.get_player(ctx.guild)\n\n # Don't even try if we're not connected.\n if not player.is_connected:\n fail = Embeds.status(success=False, desc=\"I'm not even connected. 
😪\")\n return await ctx.send(embed=fail)\n\n # Handle all the track calculations using futuretrack().\n player.action = Action.skip\n if (track := player.futuretrack()) is not None:\n embed = discord.Embed(\n title=\"Skipping to next track.\",\n description=f\"[{track.title}]({track.uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(track.length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if track.thumb is not None:\n embed.set_thumbnail(url=track.thumb)\n await ctx.send(embed=embed)\n\n # Use the Wavelink listener to advance if the bot is playing audio.\n if player.is_playing:\n await player.stop()\n else:\n await player.advance()\n else:\n fail = Embeds.status(success=False, desc=\"No tracks to skip to!\")\n await ctx.send(embed=fail)", "def toggle_next_speech(self):\n self.bot.loop.call_soon_threadsafe(self.play_next_speech.set)", "async def _queue(self, ctx: commands.Context, *, page: int = 1):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Empty queue.')\n\n items_per_page = 10\n pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)\n\n start = (page - 1) * items_per_page\n end = start + items_per_page\n\n queue = ''\n for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):\n queue += '`{0}.` [**{1.source.title}**]({1.source.url})\\n'.format(i + 1, song)\n\n embed = (discord.Embed(description='**{} tracks:**\\n\\n{}'.format(len(ctx.voice_state.songs), queue))\n .set_footer(text='Viewing page {}/{}'.format(page, pages)))\n await ctx.send(embed=embed)", "def next_step(self):\n self.proceed()\n self.execute_current()", "async def _queue(self, ctx: commands.Context, *, page: int = 1):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('The queue is empty.')\n\n items_per_page = 1\n pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)\n\n start = (page - 1) * items_per_page\n end = start + items_per_page\n\n queue = ''\n for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):\n queue += f\"`{i + 1}.` [**{song.source.title}**]({song.source.url})\\n\"\n\n embed = (discord.Embed(\n description=f\"**{len(ctx.voice_state.songs)} tracks:**\\n\\n{queue}\")\n .set_footer(\n text=f\"Viewing page {page}/{pages}\"))\n\n await ctx.send(embed=embed)", "def __next_utterance(self, name, completed):\n text = self.__say_queue.get()\n if text is not None:\n self.__engine.say(self.__to_printable(text))\n else:\n self.__engine.endLoop()", "def next(self):\n print(f\" {colored('[', 'yellow')}{bold(self.progress[self.pos])}{colored(']', 'yellow')} \"\n f\"{bold('Processing, please wait...')}\",\n end=\"\\r\",\n flush=True\n )\n self.increment()", "def test_track(self):\n if not os.environ['SENDSAY_TEST_EMAIL']:\n raise Exception(\"SENDSAY_TEST_EMAIL doesn't exist in environmental variables.\")\n\n data = data_from_file(\"test_track_wait.json\")\n response = self.api.request('issue.send', {\n 'sendwhen':'now',\n 'letter': {\n 'subject' : data['letter']['subject'],\n 'from.name' : data['letter']['from.name'],\n 'from.email': data['letter']['from.email'],\n 'message': data['letter']['message'],\n 'attaches': [\n attach_file(x) for x in data['letter']['attaches']\n ],\n },\n 'relink' : 1,\n 'users.list': os.environ['SENDSAY_TEST_EMAIL'],\n 'group' : 'masssending',\n })\n\n self.assertIn('track.id', response.data,\n msg=\"'issue.send' request haven't returned 'track.id'\")\n\n track = response.track\n if 
track:\n while track.check():\n sleep(5)\n\n self.assertEqual(track.status, -1, msg=\"issue.send tracking haven't finished with success\")\n self.assertEqual(track.status_message, 'FINISHED_WITH_SUCCESS',\n msg=\"issue.send tracking haven't returned a correct status message\")", "def next_song(sid):\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.next(host['ip'])\n return jsonify({})\n except:\n abort(400)", "def getNextWaypoint(self, request, context):\n\n waypointNumber = self.vehicle.commands.next -1\n missionlist = self.vehicle.waypoints\n if len(missionlist)==0:\n waypointNumber = -1\n dronePosition = droneconnect_pb2.Position(lat = float(0),\n lon = float(0),\n gpsAltitude = float(0))\n else:\n waypoint = missionlist[waypointNumber]\n dronePosition = droneconnect_pb2.Position(lat = float(waypoint[0]),\n lon = float(waypoint[1]),\n gpsAltitude = float(waypoint[2]))\n \n return droneconnect_pb2.IndexedPosition(position = dronePosition, index = waypointNumber)", "def advance(self):\n if self.has_more_commands():\n self.counter += 1", "async def async_media_previous_track(self):\n await self.local_meural.send_key_left()", "async def seek(self, pos: int):\n pos = max(pos, 0) # Prevent seeking before start of track\n await self._bot.lavalink.ws.send(op='seek', guildId=self.guild_id, position=pos)", "def step(self):\r\n cmd = struct.pack('>B', 54)\r\n self.send(cmd)", "def runStep(self):\n if self.done:\n pass\n elif self.frame_num < self.num_iters:\n start, end = self.t, self.t + 1\n frame = self.data[start:end, :]\n t = time.time()\n id = self.client.put([self.t, frame], \"acq_bubble\" + str(self.frame_num))\n self.timestamp.append([time.time(), self.frame_num])\n try:\n self.q_out.put([str(self.frame_num), id])\n self.frame_num += 1\n self.t += self.l\n # also log to disk #TODO: spawn separate process here?\n except Exception as e:\n logger.error(\"Acquirer general exception: {}\".format(e))\n logger.error(traceback.format_exc())\n\n\n time.sleep(1/self.framerate) # pretend framerate\n self.total_times.append(time.time() - t)\n\n else: # simulating a done signal from the source\n logger.error(\"Done with all available frames: {0}\".format(self.frame_num))\n self.data = None\n self.q_comm.put(None)\n self.done = True # stay awake in case we get e.g. a shutdown signal", "def next(self):\n return self._call_player_proxy('Next', None)", "async def jump(self, ctx, song_index: int):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. 
[{ctx.message.author.mention}]'))\n\n if song_index > len(player.queue) + 1:\n return await ctx.send(embed=self.error_embed(\"There is no such song in the queue.\"))\n\n for i in range(song_index - 1):\n player.queue.pop(0)\n await player.skip()\n await ctx.message.add_reaction(\"✅\")", "def advance(self):\n if self.has_more_commands():\n self.counter += 1\n # if self._is_label():\n # self.counter += 1", "def send_next_packet():\n #\"global\" required here to be able to read and write to SEQUENCE \n global SEQUENCE\n data = sys.stdin.buffer.read(DATA_SIZE)\n if (len(data) > 0):\n rtt_start = time.time()\n msg_obj = {\"sequence\": SEQUENCE, \"data\": b64encode(data).decode(), \"ack\": True, \"eof\": False}\n if handle_packet_send(msg_obj):\n log(f\"Sequence number: \" + str(SEQUENCE))\n SEQUENCE += len(data)\n log(f'updating sender seq: {SEQUENCE}')\n return PacketInfo(msg_obj, rtt_start)\n return False", "def test_next_song(monkeypatch, bot, bot_arg, update):\n tracks = [fake_res['title'], 'crop killer']\n song_next = Song(fake_res['artist'], 'crop killer', fake_res['album'])\n bot.log_result('chat_id', fake_log)\n monkeypatch.setattr(bot, 'get_album_tracks', lambda x: tracks)\n monkeypatch.setattr(bot, 'get_lyrics', lambda s, c: f'Searching for {s}')\n\n next_song(bot_arg, update)\n assert bot_arg.msg_log[0] == f'Searching for {song_next}'", "def tick(self):\n \n # next historical order to be sent\n mktorder = self.hist_orders[self.mkt_idx+1]\n # if I have queued orders\n if self.my_queue:\n # if my order reaches the market before the next historical order\n if self.my_queue[0].timestamp < mktorder[self.col_idx['timestamp']]:\n my_order = self.my_queue.popleft()\n self._send_to_market(my_order, is_mine=True)\n self.mkt_time = my_order[self.col_idx['timestamp']]\n return\n \n # otherwise sent next historical order\n self._send_historical_order(mktorder)", "def upload_add_progress(self, nbytes):\n\n self.send_cur_nbytes += nbytes\n if self.send_goal_nbytes != 0:\n self.republish_output()", "def next(self):\n\t\tif not self.play_random:\n\t\t\t# normal\n\t\t\tif self.direction is \"forward\":\n\t\t\t\tself._current_id += 1\n\t\t\telse:\n\t\t\t\tself._current_id -= 1\n\t\t\tself.limit_id_range()\n\n\t\t# random\n\t\telse:\n\t\t\tif not len(self._id_queue):\n\t\t\t\tself._id_queue = range(len(self.files))\n\t\t\telse:\n\t\t\t\tself._current_id = random.randrange(len(self._id_queue))\n\t\t\t\tself._id_queue.remove(self.current_id)\n\t\t\t\tself._id_queue_past.append(self.current_id)\n\n\t\tself._dispatch_update()", "def send_resp(self):\n self.n_send_resp += 1", "def Track(self, argin):\n handler = self.get_command_object(\"Track\")\n handler(argin)", "def track(self, q, page=None):\r\n return self.get('track', q, page)", "def next_btn(self: object, controller: Iterator[str]) -> str:\n try:\n to_print = next(controller)\n if isinstance(to_print, list):\n return '\\n'.join(to_print)\n else: \n return to_print\n\n except StopIteration:\n return \"The protocole is finished\"", "def get_next_command( self, ):\n self.ix_command += 1\n if self.ix_command >= len( self.command_list ):\n ret = None\n else:\n ret = self.command_list[ self.ix_command ]\n# print( f\"command = { self.ix_command} {ret} \", flush = True )\n return ret", "def _track_finished(self, *_args):\n if not self.loop:\n self.stop()\n else:\n self.seek(0.)\n self.player.play()", "def skip_to_next(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackSkipToNext())", "def Next(self):\n next_task = 
fleetspeak_test_lib.PopMessage(self.client_id)\n if next_task is None:\n return False\n\n try:\n responses = self.client_mock.HandleMessage(next_task)\n except Exception as e: # pylint: disable=broad-except\n logging.exception(\"Error %s occurred in client\", e)\n responses = [\n self.client_mock.GenerateStatusMessage(\n next_task, 1, status=\"GENERIC_ERROR\")\n ]\n\n # Now insert those on the flow state queue\n for response in responses:\n self.PushToStateQueue(response)\n\n return True", "def sendTrack(self, interpreter, title, link=None):\n\t\treturn urllib2.urlopen(\n\t\t\t\t\turllib2.Request(\n\t\t\t\t\t\tself.api_base_url+self.api_key,\n\t\t\t\t\t\turllib.urlencode({\n\t\t\t\t\t\t\t'interpreter': interpreter,\n\t\t\t\t\t\t\t'title': title,\n\t\t\t\t\t\t\t'link': link\n\t\t\t\t\t\t}))\n\t\t\t\t\t).read()", "def tick(self):\n self.current_count += 1\n self.progress(self.current_count)", "def next_question(update: Update, context: CallbackContext) -> None:\n update.callback_query.answer()\n if not isinstance(context.chat_data, dict):\n raise AssertionError\n if context.chat_data['question_number'] < (\n len(context.chat_data['qlist']) - 1):\n context.chat_data['question_number'] += 1\n context.chat_data['question_attempted_by'] = []\n msg_text, option_keyboard = Quiz.parse_question(\n context.chat_data['qlist'][\n context.chat_data['question_number']])\n option_keyboard.append([\n InlineKeyboardButton(\"Next (Admin Only)\", callback_data=\"next\")\n ])\n context.chat_data['message'] = context.bot.edit_message_text(\n text=msg_text,\n chat_id=context.chat_data['message'].chat.id,\n message_id=context.chat_data['message'].message_id,\n reply_markup=InlineKeyboardMarkup(option_keyboard),\n parse_mode=ParseMode.MARKDOWN)\n else:\n Quiz.send_scoreboard(context=context)", "def do_next(*_args):\n nonlocal current_prompt_id\n current_prompt_id = None\n last_wav_path = None\n next_button.config(bg=\"#F0F0F0\")\n if prompts_left:\n wav_names = [wav_path.name for wav_path in wav_dir.glob(\"*.wav\")]\n current_prompt_id = prompts_left.pop()\n wav_prefix = f\"{current_prompt_id}_\"\n while has_wav(wav_names, wav_prefix):\n current_prompt_id = None\n if prompts_left:\n current_prompt_id = prompts_left.pop()\n wav_prefix = f\"{current_prompt_id}_\"\n else:\n break\n\n if current_prompt_id:\n textbox.delete(1.0, tk.END)\n textbox.insert(1.0, prompts[current_prompt_id])\n next_button[\"state\"] = tk.DISABLED\n else:\n tkinter.messagebox.showinfo(message=\"All done :)\")\n\n progress[\"value\"] = 100 * ((total_prompts - len(prompts_left)) / total_prompts)", "def send_req(self):\n self.n_send_req += 1", "def track_04():\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)\n return \"Ok\"", "def track_03():\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)\n return \"Ok\"", "async def async_media_previous_track(self):\n if not self._slave_mode:\n if not self._playing_mass:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:prev\", None)\n self._playhead_position = 0\n self._duration = 0\n self._position_updated_at = utcnow()\n self._trackc = None\n if value != \"OK\":\n _LOGGER.warning(\"Failed to skip to previous track.\" \" Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self.hass.services.async_call(\"mass\",\"queue_command\", service_data = {\"entity_id\": self.entity_id, \"command\": \"previous\"})\n else:\n await self._master.async_media_previous_track()", "def send(self):\n \n # 
Check that we have something to send\n if len(self.items) > 0:\n \n # If no items 'sent' or 'playing', send next item in queue\n sent_items = [item for item in self.playlist_store.find({'status':'sent'})]\n playing_items = [item for item in self.playlist_store.find({'status':'playing'})]\n \n # Look for any expired items in playing\n expired = False\n for item in playing_items:\n end_date = item['start_date'] + datetime.timedelta(seconds=item['track']['track']['length'])\n expired = expired or end_date < datetime.datetime.now()\n \n # Assume we send nothing\n send_item = False\n # Conditions under which we send...\n # 1. Nothing sent, and nothing playing\n send_item = send_item or (len(sent_items) == 0 and len(playing_items) == 0)\n # 2. Nothing sent, and something expired marked as playing\n send_item = send_item or (len(sent_items) == 0 and len(playing_items) > 0 and expired)\n \n if send_item:\n \n # Send next item in queue\n self.current_item = self.items.pop(0)\n print \" [x] Sending %r\" % (self.current_item['track']['track']['name'],)\n \n # Send using the broadcast exchange (Pub/Sub)\n self.amqp_primary_channel.basic_publish(exchange=self.amqp_broadcast_exchange,\n routing_key='',\n body=json.dumps({'_id': str(self.current_item['_id']),\n 'track': self.current_item['track'],\n 'from': self.current_item['from']}),\n properties=pika.BasicProperties(\n content_type=\"application/json\",\n delivery_mode=2))\n \n # Mark item as sent\n self.current_item['status'] = 'sent'\n self.playlist_store.update({'_id': self.current_item['_id']}, self.current_item)\n \n elif len(sent_items) == 0 and len(playing_items) > 0 and not expired:\n # TODO\n # If something playing and nothing sent, set up timer\n # timer = Timer(self.current_item['track']['track']['length'], self.next)\n # timer.start()\n pass", "def start_loop_recording(self, track):\n pass", "def next(self):\n if self.skip:\n return\n\n self.counter += 1\n if self.pbar is None and (time.time() - self.start_time) > self.threshold:\n self.pbar = tqdm(total=self.n, desc=self.title, initial=self.counter)\n elif self.pbar is not None:\n self.pbar.update(1)", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def next_cmd(self):\n if not self.validate():\n self.initial_focus.focus_set()\n return\n self.player_ships[self.values[0]] = self.values[1]\n self.num_players += 1\n self.e1.delete(0, END)\n self.buttonbox()\n self.e1.focus_set()\n self.e2.reset()", "def track(bot, update, args):\n try:\n logger.info(\"Chamando o método 'track'\")\n\n # É feito o rastreamento de cada código informado\n for code in args:\n object_tracking = Tracking()\n places = object_tracking.track(code)\n order = f'HISTÓRICO DO OBJETO {code} \\n ========================================== \\n '\n\n # Concatena as atualiações feitas na encomenda para enviar somente uma mensagem\n if places:\n for place in places:\n order = f'{order} \\n {place} \\n ------------------------------------------'\n else:\n order = f'{order} \\n Objeto não encontrado'\n\n # Envia a mensagem para o usuário\n bot.send_message(chat_id=update.message.chat_id, text=order)\n except Exception as ex:\n error(bot, update, ex)", "def send(self, count: int):\n return self.analytics.send(self.anal_name, count)", "def next_url(self) -> str:\n return 'https://www.mta-dialog.de/stellenmarkt.html?tx_jobs_pi1[action]=next'", "def _send_pong(self):\r\n self._send(\"PONG\")", "def next_wave(self):\n if self._wave == self._level.get_max_wave():\n return\n\n 
self._wave += 1\n\n #Task 1.3 (Status Bar): Update the current wave display here\n self._status_bar.set_wave(self._wave)\n\n #Task 1.5 (Play Controls): Disable the add wave button here (if this is the last wave)\n if self._wave == 20:\n self._wave_button.config(state=tk.DISABLED)\n\n #Generate wave and enqueue\n wave = self._level.get_wave(self._wave, self._game)\n for step, enemy in wave:\n enemy.set_cell_size(self._game.grid.cell_size)\n\n self._game.queue_wave(wave)", "def getNext(self, player):", "async def send(self):", "async def ripgupta(self, ctx, count, *, message):\n int(count)\n gupta = 468209010978455552\n channel = 617525238392946699\n mloop = 0\n int(mloop) \n while mloop > count:\n await channel.send(\"{} {}\".format(gupta.mention, message))\n int(mloop)\n mloop = mloop + 1", "def next_tour_button(self):\r\n if self.paths_gen is None:\r\n SlTrace.lg(\"paths_gen connection has NOT been setup\")\r\n return\r\n \r\n self.paths_gen.next_tour()", "def __send_msg(self, msg):\n self.frame_nb += 1\n self.__send_frame(self.frame_nb, msg)", "def test_track(self):\r\n track = FedexTrackRequest(CONFIG_OBJ)\r\n track.TrackPackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'\r\n track.TrackPackageIdentifier.Value = '798114182456'\r\n track.send_request()\r\n \r\n for match in track.response.TrackDetails:\r\n # This should be the same tracking number on the response that we\r\n # asked for in the request.\r\n self.assertEqual(match.TrackingNumber, tracking_num)", "def track_01():\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)\n return \"Ok\"", "def tick(self):\n self.count += 1", "def advance(self):\n\n try: # kind of hacky ... use for loop instead!\n self.cmd = next(self.__iter__())\n except StopIteration:\n pass", "def on_queued_command(self, event, index=None):\n self.pre_check(event)\n if not self.get_player(event.guild.id).queue:\n api_loop(\n event.channel.send_message,\n \"There aren't any songs queued right now.\",\n )\n elif index is None:\n api_loop(\n event.channel.send_message,\n \"There are {} songs queued ({} minutes). 
To get a specific song's info, just do this command + index.\".format(\n len(self.get_player(event.guild.id).queue),\n self.minutes_format(self.get_player(\n event.guild.id,\n ).queue_length),\n ),\n )\n elif (index.replace(\"-\", \"\").strip(\" \").isdigit() and\n 0 <= (int(index.replace(\"-\", \"\").strip(\" \")) - 1) <=\n len(self.get_player(event.guild.id).queue)):\n ytdata = self.get_ytdl_values(\n self.get_player(event.guild.id).queue[\n int(index.replace(\"-\", \"\").strip(\" \")) - 1\n ].metadata,\n )\n api_loop(\n event.channel.send_message,\n \"The song at index ``{}`` is ``{}`` by ``{}`` with length ``{}`` minutes and is sourced from ``{}``.\".format(\n int(index.replace(\"-\", \"\").strip(\" \")),\n ytdata[\"title\"],\n ytdata[\"uploader\"],\n ytdata[\"time_formated\"],\n ytdata[\"source\"],\n ),\n )\n elif index.replace(\"-\", \"\").isdigit():\n api_loop(event.channel.send_message, \"Invalid index input.\")\n else:\n matched_list = dict()\n for item in self.get_player(event.guild.id).queue:\n ratio = partial_ratio(item.metadata[\"title\"], index)\n if ratio >= 70:\n matched_list[\"#{} ({}% match)\".format(\n self.get_player(event.guild.id).queue.index(item)+1,\n ratio,\n )] = item.metadata[\"title\"]\n if matched_list:\n embed = bot.generic_embed_values(\n title=\"Queue search results\",\n footer_text=\"Requested by {}\".format(event.author),\n non_inlines={\n k: matched_list[k] for k in list(matched_list)[-25:]\n },\n footer_img=event.author.get_avatar_url(size=32),\n timestamp=event.msg.timestamp.isoformat(),\n )\n api_loop(event.channel.send_message, embed=embed)\n else:\n api_loop(\n event.channel.send_message,\n \"No similar items found in queue.\",\n )", "async def rewind(self, ctx: commands.Context) -> None:\n player = self.get_player(ctx.guild)\n\n # Don't even try if we're not connected.\n if not player.is_connected:\n fail = Embeds.status(success=False, desc=\"I'm not even connected. 
😪\")\n return await ctx.send(embed=fail)\n\n # Handle all the track calculations using futuretrack().\n player.action = Action.rewind\n if (track := player.futuretrack()) is not None:\n embed = discord.Embed(\n title=\"Rewinding to previous track.\",\n description=f\"[{track.title}]({track.uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(track.length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if track.thumb is not None:\n embed.set_thumbnail(url=track.thumb)\n await ctx.send(embed=embed)\n\n # Use the Wavelink listener to advance if the bot is playing audio.\n if player.is_playing:\n await player.stop()\n else:\n await player.advance()\n else:\n fail = Embeds.status(success=False, desc=\"No tracks to rewind to!\")\n await ctx.send(embed=fail)", "def nextinline(self, irc, msg, args):\n channel = self.registryValue('checkOpsInChannel')\n if channel == '':\n self.log.error('checkOpsInChannel not set!')\n return\n if channel not in irc.state.channels:\n self.log.warn('not in %s' % channel)\n return\n if msg.nick not in irc.state.channels[channel].ops:\n self.log.warn('denying access to non-chanop user %r' % msg.nick)\n return\n if len(self._queue) > 0:\n nick, notice = self._queue.pop(0)\n response = \"Next in line is %s\" % nick\n if notice is not None:\n response += \" with notice: %s\" % notice\n self._dump_queue()\n irc.reply(response)\n else:\n irc.reply(\"There's nobody queued up right now.\")", "def on_next_clicked(self):\r\n self.signal_step.emit()", "def next(self):\n self.jumpahead(1)", "def episode_step(self):\n self.nsteps += 1" ]
[ "0.7637263", "0.7182071", "0.7067623", "0.693913", "0.6791026", "0.669103", "0.65849733", "0.6515745", "0.65091896", "0.6454078", "0.64358157", "0.64263296", "0.64238304", "0.6292885", "0.6282449", "0.61358553", "0.6131442", "0.6077058", "0.6062422", "0.59923947", "0.59777033", "0.59604144", "0.58847004", "0.58356434", "0.57970893", "0.576467", "0.57493776", "0.5748124", "0.5743673", "0.57293886", "0.5721041", "0.57100666", "0.5679798", "0.56739676", "0.56658465", "0.56567675", "0.5608805", "0.559463", "0.55255735", "0.55235195", "0.5522789", "0.55214155", "0.5498883", "0.54332244", "0.54038334", "0.54018253", "0.536506", "0.53597796", "0.53413504", "0.5333482", "0.533177", "0.5309805", "0.5301499", "0.53012264", "0.52873605", "0.5242037", "0.5238914", "0.52199143", "0.520144", "0.5179046", "0.5179029", "0.51771075", "0.5167136", "0.516316", "0.51558787", "0.51500416", "0.5147466", "0.51278055", "0.5123879", "0.512341", "0.51153463", "0.5115036", "0.5114078", "0.51069903", "0.50895435", "0.50891095", "0.5087879", "0.5087852", "0.5087578", "0.50845945", "0.5076455", "0.50729764", "0.50697213", "0.50686526", "0.5060953", "0.5054142", "0.50521994", "0.5050482", "0.50474083", "0.50452137", "0.5041391", "0.5037585", "0.5031869", "0.50282514", "0.5012793", "0.5008548", "0.50080854", "0.4984002", "0.4983811", "0.49829856" ]
0.70252526
3
Send the previous track command.
def media_previous_track(self) -> None: self._client.previous()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def media_previous_track(self):\n self.handleCommand(28)", "async def async_media_previous_track(self):\n await self.local_meural.send_key_left()", "async def async_media_previous_track(self):\n if not self._slave_mode:\n if not self._playing_mass:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:prev\", None)\n self._playhead_position = 0\n self._duration = 0\n self._position_updated_at = utcnow()\n self._trackc = None\n if value != \"OK\":\n _LOGGER.warning(\"Failed to skip to previous track.\" \" Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self.hass.services.async_call(\"mass\",\"queue_command\", service_data = {\"entity_id\": self.entity_id, \"command\": \"previous\"})\n else:\n await self._master.async_media_previous_track()", "def media_previous_track(self):\n self._device.previous_track()", "def media_previous_track(self) -> None:\n self._state = self._player.previous_track()", "async def play_previous(self):\n _LOGGER.debug(\"[Foobar2k] In Previous\")\n if (self._power == POWER_ON):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PREVIOUS, data=None)\n time.sleep(0.2)\n await self.async_update()", "def media_previous_track(self):\n self._lms.query(self._id, 'playlist', 'index', '-1')\n self.update_ha_state()", "async def async_media_previous_track(self):\n await self.coordinator.data.player_action(self.zone_id, \"previous\")\n await self.coordinator.async_refresh()", "async def async_media_previous_track(self) -> None:\n cur_track_index = self._get_current_track_index()\n\n await self._table.active_playlist.play(\n self._table.active_playlist.tracks[cur_track_index - 1]\n )", "async def async_media_previous_track(self) -> None:\n await self._device.previous()", "async def async_media_previous_track(self) -> None:\n await self._volumio.previous()", "def previous_track(self, device=None, **kwargs):\n return self._post(\n API.PREVIOUS.value, device_id=device, check_202=True, **kwargs\n )", "def media_previous_track(self) -> None:\n media_controller = self._media_controller()\n media_controller.queue_prev()", "async def previous(self):\n await self._pytheos.api.player.play_previous(self.id)", "def jump_to_previous(self):\n self.nvim.command('silent! 
wincmd p')", "def previous(self, event):\n self.result = -1", "def previous(self):\n resp = yield from self.command('previous')\n return True", "async def async_media_previous_track(self):\n if self._raumfeld.group_is_valid(self._rooms):\n await self._raumfeld.async_group_previous_track(self._rooms)\n elif self._is_spotify_sroom:\n await self._raumfeld.async_room_previous_track(self._room)\n else:\n log_debug(\n \"Method was called although speaker group '%s' is invalid\" % self._rooms\n )\n await self.async_update_track_info()", "def media_prev_track(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_PREV_TRACK, data)", "def skip_to_prev(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackSkipToPrev())", "def previous(self):\n return self._call_player_proxy('Prev', None)", "async def async_media_previous_track(\n hass: HomeAssistant, entity_id: str | None = None\n) -> None:\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)", "async def rewind(self, ctx: commands.Context) -> None:\n player = self.get_player(ctx.guild)\n\n # Don't even try if we're not connected.\n if not player.is_connected:\n fail = Embeds.status(success=False, desc=\"I'm not even connected. 😪\")\n return await ctx.send(embed=fail)\n\n # Handle all the track calculations using futuretrack().\n player.action = Action.rewind\n if (track := player.futuretrack()) is not None:\n embed = discord.Embed(\n title=\"Rewinding to previous track.\",\n description=f\"[{track.title}]({track.uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(track.length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if track.thumb is not None:\n embed.set_thumbnail(url=track.thumb)\n await ctx.send(embed=embed)\n\n # Use the Wavelink listener to advance if the bot is playing audio.\n if player.is_playing:\n await player.stop()\n else:\n await player.advance()\n else:\n fail = Embeds.status(success=False, desc=\"No tracks to rewind to!\")\n await ctx.send(embed=fail)", "def go_to_previous_spike(self, ):\n self._jump_to_spike(-1)", "async def prev(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:\n if self.cursor < 1:\n return await interaction.response.defer()\n\n self.cursor -= 1\n await interaction.response.edit_message(content=self.current())", "def previous(self):\n self._select_interface(self._rc_previous, self._http_previous)", "def previous(self):\n\n pass", "def goToPrevLink():\n if wikiPageStackTrace[-2].getUrl() != \"\":\n oldpage = wikiPageStackTrace[-2]\n print(\"going back to \", oldpage.getUrl())\n titleStackTrace.append(oldpage.getTitle())\n urlStackTrace.append(oldpage.getUrl())\n del wikiPageStackTrace[-1]\n update()\n else:\n update()", "def media_next_track(self):\n self.handleCommand(27)", "def prev_artist(self):\n test = self.artist_list.currentIndex() - 1\n if test >= 0:\n self.artist_list.setCurrentIndex(test)\n self.update_navigation_buttons()", "def display_previous_command( self, event ):\n if self.command_history_index > 0:\n if self.command_history_index == len( self.command_history ) - 1:\n self.command_history[ -1 ] = self.get()\n self.command_history_index -= 1\n self.delete( 0, tk.END )\n self.insert( 0, self.command_history[ self.command_history_index ] )", "def onPrevious(self, 
event):\n\t\tself.previousPicture()", "def prev_artist(self):\n test = self.artists_list.currentIndex() - 1\n if test >= 0:\n self.artists_list.setCurrentIndex(test)\n self.update_navigation_buttons()", "def prev(self):\n if self.signbit.dec_value == 0:\n method = 'prev'\n else:\n method = 'next'\n return self._step(method)", "def set_skip_current_track(self):\n self.get(COMMAND_CPM, 'SetSkipCurrentTrack')", "def prev(self):\n\t\tif not self.play_random:\n\t\t\t# normal\n\t\t\tif self.direction is \"forward\":\n\t\t\t\tself._current_id -= 1\n\t\t\telse:\n\t\t\t\tself._current_id += 1\n\t\t\tself.limit_id_range()\n\n\t\t# random\n\t\telse:\n\t\t\tif not len(self._id_queue_past):\n\t\t\t\treturn # no more previous queue to go to, so don't do anything\n\t\t\telse:\n\t\t\t\tself._current_id = self._id_queue_past.pop()\n\t\t\t\tself._id_queue.append(self.current_id)\n\n\t\tself._dispatch_update()", "def prev_tour_button(self):\r\n if self.paths_gen is None:\r\n SlTrace.lg(\"paths_gen connection has NOT been setup\")\r\n return\r\n \r\n self.paths_gen.prev_tour()", "async def previous_page(self):\n await self.checked_show_page(self.current_page - 1)", "def previous(self):\n current = self.listbox.curselection()[0]\n if current > 0:\n self.listbox.selection_clear(current)\n self.listbox.activate(current-1)\n self.listbox.select_set(current-1)\n selected = self.files[self.listbox.selection_get()]\n pygame.mixer.music.load(selected)\n pygame.mixer.music.play(loops=0)", "def prev_album(self):\n test = self.albums_list.currentIndex() - 1\n if test >= 0:\n self.albums_list.setCurrentIndex(test)\n self.update_navigation_buttons()", "def untrackCmd(self):\n if self._cmdToTrack:\n self._cmdToTrack.addCallback(self._cmdCallback)\n self._cmdToTrack = None", "def svn_fs_history_prev(*args):\r\n return _fs.svn_fs_history_prev(*args)", "async def previous(\n self, ctx: commands.Context, index: int = None, no_autoplay: bool = False\n ) -> Optional[List[Player]]:\n\n queue = self.queue[ctx.guild.id]\n\n previous_index = 2 if index is None else index + 1\n if 0 >= previous_index:\n if index:\n await self.call_event(\n \"on_music_error\",\n ctx,\n InvalidPreviousIndex(\"Previous index invalid.\"),\n )\n return\n\n original_queue_position = queue.pos\n queue.pos -= previous_index\n previous_players = queue.queue[queue.pos + 1 : original_queue_position]\n\n if no_autoplay:\n for player in previous_players[:]:\n if not player.requester:\n previous_players.remove(player)\n queue.queue.remove(player)\n\n await maybe_coroutine(ctx.voice_client.stop)\n return previous_players", "def previous(self, _event):\n self.set_val(self.val - 1)", "def prev(self, prev):\n\n self._prev = prev", "def MoveToPreviousSlide(self, event):\n pass", "async def skip(self, ctx: commands.Context) -> None:\n player = self.get_player(ctx.guild)\n\n # Don't even try if we're not connected.\n if not player.is_connected:\n fail = Embeds.status(success=False, desc=\"I'm not even connected. 
😪\")\n return await ctx.send(embed=fail)\n\n # Handle all the track calculations using futuretrack().\n player.action = Action.skip\n if (track := player.futuretrack()) is not None:\n embed = discord.Embed(\n title=\"Skipping to next track.\",\n description=f\"[{track.title}]({track.uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(track.length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if track.thumb is not None:\n embed.set_thumbnail(url=track.thumb)\n await ctx.send(embed=embed)\n\n # Use the Wavelink listener to advance if the bot is playing audio.\n if player.is_playing:\n await player.stop()\n else:\n await player.advance()\n else:\n fail = Embeds.status(success=False, desc=\"No tracks to skip to!\")\n await ctx.send(embed=fail)", "def previous(self):\n return self.my_previous", "def previous_page(self):\r\n if self.page.has_previous():\r\n previous = self.page.previous_page_number()\r\n if previous == 1:\r\n if 'page' in self.query_dict:\r\n del self.query_dict['page']\r\n else:\r\n self.query_dict['page'] = previous\r\n return \"%s?%s\" % (self.path, urlencode(self.query_dict))\r\n return \"\"", "def go_prev(self, inst):\n\n # Change active date\n self.active_date = [self.active_date[0], self.quarter_nums[0][1],\n self.quarter_nums[0][0]]\n\n # Name of prev screen\n n = self.quarter_nums[0][1] - 1\n prev_scr_name = \"%s-%s\" % (self.month_names_eng[n],\n self.quarter_nums[0][0])\n\n # If it's doen't exitst, create it\n if not self.sm.has_screen(prev_scr_name):\n self.create_month_scr(self.quarter[0])\n\n self.sm.current = prev_scr_name\n self.sm.transition.direction = \"left\"\n\n self.get_quarter()\n self.title = \"%s - %s\" % (self.month_names[self.active_date[1] - 1],\n self.active_date[2])\n\n self.title_label.text = self.title", "async def prev_page(self):\n if self.page_num == 1:\n self.page_num = len(self.pages) # Loop around to the last item\n else:\n self.page_num -= 1\n return await self.update()", "def previous_character(self) -> str:\n return self.seek(self.index - 1)", "def previous(self, result, **kwargs):\n if result[\"previous\"]:\n return self._get(result[\"previous\"], **kwargs)\n\n return None", "def goToPrevHistory(self: Self, event: Event = None) -> None:\n c = self\n c.nodeHistory.goPrev()", "def back(self):\n\n\t\tself.controller.showFrame(self.prevFrame)", "def previous(self) -> Optional[Chainable]:\n return None", "def back( self ):\n self._has_change = True\n print( \"Back\" )", "def get_prev_player(self):\r\n prev_cmd = self.get_last_cmd()\r\n if prev_cmd is None:\r\n return None # No previous player\r\n prev_player = prev_cmd.new_player\r\n return prev_player", "def previous(self,dec=-1):\n for i in range(-dec):\n self.currentSub._previous()", "def setPrev(self, prev):\n\t\t\tself.prev = prev", "def on_prev_button(self, event):\n if 'saved' not in self.Data[self.s]['pars'] or self.Data[self.s]['pars']['saved'] != True:\n # check preferences\n if self.auto_save.GetValue():\n self.on_save_interpretation_button(None)\n else:\n del self.Data[self.s]['pars']\n self.Data[self.s]['pars'] = {}\n self.Data[self.s]['pars']['lab_dc_field'] = self.Data[self.s]['lab_dc_field']\n self.Data[self.s]['pars']['er_specimen_name'] = self.Data[self.s]['er_specimen_name']\n self.Data[self.s]['pars']['er_sample_name'] = self.Data[self.s]['er_sample_name']\n # return to last saved interpretation if exist\n if 'er_specimen_name' in 
list(self.last_saved_pars.keys()) and self.last_saved_pars['er_specimen_name'] == self.s:\n for key in list(self.last_saved_pars.keys()):\n self.Data[self.s]['pars'][key] = self.last_saved_pars[key]\n self.last_saved_pars = {}\n\n index = self.specimens.index(self.s)\n if index == 0:\n index = len(self.specimens)\n index -= 1\n self.s = self.specimens[index]\n self.specimens_box.SetStringSelection(self.s)\n self.update_selection()", "def backtrack(self, data=None):\n _, cb = self._track.pop()\n if cb:\n cb(data)\n\n if self._track:\n self._update(self._get_current_pane())\n else:\n raise urwid.ExitMainLoop()", "def previous(self):\n return Reference(\":\".join(self.names[:-2]))", "def prev_page(self):\n if self.history.position > self.lines and self.history.top:\n mid = min(len(self.history.top),\n int(math.ceil(self.lines * self.history.ratio)))\n\n self.history.bottom.extendleft(reversed(self.buffer[-mid:]))\n self.history = self.history \\\n ._replace(position=self.history.position - self.lines)\n\n self.buffer[:] = list(reversed([\n self.history.top.pop() for _ in range(mid)\n ])) + self.buffer[:-mid]\n\n self.dirty = set(range(self.lines))", "def prev_song(self, wrap=True):\n bumped = False\n if not self.songs:\n return None\n\n self._song_idx = self._song_idx - 1\n if self._song_idx < 0:\n if wrap:\n self._song_idx = len(self.songs)-1;\n else:\n self._song_idx = 0\n bumped = True\n\n self._cur_song = self.songs[self._song_idx]\n if not bumped:\n self._song_position = 0\n return self._cur_song\n else:\n return None", "def previous_media_file(self):\n if len(self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1]) == 1:\n return\n\n if self.playerType == VLC:\n\n if self.playMode == FFMPEG:\n\n currentMedia = \"\"\n for idx, media in enumerate(self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1]):\n if self.FFmpegGlobalFrame < self.duration[idx + 1]:\n self.FFmpegGlobalFrame = self.duration[idx - 1]\n break\n self.FFmpegGlobalFrame -= 1\n self.ffmpegTimerOut()\n\n else:\n\n # check if media not first media\n if self.media_list.index_of_item(self.mediaplayer.get_media()) > 0:\n\n # remember if player paused (go previous will start playing)\n flagPaused = self.mediaListPlayer.get_state() == vlc.State.Paused\n self.mediaListPlayer.previous()\n\n while True:\n if self.mediaListPlayer.get_state() in [vlc.State.Playing, vlc.State.Ended]:\n break\n\n if flagPaused:\n self.mediaListPlayer.pause()\n else:\n\n if self.media_list.count() == 1:\n self.statusbar.showMessage(\"There is only one media file\", 5000)\n else:\n if self.media_list.index_of_item(self.mediaplayer.get_media()) == 0:\n self.statusbar.showMessage(\"The first media is playing\", 5000)\n\n self.timer_out()\n self.timer_spectro_out()\n # self.timer_plot_data_out()\n\n # no subtitles\n # self.mediaplayer.video_set_spu(0)\n\n if hasattr(self, \"spectro\"):\n self.spectro.memChunk = -1", "def StopTrack(self):\n handler = self.get_command_object(\"StopTrack\")\n handler()", "def prev_action(self):\n raise NotImplementedError", "def back(self):", "def moveToPreviousFrame(self):\n\t\tall_ts = [s for t in self.stamps_by_stream.values() for s in t]\n\t\tall_ts.sort()\n\t\tfirst_frame = all_ts[0]\n\n\t\tselected_index = bisect.bisect_right(all_ts, self._timeline.current_pos)-1\n\t\tif selected_index <= 0 or all_ts[selected_index-1] < first_frame:\n\t\t\t# There is no data before, or no frame. 
Do nothing\n\t\t\treturn\n\t\tself._timeline.current_pos = all_ts[selected_index-1]\n\t\tself.objectSelected.emit(\n\t\t self.getFileAtStamp(self._timeline.current_pos)\n\t\t)", "def set_previous(self, new_previous):\n self.previous = new_previous", "def on_btSubPagePrev_clicked(self, widget, data=None):\n\n if self.part > 1:\n self.part -= 1\n self.refresh()", "def do_back(robot_name, steps):\n\n if update_position(-steps):\n return True, ' > '+robot_name+' moved back by '+str(steps)+' steps.'\n else:\n return True, ''+robot_name+': Sorry, I cannot go outside my safe zone.'", "def refresh_mark(self):\n current = self.player.current_position()\n if current != None:\n if self.prev_song != None and self.prev_song < len(self.buf):\n self.buf[self.prev_song] = ' ' + self.buf[self.prev_song][1:]\n self.buf[current] = '-' + self.buf[current][1:]\n self.prev_song = current\n # Move cursor to current position.\n vim.current.window.cursor = (current + 1, 1)", "def skip(self):\n self.click_back_button()", "def previous_url(self):\n if self.has_previous:\n kwargs = g.request_args.copy()\n kwargs.update(request.view_args.copy())\n kwargs['offset'] = self.offset - self.limit\n kwargs['limit'] = self.limit\n return url_for(request.endpoint, **kwargs)", "def __previousTask(self):\n self.activeWindow().previousTask()", "def undo(self):\n if self._history_position > 0:\n self._history_position -= 1\n self._commands[\n self._history[self._history_position][1]\n ].execute(self._history[self._history_position][2])\n else:\n print(\"nothing to undo\")", "def back(self,**kwargs):\n self.mm.loadPreviousMenu()", "def previous_board(self):\n pass", "def __previousBookmark(self):\n self.activeWindow().previousBookmark()", "def go_back(update: Update, context: CallbackContext):\n query = update.callback_query\n query.answer()\n\n choice = query.data.split(\"back_to:\")[1]\n\n if \"links\" in choice:\n get_links(update, context, editable_message_id=query.message.message_id)\n elif \"expand\" in choice:\n expand_link(update, context)", "def reset_track():\n global track_last_slided_pos\n global track_last_paused_pos\n \n track_pos_label.configure(text=\"00:00\")\n track_pos.set(0)\n track_last_slided_pos = 0\n track_last_paused_pos = 0", "def on_newerButton_clicked(self):\n self.emit(SIGNAL(\"RT_PREVIOUS_PAGE\"))\n self.recordings_table.previous_page()", "def back(self):\n self.log_info(f\"Browser.back: Telling browser to return to previous page\")\n self.CORE.back()\n return", "async def go_to_previous_page(self, payload):\n await self.show_checked_page(self.current_page - 1)", "def navigBack(self):\n cmdId = self.executeCommand(Command.GO_BACK)\n return cmdId", "async def async_media_next_track(self):\n await self.local_meural.send_key_right()", "def track(self,name):\n self.sendCommand(\"global /track\",name)", "def on_btPagePrev_clicked(self, widget, data=None):\n\n if self.page > 1:\n self.page -= 1\n self.part = 1\n self.refresh()", "def _historyForwardClickedSlot(self):\r\n\r\n steps, success = self._controller.forwardAction.data().toInt()\r\n if success:\r\n self._controller.model.relativeHistoryIndex = steps", "def previous_step_result(self):\n return self._previous_step_result", "def previous_buffer(self):\n selected_window = self.selected_window()\n selected_window.set_buffer(self._find_previous_buffer(selected_window.buffer()))", "def on_back(self):\r\n self.log()", "def cancel_previous(self):\n\n previous_interviews = self.application.interviews.exclude(\n pk=self.pk,\n ).filter(canceled=False)\n\n 
previous_interviews.update(canceled=True, canceled_at=timezone.now())", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def previousPicture(self):\n\t\tif self.currentPicture == 0:\n\t\t\tself.currentPicture = self.totalPictures - 1\n\t\telse:\n\t\t\tself.currentPicture -= 1\n\t\tself.loadImage(self.picPaths[self.currentPicture])", "def previous_line():\r\n set_point(point().previous_line())" ]
[ "0.8249551", "0.76196414", "0.75633925", "0.7422574", "0.7405951", "0.73070335", "0.72534436", "0.7235791", "0.7137344", "0.7100116", "0.70965886", "0.70518535", "0.70392615", "0.69184697", "0.681314", "0.6702323", "0.66659826", "0.6575856", "0.65132546", "0.6510637", "0.64742404", "0.6455687", "0.6438287", "0.6197984", "0.6151015", "0.6033361", "0.59985036", "0.5994181", "0.59710366", "0.5959195", "0.5898851", "0.58977234", "0.5880429", "0.5847212", "0.5798121", "0.5770856", "0.5745679", "0.5729324", "0.5725961", "0.5709473", "0.5701838", "0.5645689", "0.56320715", "0.56292474", "0.55591184", "0.5537017", "0.5532293", "0.55315685", "0.55291474", "0.5523725", "0.55148697", "0.550492", "0.5501501", "0.5434588", "0.5422868", "0.5421704", "0.54215205", "0.5420668", "0.54012054", "0.54005325", "0.5400138", "0.5380272", "0.5355134", "0.53529036", "0.53477246", "0.5338952", "0.53262794", "0.53224677", "0.52915645", "0.52898353", "0.5276303", "0.52751637", "0.5274851", "0.5272149", "0.5267631", "0.52610314", "0.5255777", "0.52361137", "0.5232048", "0.5222608", "0.5217782", "0.52108", "0.5206849", "0.51998156", "0.5183718", "0.51777774", "0.5172018", "0.51651144", "0.5156646", "0.51495826", "0.5147699", "0.5146269", "0.5140666", "0.51384795", "0.5131043", "0.512697", "0.512697", "0.512697", "0.51242745", "0.5119945" ]
0.76491725
1
Box with Lid. See xml for more info.
def __init__(self, env, initial_lid_pos, name, resource='objects/box_with_lid.xml'): self._env = env self._initial_lid_pos = initial_lid_pos self._name = name self._resource = resource
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def define_box_location(self):\n self.contents['Box_ID'] = np.ones(self.numatom) * self.num_box", "def add_box(self, l, w, h, x, y, z, comment=\"\"):\n self.data['shape']['compound'].append({'box': {'#': comment, 'pose': {'x': x, 'y': y, 'z': z},\n 'size': {'x': l, 'y': w, 'z': h}}})", "def set_box(self, box=0.0):\n self.box = box", "def box(context, nodelist, classname=None):\n return render_to_string('deco/box.html', {\n 'classname': classname or \"\",\n 'content': nodelist.render(context)\n })", "def add_box(self):\n self.scenes[self.current_scene].add_object(Box())\n self.redraw()", "def box(self) -> None:\n self.screen.box()", "def box(self, x, y, w, h):\n\t\tpass", "def make_box(self, name=None) -> 'Box':\n\n if self.size().x == 0:\n box = Rect(self.size().z, self.size().y, name=name)\n box.ry(90)\n elif self.size().y == 0:\n box = Rect(self.size().x, self.size().z, name=name)\n box.rx(90)\n elif self.size().z == 0:\n box = Rect(self.size().x, self.size().y, name=name)\n else:\n box = Box(*self.size().asArray(), name=name)\n\n box.place(\n ~box == ~self,\n ~box == ~self,\n ~box == ~self)\n return box", "def envelop(self):\n return self.box", "def add_box(self, box):\n mz_from = box.from_mz\n mz_to = box.to_mz\n rt_from = box.from_rt\n rt_to = box.to_rt\n self.boxes_mz.addi(mz_from, mz_to, box)\n self.boxes_rt.addi(rt_from, rt_to, box)", "def add_box(self, width, height, label=None, facecolor='none', color='b'):\n rect = patches.Rectangle(\n (0, 0),\n width,\n height,\n linewidth=1,\n edgecolor=color,\n label=label,\n facecolor=facecolor)\n pyplot.gca().add_patch(rect)", "def bbox(self, node):\n node_id = node.get('id')\n #inkex.utils.debug(\"Check if \" + str(node_id) + \" is in \" + str(self.node_info))\n info = self.node_info[node_id] \n \n x = info.x\n y = info.y\n width = info.width\n height = info.height\n\n return Box(Point(x, y),\n Point(x + width, y),\n Point(x + width, y + height),\n Point(x, y + height))", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n 
self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def draw_left_hand_box(data):\n box = data['lhb'][data['i']];\n return draw_hand_box(data,box,[0,255,0]);", "def rightbox(self):\r\n pass", "def box(self):\n b = Bnd_Box()\n brepbndlib_Add(self.topods_shape(), b)\n return geom_utils.box_to_geometry(b)", "def box(self) -> math.Box:\n area = self.__dict__[\"area\"]\n if area is None:\n return math.Box()\n return math.Box(math.Point(*area[:2]), math.Point(*area[-2:]))", "def _add_box(self, boxdesc):\n # Check box definition parameters\n box_attributes = list(boxdesc.keys())\n if not set(box_attributes).issubset(self.unit_attributes):\n raise ValueError(\n \"Box definition: '{0}' defined in '{1}' is not supported. \"\n \"Supported box parameters are '{2}'.\".format(\n json.dumps(boxdesc, indent=2), self._xmlfile,\n self.unit_attributes))\n for mandatory_parameter in self.unit_attributes[:2]:\n if mandatory_parameter not in box_attributes:\n raise ValueError(\n \"A '{0}' parameter is required in box definition: '{1}' \"\n \"defined in '{2}'.\".format(\n mandatory_parameter, json.dumps(boxdesc, indent=2),\n self._xmlfile))\n\n # Check the name of the new box is not already reserved\n box_name = boxdesc[self.unit_attributes[0]][0]\n if box_name in self._boxes:\n raise ValueError(\"The box name '{0}' defined in '{1}' is already \"\n \"used.\".format(box_name, self._xmlfile))\n\n # Instanciate the new box\n box_module = boxdesc[self.unit_attributes[1]][0]\n iterinputs = boxdesc.get(self.unit_attributes[3], [])\n iteroutputs = boxdesc.get(self.unit_attributes[4], [])\n if box_module.endswith(\".xml\"):\n box = Pbox(box_module)\n else:\n box = Bbox(box_module)\n box.update_control_names(box_name)\n if iterinputs != [] or iteroutputs != []:\n iterinputs = [item[\"name\"] for item in iterinputs]\n iteroutputs = [item[\"name\"] for item in iteroutputs]\n box = Ibox(box, iterinputs, iteroutputs)\n self._boxes[box_name] = box\n\n # Set the new box default parameters\n set_tag = self.unit_attributes[2]\n if set_tag in box_attributes:\n for box_defaults in boxdesc[set_tag]:\n\n # Check the proper lexic has been specified\n if not set(box_defaults.keys()).issubset(self.unit_set):\n raise ValueError(\n \"Box attribute definition: '{0}' defined in '{1}' is \"\n \"not supported. 
Supported attributes are \"\n \"'{2}'.\".format(\n list(box_defaults.keys()), self._xmlfile,\n self.unit_set))\n\n # Set the input or output default paramters\n box_pname = box_defaults[self.unit_set[0]]\n box_pvalue = eval(box_defaults[self.unit_set[1]])\n if box_pname in self._boxes[box_name].inputs.controls:\n control = getattr(self._boxes[box_name].inputs, box_pname)\n elif box_pname in self._boxes[box_name].outputs.controls:\n control = getattr(self._boxes[box_name].outputs, box_pname)\n else:\n raise ValueError(\n \"The parameter '{0}' is not defined in the box \"\n \"'{1}' input or output parameters.\".format(\n box_pname, box_name))\n control.optional = True\n control.value = box_pvalue", "def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box", "def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box", "def __init__(self, *args):\n _Bnd.Bnd_Box_swiginit(self,_Bnd.new_Bnd_Box(*args))", "def box(self, box, padx=0.5, pady=0.3, **options):\n\n # underride sets default values only if the called hasn't\n underride(options, outline='black')\n box.left -= padx\n box.top -= pady\n box.right += padx\n box.bottom += pady\n item = self.rectangle(box, **options)\n return item", "def get_box(req):", "def _draw_single_box_on_image(self,box,label,id):\n p1 = (box[1], box[0])\n p2 = (box[3], box[2])\n if self.config.DISCO_MODE:\n color = random.choice(self.STANDARD_COLORS)\n else:\n color = self.STANDARD_COLORS[id]\n cv2.rectangle(self.image, p1, p2, color, 2)\n self._draw_text_on_image(label,(p1[0],p1[1]-10),color)", "def plotBox(box):\n plt.plot([box.xll, box.xur, box.xur, box.xll, box.xll]\n ,[box.yll, box.yll, box.yur, box.yur, box.yll]\n , '-'\n )", "def __repr__(self):\n return \"Box(mins={}, maxs={}, angles={})\".format(\n self.mins, self.maxs, self.angles\n )", "def __init__(self, box=[], idx=None):\n self.box: List[int] = box\n self.class_id: int = idx", "def add_random_box(self, env):\n box_size = self.random_size(0.05, 0.15, 0.05, 0.15, 0.01, 0.06)\n box_pose = self.random_pose(env, box_size)\n box_template = 'assets/box/box-template.urdf'\n box_urdf = self.fill_template(box_template, {'DIM': box_size})\n box_id = env.add_object(box_urdf, box_pose)\n os.remove(box_urdf)\n self.color_random_brown(box_id)\n self.object_points[box_id] = np.float32((0, 0, 0)).reshape(3, 1)\n self._IDs[box_id] = 'random_box'\n return box_id", "def add_box(self, timeout=4):\n\n # Side length of the box\n box_size = 0.16\n\n # Set pose of the box\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = 'world'\n box_pose.pose.orientation.w = 1.0\n box_pose.pose.position.x = 0.0\n box_pose.pose.position.y = 0.45\n box_pose.pose.position.z = 1.92\n\n # Add box to scene\n self.scene.add_box(self.box_name, box_pose, size=(box_size,\n box_size,\n box_size))\n\n # Wait for update and return status\n return self.wait_for_state_update(box_is_known=True,\n timeout=timeout)", "def __new__(cls, *args, **kwargs):\n obj = super(Box, cls).__new__(cls, *args, **kwargs)\n obj._box_config = _get_box_config(cls, kwargs)\n return obj", "def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == 
self.my_rank:\n self.my_boxes.append(Box(self, index))", "def test_one_object_multi_bndbox(self):\n\n text_num = '123'\n obj = self.root.find('object')\n bndbox = obj.find('bndbox')\n bndbox.find('xmin').text = text_num\n obj.append(bndbox)\n _, boxes = self._test_helper()\n self.assertEqual(len(boxes), 2)\n self.assertNotEqual(boxes[0].xmin, int(text_num))", "def delete_box(self) -> None:\n from pymol import cmd\n\n # Reset all box variables\n self.x = 0\n self.y = 0\n self.z = 0\n # self.min_x_set = 0.0\n # self.max_x_set = 0.0\n # self.min_y_set = 0.0\n # self.max_y_set = 0.0\n # self.min_z_set = 0.0\n # self.max_z_set = 0.0\n # self.angle1_set = 0.0\n # self.angle2_set = 0.0\n # self.padding_set = 3.5\n\n # Delete Box and Vertices objects in PyMOL\n cmd.delete(\"vertices\")\n cmd.delete(\"box\")\n\n # Set Box variables in the interface\n self.min_x.setValue(self._default.min_x)\n self.max_x.setValue(self._default.max_x)\n self.min_y.setValue(self._default.min_y)\n self.max_y.setValue(self._default.max_y)\n self.min_z.setValue(self._default.min_z)\n self.max_z.setValue(self._default.max_z)\n self.angle1.setValue(self._default.angle1)\n self.angle2.setValue(self._default.angle2)\n\n # Change state of buttons in the interface\n self.button_draw_box.setEnabled(True)\n self.button_redraw_box.setEnabled(False)\n self.min_x.setEnabled(False)\n self.min_y.setEnabled(False)\n self.min_z.setEnabled(False)\n self.max_x.setEnabled(False)\n self.max_y.setEnabled(False)\n self.max_z.setEnabled(False)\n self.angle1.setEnabled(False)\n self.angle2.setEnabled(False)", "def __init__(self, box):\n self.is_hidden = False\n self.last_boxes = []\n self.best_box = None\n self.frames_undetected = 0\n self.age = 0\n self.n_frames = 10\n\n self.update(box)", "def makeBox(self) -> None:\n self.state[CASH] = self.state[CASH] + 1", "def __init__(self, v1, v2, *opts, **kwargs):\n Item.__init__(self, \"box\", (v1, v2), opts, **kwargs)", "def create_boxes(plot_data, size=1):\n fixed_boxes = plot_data.buffer(size).envelope\n \n fixed_boxes = gpd.GeoDataFrame(geometry=fixed_boxes)\n \n #Mimic the existing structure\n fixed_boxes = gpd.sjoin(fixed_boxes, plot_data)\n fixed_boxes[\"score\"] = None\n fixed_boxes[\"label\"] = \"Tree\" \n fixed_boxes[\"xmin\"] = None \n fixed_boxes[\"xmax\"] = None\n fixed_boxes[\"ymax\"] = None\n fixed_boxes[\"ymin\"] = None\n \n fixed_boxes[\"box_id\"] = fixed_boxes.index.to_series().apply(lambda x: \"fixed_box_{}\".format(x))\n \n return fixed_boxes", "def create_boxes(plot_data, size=1):\n fixed_boxes = plot_data.buffer(size).envelope\n \n fixed_boxes = gpd.GeoDataFrame(geometry=fixed_boxes)\n \n #Mimic the existing structure\n fixed_boxes = gpd.sjoin(fixed_boxes, plot_data)\n fixed_boxes[\"score\"] = None\n fixed_boxes[\"label\"] = \"Tree\" \n fixed_boxes[\"xmin\"] = None \n fixed_boxes[\"xmax\"] = None\n fixed_boxes[\"ymax\"] = None\n fixed_boxes[\"ymin\"] = None\n \n fixed_boxes[\"box_id\"] = fixed_boxes.index.to_series().apply(lambda x: \"fixed_box_{}\".format(x))\n \n return fixed_boxes", "def shape(self) -> str:\n return \"box\"", "def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor:\n return self(kind=\"box\", by=by, **kwargs)", "def isBox(self):\n return self.box", "def get_boxes(self):\r\n\r\n boxes = [(\" \", self.worldbox.tl, self.worldbox.br)]\r\n# boxes = []\r\n boxes += [(\".\", b.tl, b.br) for b in self.wallboxes]\r\n boxes += [(\"x\", b.tl, b.br) for b in self.targetboxes]\r\n agentscale = 100\r\n boxes += [(\"a\", (self.i_state[0] - self.dx * 
agentscale, self.i_state[1] - self.dx * agentscale),\r\n (self.i_state[0] + self.dx * agentscale, self.i_state[1] + self.dx * agentscale))]\r\n return boxes", "def GetAMRBox(self, p_int, p_int_1):\n ...", "def create_box(world, space, density, lx, ly, lz):\r\n\r\n\t\t# Create body\r\n\t\tbody = ode.Body(world)\r\n\t\tM = ode.Mass()\r\n\t\tM.setBox(density, lx, ly, lz)\r\n\t\tbody.setMass(M)\r\n\r\n\t\t# Set parameters for drawing the body\r\n\t\tbody.shape = \"box\"\r\n\t\tbody.boxsize = (lx, ly, lz)\r\n\r\n\t\t# Create a box geom for collision detection\r\n\t\tgeom = ode.GeomBox(space, lengths = body.boxsize)\r\n\t\tgeom.setBody(body)\r\n\r\n\t\treturn body, geom", "def box(original, diameter):\n return Box(original, h.ones(original.size()) * diameter, None).checkSizes()", "def create():\n\n form = SQLFORM(db.box, fields=['name', 'private'])\n add_element_required_attr(db.box, form)\n\n form.vars.owner = auth.user\n\n if form.process(onvalidation=_validate_box_form).accepted:\n flash('success', 'Created box.', URL('box', 'view', args=[form.vars.id]))\n elif form.errors:\n flash('danger', 'Form has errors.')\n\n return {\n 'form': form\n }", "def boxPlot(self):\n clf()\n boxplot(self.y,positions=self.x,widths=0.5)\n xlabel('X Label (units)')\n ylabel('Y Label (units)')\n savefig('boxplot.png')", "def make_box(stdscr, margin:int):\n sh, sw = stdscr.getmaxyx()\n\n box = (\n (margin, margin),\n (sh-margin, sw-margin)\n )\n rectangle(stdscr, box[0][0], box[0][1], box[1][0], box[1][1])\n stdscr.refresh()\n return box", "def uiNewHorizontalBox():\n\n # Set return type\n clibui.uiNewHorizontalBox.restype = ctypes.POINTER(uiBox)\n\n return clibui.uiNewHorizontalBox()", "def setBox(self, box: Vector):\r\n if not isinstance(box, Vector):\r\n raise TypeError(\"Box Property should be a vector\")\r\n\r\n if self._fixture is not None:\r\n while kge.Physics.world.locked:\r\n continue\r\n\r\n self._box = box\r\n self._fixture.shape.box = *self._box,", "def fl_drw_box(boxtype, xpos, ypos, width, height, colr, bndrwidth):\n _fl_drw_box = library.cfuncproto(\n library.load_so_libforms(), \"fl_drw_box\",\\\n None, [cty.c_int, xfdata.FL_Coord, xfdata.FL_Coord, xfdata.FL_Coord,\n xfdata.FL_Coord, xfdata.FL_COLOR, cty.c_int],\\\n \"\"\"void fl_drw_box(int style, FL_Coord x, FL_Coord y, FL_Coord w,\n FL_Coord h, FL_COLOR c, int bw_in)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(boxtype, xfdata.BOXTYPE_list)\n i_boxtype = library.convert_to_intc(boxtype)\n i_xpos = library.convert_to_FL_Coord(xpos)\n i_ypos = library.convert_to_FL_Coord(ypos)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n i_bndrwidth = library.convert_to_intc(bndrwidth)\n library.keep_elem_refs(boxtype, i_boxtype, xpos, i_xpos, ypos, \\\n i_ypos, width, i_width, height, i_height, colr, ul_colr, \\\n bndrwidth, i_bndrwidth)\n _fl_drw_box(i_boxtype, i_xpos, i_ypos, i_width, i_height, ul_colr, \\\n i_bndrwidth)", "def __init__(self,\n env,\n xml_config,\n box_lid_pos=0.,\n peg_pos=[0.75, 0.1, 0.1],\n peg_delta_range=[0., 0.]):\n self._env = env\n self._xml_config = xml_config\n self._box_lid_pos = box_lid_pos\n self._peg_pos = peg_pos\n self._peg_delta_range = peg_delta_range\n\n # Manually specify what objects exist for each xml config\n #TODO: add support for dm_control.mjcf\n self._boxes = []\n self._pegs = []\n\n if self._xml_config == 
'default' or self._xml_config == 'task':\n box = BoxWithLid(\n env=self._env,\n initial_lid_pos=self._box_lid_pos,\n name='box')\n self._boxes.append(box)\n peg = BlockPeg(\n env=self._env,\n initial_pos=self._peg_pos,\n random_delta_range=self._peg_delta_range,\n name='peg')\n self._pegs.append(peg)", "def drop_boxes(self): \r\n model = loader.load_model('models/box.egg')\r\n model.set_pos(-0.5, -0.5, -0.5)\r\n model.flatten_light()\r\n shape = BulletBoxShape(LVector3(0.5, 0.5, 0.5))\r\n ahead = self.vehicleNP.get_pos() + self.vehicle.get_forward_vector()*15\r\n \r\n for i in range(6):\r\n node = BulletRigidBodyNode('Box')\r\n node.set_mass(5.0)\r\n node.add_shape(shape)\r\n node.set_deactivation_enabled(False)\r\n np = render.attach_new_node(node)\r\n np.set_pos(ahead.x, ahead.y, ahead.z + i*2)\r\n self.world.attach(node)\r\n model.copy_to(np)", "def make_wander_box(self):\n x = int(self.location[0])\n y = int(self.location[1])\n box_list = []\n box_rects = []\n\n for i in range(x-3, x+4):\n box_list.append([i, y-3])\n box_list.append([i, y+3])\n\n for i in range(y-2, y+3):\n box_list.append([x-3, i])\n box_list.append([x+3, i])\n\n for box in box_list:\n left = box[0]*32\n top = box[1]*32\n box_rects.append(pg.Rect(left, top, 32, 32))\n\n return box_rects", "def make_lattice(box):\n from quippy.atoms import make_lattice\n if box.shape == (3, 3):\n\t# http://lammps.sandia.gov/doc/Section_howto.html#howto-12 Describes the\n\t# methodology (look for the section entitled \"6.12. Triclinic\n\t# (non-orthogonal) simulation boxes\") The [a, b, c, alpha, beta, gamma]\n\t# vector can be passed to the ase.Atoms object as a definition for the\n\t# triclinic box (note that the quippy.Atoms class inherits from\n\t# ase.Atoms) Make sure that you note that the data is provided:\n\t# \n\t# ITEM: BOX BOUNDS xy xz yz ....\n\t# xlo_bound xhi_bound xy\n\t# ylo_bound yhi_bound xz\n\t# zlo_bound zhi_bound yz\n\t# \n\t# whereas we need xlo, xhi, etc. 
not xlo_bound, xhi_bound, etc.\n\txlo = box[0][0] - min(0.0, box[0][2], box[1][2], box[0][2] + box[1][2])\n\txhi = box[0][1] - max(0.0, box[0][2], box[1][2], box[0][2] + box[1][2])\n\tylo = box[1][0] - min(0.0, box[2][2])\n\tyhi = box[1][1] - max(0.0, box[2][2])\n\tzlo = box[2][0]\n\tzhi = box[2][1]\n\n\ta = (xhi - xlo)\n\tb = np.sqrt((yhi - ylo)**2 + (box[0][2])**2)\n\tc = np.sqrt((zhi - zlo)**2 + (box[1][2])**2 + (box[2][2])**2)\n\talpha = np.arccos((box[0][2] * box[1][2] + (yhi - ylo) * box[2][2]) / (b * c))\n\tbeta = np.arccos(box[1][2] / c)\n\tgamma = np.arccos(box[0][2] / b)\n\treturn make_lattice(a, b, c, alpha, beta, gamma)\n \n elif box.shape == (3, 2):\n\treturn make_lattice(box[0][1] - box[0][0],\n box[1][1] - box[1][0],\n box[2][1] - box[2][0])\n else:\n raise ValueError(\"Unexpected box size/parameters: {}\".format(box))", "def errorbox(context, nodelist, box_id=None):\n return render_to_string('deco/errorbox.html', {\n 'box_id': box_id or \"\",\n 'content': nodelist.render(context)\n })", "def __init__(self, size, x=0, y=0, id=None):\n super().__init__(size, size, x, y, id)", "def __init__(self, size, x=0, y=0, id=None):\n super().__init__(size, size, x, y, id)", "def __init__(self, size, x=0, y=0, id=None):\n super().__init__(size, size, x, y, id)", "def init_infobox(self):\n infobox = tk.Label(self, text=\"\", justify=\"left\")\n infobox.grid(row=0, column=1, sticky=\"n\")\n self.infobox = infobox", "def s_box(cls, box, bits):\n row = [bits[0], bits[5]]\n row = bits_to_integer(row)\n col = bits_to_integer(bits[1:5])\n s_box_value = cls._sbox[box][row][col]\n return tuple(bits_of(s_box_value, 4))", "def __init__(self, size, x=0, y=0, id=None):\n\n super().__init__(size, size, x, y, id)", "def box(self, x0, y0, width, height):\n assert width > 1\n assert height > 1\n\n width -= 1\n height -= 1\n\n for x in range(x0, x0 + width):\n self.point(x, y0, \"-\")\n self.point(x, y0 + height, \"-\")\n\n for y in range(y0, y0 + height):\n self.point(x0, y, \"|\")\n self.point(x0 + width, y, \"|\")\n\n self.point(x0, y0, \"+\")\n self.point(x0 + width, y0, \"+\")\n self.point(x0, y0 + height, \"+\")\n self.point(x0 + width, y0 + height, \"+\")", "def setBox(self, box):\n if self.box is None:\n self.box = box\n self.centroid = [(box[0] + box[2]) / 2, (box[1] + box[3]) / 2]\n self.lastCentroid.append(self.centroid)\n else:\n # Creating a weighted update\n new_centroid = [(box[0] + box[2]) / 2, (box[1] + box[3]) / 2]\n old_centroid = self.centroid\n\n received_height = box[3] - box[1]\n received_width = box[2] - box[0]\n\n new_centroid[0] = new_centroid[0] * WEIGHT_POSITION + old_centroid[0] * (1 - WEIGHT_POSITION)\n new_centroid[1] = new_centroid[1] * WEIGHT_POSITION + old_centroid[1] * (1 - WEIGHT_POSITION)\n\n new_height = received_height * WEIGHT_DIMENSIONS + (self.box[3] - self.box[1]) * (1 - WEIGHT_DIMENSIONS)\n new_width = received_width * WEIGHT_DIMENSIONS + (self.box[2] - self.box[0]) * (1 - WEIGHT_DIMENSIONS)\n\n # calculating the new bounding box\n self.box[0] = int(new_centroid[0] - new_width / 2)\n self.box[2] = int(new_centroid[0] + new_width / 2)\n self.box[1] = int(new_centroid[1] - new_height / 2)\n self.box[3] = int(new_centroid[1] + new_height / 2)\n\n self.centroid = new_centroid\n self.lastCentroid.append(new_centroid)", "def from_annotation(cls, anno) -> \"Box\":\n bounds = anno[\"bounds\"]\n return cls(\n x=bounds[\"left\"],\n y=bounds[\"top\"],\n width=bounds[\"right\"] - bounds[\"left\"],\n height=bounds[\"bottom\"] - bounds[\"top\"],\n 
label=anno[\"label\"][\"text\"],\n )", "def oneNumBox(df, colName):\n \n boxData = [go.Box(x=df[colName[0]], name=colName[0])]\n layout = go.Layout(title='Box Plot for distribution of ' + str(colName[0]))\n fig = go.Figure(data = boxData, layout = layout)\n return {\"label\":\"Boxplot\", \"plot\":fig}", "def draw_box(self) -> None:\n from math import pi, sin, cos\n import pymol\n from pymol import cmd\n\n # Convert angle\n angle1 = (self.angle1.value() / 180.0) * pi\n angle2 = (self.angle2.value() / 180.0) * pi\n\n # Get positions of box vertices\n # P1\n x1 = -self.min_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y1 = -self.min_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z1 = self.min_x.value() * sin(angle2) + self.min_y.value() * sin(angle1) * cos(angle2) - self.min_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P2\n x2 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y2 = (-self.min_y.value()) * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z2 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P3\n x3 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y3 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z3 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P4\n x4 = (-self.min_x.value()) * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y4 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z4 = -(-self.min_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P5\n x5 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y5 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z5 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P6\n x6 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y6 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z6 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P7\n x7 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n\n y7 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n\n z7 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P8\n x8 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y8 
= self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z8 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # Create box object\n pymol.stored.list = []\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.iterate(\"box\", \"stored.list.append((name, color))\", quiet=1)\n list_color = pymol.stored.list\n cmd.delete(\"box\")\n if len(list_color) > 0:\n for item in list_color:\n at_name = item[0]\n at_c = item[1]\n cmd.set_color(at_name + \"color\", cmd.get_color_tuple(at_c))\n else:\n for at_name in [\"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v1x\", \"v1y\", \"v1z\", \"v2x\", \"v3y\", \"v4z\"]:\n cmd.set_color(at_name + \"color\", [0.86, 0.86, 0.86])\n\n # Create vertices\n cmd.pseudoatom(\"box\", name=\"v2\", pos=[x2, y2, z2], color=\"v2color\")\n cmd.pseudoatom(\"box\", name=\"v3\", pos=[x3, y3, z3], color=\"v3color\")\n cmd.pseudoatom(\"box\", name=\"v4\", pos=[x4, y4, z4], color=\"v4color\")\n cmd.pseudoatom(\"box\", name=\"v5\", pos=[x5, y5, z5], color=\"v5color\")\n cmd.pseudoatom(\"box\", name=\"v6\", pos=[x6, y6, z6], color=\"v6color\")\n cmd.pseudoatom(\"box\", name=\"v7\", pos=[x7, y7, z7], color=\"v7color\")\n cmd.pseudoatom(\"box\", name=\"v8\", pos=[x8, y8, z8], color=\"v8color\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1x\", pos=[x1, y1, z1], color='red')\n cmd.pseudoatom(\"box\", name=\"v2x\", pos=[x2, y2, z2], color='red')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1y\", pos=[x1, y1, z1], color='forest')\n cmd.pseudoatom(\"box\", name=\"v3y\", pos=[x3, y3, z3], color='forest')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v4z\", pos=[x4, y4, z4], color='blue')\n cmd.pseudoatom(\"box\", name=\"v1z\", pos=[x1, y1, z1], color='blue')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def test_box_new(self) -> None:\n box_number = \"BOX22222\"\n bm = BoxManagementClass()\n\n # add a box with an invalid box type\n box_type_str = 'XXX'\n with raises(InvalidValueError):\n _ = bm.box_new(box_number=box_number, box_type=box_type_str)\n\n # add a box with a valid box type\n box_type_str = 'Evans'\n box_type = BoxType.objects.get(box_type_code=box_type_str)\n box = bm.box_new(box_number=box_number, box_type=box_type)\n assert box.box_number == box_number\n assert box.box_type == box_type\n assert box.location is None\n assert box.product is None\n assert box.exp_year is None\n assert box.exp_month_start is None\n assert box.exp_month_end is None\n assert box.date_filled is 
None\n assert box.quantity == box_type.box_type_qty\n\n # test attempting to create a duplicate box number\n with raises(InvalidActionAttemptedError):\n _ = bm.box_new(box_number=box_number, box_type=box_type_str)\n return", "def create(self, boxes=[]):\n\n self.oid = fboxlib.create_boxarray_from_boxes(boxes)", "def __init__(self,\n mass_1, mass_2,\n width_1, width_2,\n x0_1, x0_2,\n v0_1=0, v0_2=0,\n h=0.1):\n self.box_1 = box.box(mass_1, width_1, x0_1, v0_1)\n self.box_2 = box.box(mass_2, width_2, x0_2, v0_2)\n self.h = h\n self.coll_counter = 0", "def create_node_bouding_box(node, bbox_name):\n x1, y1, z1, x2, y2, z2 = cmds.exactWorldBoundingBox(node)\n cube = cmds.polyCube()[0]\n\n # Get the centre point for each axis\n centre_points = [\n (x2 + x1) / 2.0,\n (y2 + y1) / 2.0,\n (z2 + z1) / 2.0\n ]\n\n # Scale up the cube to fit.\n bbox_scale = [\n x2 - x1,\n y2 - y1,\n z2 - z1\n ]\n\n cmds.move(centre_points[0], centre_points[1], centre_points[2], cube)\n cmds.scale(bbox_scale[0], bbox_scale[1], bbox_scale[2], cube)\n\n cube = cmds.ls(cube, long=True)\n bbox = cmds.rename(cube, bbox_name)\n\n return bbox", "def create_default_box(self, GroupID = UUID(), relative_position = (1, 0, 0)):\n\n # self.agent.Position holds where we are. we need to add this tuple to the incoming tuple (vector to a vector)\n location_to_rez_x = self.agent.Position.X + relative_position[0]\n location_to_rez_y = self.agent.Position.Y + relative_position[1]\n location_to_rez_z = self.agent.Position.Z + relative_position[2]\n\n location_to_rez = (location_to_rez_x, location_to_rez_y, location_to_rez_z)\n\n # not sure what RayTargetID is, send as uuid of zeros\n RayTargetID = UUID()\n\n self.object_add(self.agent.agent_id, self.agent.session_id,\n GroupID = GroupID, PCode = PCodeEnum.Primitive,\n Material = 3, AddFlags = 2, PathCurve = 16,\n ProfileCurve = 1, PathBegin = 0, PathEnd = 0,\n PathScaleX = 100, PathScaleY = 100, PathShearX = 0,\n PathShearY = 0, PathTwist = 0, PathTwistBegin = 0,\n PathRadiusOffset = 0, PathTaperX = 0, PathTaperY = 0,\n PathRevolutions = 0, PathSkew = 0, ProfileBegin = 0,\n ProfileEnd = 0, ProfileHollow = 0, BypassRaycast = 1,\n RayStart = location_to_rez, RayEnd = location_to_rez,\n RayTargetID = RayTargetID, RayEndIsIntersection = 0,\n Scale = (0.5, 0.5, 0.5), Rotation = (0, 0, 0, 1),\n State = 0)", "def boxen(self, paneel):\n \n boxje = wx.BoxSizer(wx.VERTICAL)\n boxje.Add(paneel, 1, wx.EXPAND | wx.ALL)\n \n #boxje.Add(vbox, 8, wx.EXPAND | wx.ALL)\n return boxje", "def draw_box(image, box, color, thickness=2):\n b = np.array(box).astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)", "def box2kml(xy,fname=None,name='box',color='FF0000',width=3,verbose=True):\n\n if fname is None:\n fname = name + '.kml'\n\n if type(xy[0]) is tuple:\n x1,x2 = xy[0]\n y1,y2 = xy[1]\n else:\n x1,x2,y1,y2 = xy[0:]\n\n if verbose:\n print(\"Box: %10.6f %10.6f %10.6f %10.6f\" % (x1,x2,y1,y2))\n\n elev = 0.\n kml_text = kml_header(fname)\n\n mapping = {}\n mapping['x1'] = x1\n mapping['x2'] = x2\n mapping['y1'] = y1\n mapping['y2'] = y2\n mapping['elev'] = elev\n mapping['name'] = name\n mapping['desc'] = \" x1 = %s, x2 = %s\\n\" % (f2s(x1),f2s(x2)) \\\n + \" y1 = %s, y2 = %s\" % (f2s(y1),f2s(y2))\n mapping['color'] = color\n mapping['width'] = width\n\n region_text = kml_region(mapping)\n\n kml_text = kml_text + region_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)", "def 
draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def get_box_coordinates(self):\n return QRect(self.box_begin,self.box_end)", "def createWireBox(node):\n\n #create an empty bounding box\n bb = osg.BoundingBox()\n\n #if we have a geode, expand by the drawables bounding box, else use the bounding sphere\n geode = osg.NodeToGeode(node)\n if geode:\n print \"geode found\" \n for i in range(geode.getNumDrawables()):\n dwb = geode.getDrawable(0)\n bb.expandBy(dwb.getBound());\n else:\n bb.expandBy(node.getBound()) \n \n center = node.getBound().center()\n \n #create a geode for the wirebox\n wbgeode = osg.Geode()\n wbgeode.setName(\"ExtentsGeode\")\n\n #create a stateset for the wirebox\n stateset = osg.StateSet()\n wbgeode.setStateSet(stateset)\n# stateset.thisown = 0 \n\n #create a polygonmode state attribute\n polyModeObj = osg.PolygonMode()\n polyModeObj.setMode(osg.PolygonMode.FRONT_AND_BACK, osg.PolygonMode.LINE)\n stateset.setAttribute(polyModeObj)\n \n #create a linewidth state attribute\n lw = osg.LineWidth()\n lw.setWidth(2.0)\n stateset.setAttribute(lw)\n \n stateset.setAttribute(selmat)\n\n #create a drawablw box with the right position and size\n lx = bb._max.x() - bb._min.x()\n ly = bb._max.y() - bb._min.y()\n lz = bb._max.z() - bb._min.z()\n box = osg.Box(center, lx, ly, lz)\n shape = osg.ShapeDrawable(box)\n #shape.setColor(osg.Vec4(1.0, 0.0, 0.0, 1.0))\n \n #add the drawable to the wirebox geode\n wbgeode.addDrawable(shape)\n\n for pointer in [stateset, box, polyModeObj, lw, shape]:\n pointer.thisown = False\n\n #return the wirebox geode\n return wbgeode", "def get_box(\n self, reference: Output | None = None, dest_box: Box | None = None\n ) -> Box:\n if reference:\n reference_ptr = reference._ptr\n else:\n reference_ptr = ffi.NULL\n\n if not dest_box:\n dest_box = Box(ptr=ffi.new(\"struct wlr_box *\"))\n\n lib.wlr_output_layout_get_box(self._ptr, reference_ptr, dest_box._ptr)\n return dest_box", "def _make_instruction_box(self):\n #make the instructions\n self.instructionLabel = gtk.Label()\n self.instructionImage = gtk.Image()\n instructionBox = gtk.HBox(spacing=PADDING)\n instructionBox.pack_start(self.instructionImage, False, False, 0)\n instructionBox.pack_start(self.instructionLabel, False, False, 0)\n descriptionLabel = WrapLabel.WrapLabel(\"You must enable UPnP in your router or forward the port manually to be a relay. 
Otherwise, peers cannot send traffic through your computer.\\n\\nAlso remember to unblock BitBlinder.exe and Tor.exe in any firewall.\")\n \n #make help link row\n routerAccessLink = GTKUtils.make_html_link(\"Access your router\", \"\")\n portForwardingLink = GTKUtils.make_html_link(\"How do I forward a port?\", \"\")\n linkRow = gtk.HBox()\n linkRow.pack_start(portForwardingLink, True, True, 0)\n linkRow.pack_start(routerAccessLink, True, True, 0)\n \n testingBox = self._make_test_bar()\n \n #pack everything together\n box = gtk.VBox(spacing=PADDING)\n box.pack_start(instructionBox, False, False, 0)\n box.pack_start(testingBox, False, False, 0)\n box.pack_start(descriptionLabel, False, False, 0)\n box.pack_start(linkRow, False, False, 0)\n box.show_all()\n return box", "def uiNewVerticalBox():\n\n # Set return type\n clibui.uiNewVerticalBox.restype = ctypes.POINTER(uiBox)\n\n return clibui.uiNewVerticalBox()", "def box(original, radius):\n batches = original.size()[0]\n num_elem = h.product(original.size()[1:])\n ei = h.getEi(batches,num_elem)\n \n if len(original.size()) > 2:\n ei = ei.contiguous().view(num_elem, *original.size())\n\n return HBox(original, None, ei * radius).checkSizes()", "def fill_box(self, x, y, w, h):\n\t\tpass", "def redraw_box(self) -> None:\n from pymol import cmd\n \n # Provided a selection\n if \"sele\" in cmd.get_names(\"selections\"):\n # Get dimensions of selected residues\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"sele\")\n\n if self.min_x.value() != self.min_x_set or self.max_x.value() != self.max_x_set or self.min_y.value() != self.min_y_set or self.max_y.value() != self.max_y_set or self.min_z.value() != self.min_z_set or self.max_z.value() != self.max_z_set or self.angle1.value() != self.angle1_set or self.angle2.value() != self.angle2_set:\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n # Padding or selection altered\n else:\n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set background box values\n self.min_x_set = round(self.x - (min_x - self.padding.value()), 1) + self.min_x.value() - self.min_x_set\n self.max_x_set = round((max_x + self.padding.value()) - self.x, 1) + self.max_x.value() - self.max_x_set\n self.min_y_set = round(self.y - (min_y - self.padding.value()), 1) + self.min_y.value() - self.min_y_set\n self.max_y_set = round((max_y + self.padding.value()) - self.y, 1) + self.max_y.value() - self.max_y_set\n self.min_z_set = round(self.z - (min_z - self.padding.value()), 1) + self.min_z.value() - self.min_z_set\n self.max_z_set = round((max_z + self.padding.value()) - self.z, 1) + self.max_z.value() - self.max_z_set\n self.angle1_set = 0 + self.angle1.value()\n self.angle2_set = 0 + self.angle2.value()\n self.padding_set = self.padding.value()\n # Not provided a selection\n else:\n if self.min_x.value() != self.min_x_set or self.max_x.value() != self.max_x_set or self.min_y.value() != self.min_y_set or self.max_y.value() != self.max_y_set or self.min_z.value() != self.min_z_set or self.max_z.value() != self.max_z_set or self.angle1.value() != self.angle1_set or self.angle2.value() != self.angle2_set:\n self.min_x_set = self.min_x.value()\n self.max_x_set = 
self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n\n if self.padding_set != self.padding.value():\n # Prepare dimensions without old padding\n min_x = self.padding_set - self.min_x_set\n max_x = self.max_x_set - self.padding_set\n min_y = self.padding_set - self.min_y_set\n max_y = self.max_y_set - self.padding_set\n min_z = self.padding_set - self.min_z_set\n max_z = self.max_z_set - self.padding_set\n\n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set background box values\n self.min_x_set = round(self.x - (min_x - self.padding.value()), 1)\n self.max_x_set = round((max_x + self.padding.value()) - self.x, 1)\n self.min_y_set = round(self.y - (min_y - self.padding.value()), 1)\n self.max_y_set = round((max_y + self.padding.value()) - self.y, 1)\n self.min_z_set = round(self.z - (min_z - self.padding.value()), 1)\n self.max_z_set = round((max_z + self.padding.value()) - self.z, 1)\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Set Box variables in the interface\n self.min_x.setValue(self.min_x_set)\n self.max_x.setValue(self.max_x_set)\n self.min_y.setValue(self.min_y_set)\n self.max_y.setValue(self.max_y_set)\n self.min_z.setValue(self.min_z_set)\n self.max_z.setValue(self.max_z_set)\n self.angle1.setValue(self.angle1_set)\n self.angle2.setValue(self.angle2_set) \n \n # Redraw box\n self.draw_box()", "def _make_required_ports_box(self):\n #make entry rows\n self.relayBox = RelayPortStatusBox(\"Relay Port (TCP)\", \"orPort\", self)\n self.upnpBox = UPnPStatusBox(\"UPnP\")\n \n #pack them together:\n box = gtk.VBox(spacing=PADDING)\n box.pack_start(self.relayBox, False, False, 0)\n box.pack_start(self.upnpBox, False, False, 0)\n box = GTKUtils.add_padding(box, PADDING)\n frame = GTKUtils.add_frame(box, name=\"Relay Port\", width=0)\n frame.show_all()\n return frame", "def fillbox(self,event=None):\n \n pass", "def x_lb(self):\n pass", "def __init__(self, Lx, Ly = None, Lz = None):\n\n if isinstance(Lx, float):\n self.box = mat(3, 3)\n self.box[0][0] = Lx\n self.box[1][1] = Lx\n self.box[2][2] = Lx\n\n if isinstance(Ly, float):\n self.box[1][1] = Ly\n if isinstance(Lz, float):\n self.box[2][2] = Lz\n elif isinstance(Lx, mat):\n if Lx.get_rows() == 3 and Lx.get_cols() == 3:\n self.box = mat(Lx)\n elif isinstance(Lx, md_box):\n self.box = mat(md_box.get())", "def to_xml(self, parent):\n corp_elem = ET.SubElement(parent, \"corporation\")\n ET.SubElement(corp_elem, \"name\").text = self.name\n ET.SubElement(corp_elem, \"description\").text = self.description\n ET.SubElement(corp_elem, \"locked\").text = str(self.locked)\n boxes_elem = ET.SubElement(corp_elem, \"boxes\")\n boxes_elem.set(\"count\", \"%s\" % str(len(self.boxes)))\n for box in self.boxes:\n box.to_xml(boxes_elem)", "def box_plots(norm, original):\n bp = plt.boxplot([norm, original], notch=False, patch_artist=True)\n for box in bp['boxes']:\n box.set(color=\"red\")\n box.set(color=\"blue\")\n plt.ylabel(\"coefficient of variation\")\n plt.xlabel(\"Methods\")\n my_xticks = ['RPKM', 'raw counts']\n x = [1, 2]\n plt.xticks(x, my_xticks)\n plt.ylim(0, 400)\n plt.show()", "def get_box(self, box_name):\n for box in self._boxes:\n if box.name == box_name:\n return 
box", "def __init__(self):\n self.boxes_mz = IntervalTree()\n self.boxes_rt = IntervalTree()", "def Set(self, *args):\n return _Bnd.Bnd_Box_Set(self, *args)", "def add_box(self, position_x=None, position_y=None, width=None,\n height=None, static=False, kinematic=False,\n density=None, 密度=None,\n 位置x=None, 位置y=None, 寬=None, 高=None, \n 固定=False, random_flag=False):\n\n if static or 固定 :\n box_body = pymunk.Body(body_type=pymunk.Body.STATIC)\n elif kinematic:\n box_body = pymunk.Body(body_type=pymunk.Body.KINEMATIC)\n else:\n box_body = pymunk.Body(body_type=pymunk.Body.DYNAMIC) \n\n \n tmp_width = 寬 if 寬 is not None else width\n if not random_flag:\n tmp_width = tmp_width if tmp_width is not None else self.config.SIZE_WIDTH\n else:\n tmp_width = tmp_width if tmp_width is not None else randint(*self.config.RAMDOM_SIZE_RANGE)\n\n if tmp_width <= 0: raise BoxException('新增方塊錯誤','寬(width)要大於0')\n \n tmp_height = 高 if 高 is not None else height\n if not random_flag:\n tmp_height = tmp_height if tmp_height is not None else self.config.SIZE_HEIGHT\n else:\n tmp_height = tmp_height if tmp_height is not None else randint(*self.config.RAMDOM_SIZE_RANGE)\n\n if tmp_height <= 0: raise BoxException('新增方塊錯誤','高(height)要大於0')\n\n box_shape = pymunk.Poly.create_box(box_body, (tmp_width, tmp_height) )\n\n tmp_density = 密度 if 密度 is not None else density\n if tmp_density is None:\n tmp_density = self.config.DENSITY\n box_shape.density = tmp_density\n \n box_shape.friction = self.config.FRICTION\n box_shape.elasticity = self.config.ELASTICITY\n \n box_shape.color = color.random() \n \n\n\n tmp_x = 位置x if 位置x is not None else position_x\n if not random_flag:\n tmp_x = tmp_x if tmp_x is not None else self.config.X\n else:\n tmp_x = tmp_x if tmp_x is not None else randint(*self.config.RANDOM_X_RANGE)\n\n tmp_y = 位置y if 位置y is not None else position_y\n if not random_flag:\n tmp_y = tmp_y if tmp_y is not None else self.config.Y\n else:\n tmp_y = tmp_y if tmp_y is not None else randint(*self.config.RANDOM_Y_RANGE)\n\n box_body.position = (tmp_x, tmp_y)\n\n if not random_flag:\n box_body.angle = 0\n else:\n box_body.angle = 3.1416 * 2 * random()\n\n if not random_flag:\n box_body.velocity = (0, 0)\n else:\n box_body.velocity = ( randint(*self.config.RANDOM_VELOCITY_RANGE),\n randint(*self.config.RANDOM_VELOCITY_RANGE) ) \n\n self.space.add(box_body, box_shape)\n return BodyShapeWrapper(box_body, box_shape)", "def test_get_all_boxes(self, postfix_directory):\n print(\"Test_All_Boxes\")\n protein_file = os.path.join(postfix_directory, \"PfATP4.pdb\")\n ligand_file = os.path.join(postfix_directory, \"SJ733.pdb\")\n coords = rdkit_util.load_molecule(protein_file)[0]\n\n boxes = dc.dock.binding_pocket.get_all_boxes(coords)\n assert isinstance(boxes, list)\n # Pocket is of form ((x_min, x_max), (y_min, y_max), (z_min, z_max))\n for pocket in boxes:\n assert len(pocket) == 3\n assert len(pocket[0]) == 2\n assert len(pocket[1]) == 2\n assert len(pocket[2]) == 2\n (x_min, x_max), (y_min, y_max), (z_min, z_max) = pocket\n assert x_min < x_max\n assert y_min < y_max\n assert z_min < z_max", "def get_boxes(self) -> List[Box]:\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def addSceneBox(self,**kwargs):\n box = self.mm.modes[self.mm.cur_mode]\n self.predefined_box = box \n self.mm.confirm()\n self.baxter.yes()\n self.mm.loadPreviousMenu()", "def item():\n return {'name':'box',\n 'value':340}" ]
[ "0.646756", "0.641661", "0.58954304", "0.58318424", "0.58084816", "0.5737751", "0.57088435", "0.5644141", "0.56273854", "0.5592895", "0.55899185", "0.5582731", "0.55821747", "0.55549514", "0.5553726", "0.5544039", "0.5518349", "0.551408", "0.5510807", "0.5510807", "0.54989916", "0.5495238", "0.5456784", "0.54197115", "0.53654927", "0.53597146", "0.53493166", "0.53397214", "0.53240573", "0.5300161", "0.5299721", "0.5270448", "0.526005", "0.52457803", "0.52326655", "0.52325577", "0.5224165", "0.5211146", "0.5211146", "0.52028817", "0.51786315", "0.51708704", "0.51551175", "0.5141874", "0.51247525", "0.512395", "0.51201844", "0.5115134", "0.5098555", "0.5088344", "0.50743604", "0.5065218", "0.50362486", "0.50190777", "0.50185305", "0.49925178", "0.49872452", "0.49835646", "0.49835646", "0.49835646", "0.49724713", "0.49548936", "0.4932477", "0.49297327", "0.49272296", "0.4926063", "0.49243888", "0.49140286", "0.49051175", "0.48948905", "0.4890655", "0.48755497", "0.48681173", "0.4867838", "0.48594755", "0.48511136", "0.4848455", "0.48483896", "0.48471147", "0.48456553", "0.48428413", "0.48377454", "0.48167068", "0.48150867", "0.48072442", "0.48059154", "0.48020774", "0.48019874", "0.47990853", "0.47939453", "0.47913596", "0.4790252", "0.47896773", "0.4785679", "0.47788176", "0.47761223", "0.47730276", "0.47716773", "0.47655785", "0.4763536" ]
0.5329588
28
Block attached to peg. See xml for more info.
def __init__(self, env, initial_pos, random_delta_range, name, resource='objects/block_peg.xml'): self._env = env self._initial_pos = np.asarray(initial_pos) self._random_delta_range = np.asarray(random_delta_range) self._name = name self._resource = resource assert self._initial_pos.shape == (3,) assert self._random_delta_range.shape == (2,)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onBlock(self, data) :\n pass", "def penblock(self, block):\n self.block = block", "def add_block(self, env):\n block_size = (0.04, 0.04, 0.04)\n block_pose = self.random_pose(env, block_size)\n block_urdf = 'assets/stacking/block.urdf'\n block_id = env.add_object(block_urdf, block_pose)\n self.object_points[block_id] = np.float32((0, 0, 0)).reshape(3, 1)\n self._IDs[block_id] = 'block'\n return block_id", "def blocks(self):\n pass", "def _get_block(self, pos):\n raise NotImplementedError", "def block(self):\n pass", "def GetBlock(self):\n return self._block", "def getBlock(self) -> ghidra.program.model.correlate.Block:\n ...", "def block(self):\n return self._block", "def makeBlock(tag):\n return {\"t\":\"RawBlock\",\"c\":[\"html\",tag]}", "def block(self, block):\n\n self._block = block", "def block(self, block):\n\n self._block = block", "def block(self) -> IMockPin:\n return self[\"block\"]", "def _set_block(self, pos, block_):\n raise NotImplementedError", "def block(self):\n if self._block is None:\n bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(\n self.idfname, self.iddname, self, commdct=None, block=None\n )\n self._block = block\n self._idd_info = commdct\n self._idd_index = idd_index\n self._idfobjects = bunchdt\n self._model = data\n self._idd_version = versiontuple\n return self._block", "def add_block_as_child_node(self, block, node):\n child = etree.SubElement(node, \"unknown\")\n block.add_xml_to_node(child)", "def block_eid(self, block_id):\n ...", "def block_one(self):", "def _start_element(self, name, attrs):\n self.tagstack.append(name)\n if name==\"volume\":\n self.volumeobject = volumeobject_sax()\n self.volumeobject.block_size = 512 # reasonable default\n self.volumeobject.image = self.imageobject\n self.data = u\"\"\n if \"offset\" in attrs: self.volumeobject.offset = int(attrs[\"offset\"]) \n return\n if name==\"block_size\":\n self.data = u\"\"\n \n if name==\"fileobject\":\n self.fileobject = fileobject_sax(imagefile=self.imagefile)\n self.fileobject.volume = self.volumeobject\n return\n if name=='hashdigest':\n self.hashdigest_type = attrs['type'] # remember the type\n if not self.fileobject: return\n if name==\"run\":\n b = byterun()\n b.decode_sax_attributes(attrs)\n self.fileobject._byteruns.append(b)\n return\n self.data = u\"\" # new element; otherwise data is ignored", "def add_block(self, block_pf):\n\n # test si il s'agit du bloc genesis\n if len(self.blocks) != 0:\n # check si previous H est coherent avant ajout a chaine\n if self.check_previousBlockH(block_pf.header['prevBlockH']):\n self.blocks.append(block_pf)\n else:\n print \"== Probleme de parent\"\n print \"= %s\" % block_pf.header['prevBlockH']\n print \"= %s\" % getHashBlock(self.get_topBlock())\n else:\n self.blocks.append(block_pf)", "def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)", "def draw_block_element(self, cr, x, y):\n cr.rectangle(\n self.wall_width+x*self.block_size, \n (self.block_height-y-1)*self.block_size, \n self.block_size, self.block_size\n )\n \n cr.set_source_rgb(0.2, 0.25, 0.5)\n cr.fill_preserve()\n\n cr.set_source_rgb(0.8,0.8,0.8)\n cr.set_line_width(self.block_size/10)\n cr.stroke()", "def new_block(self):\n child = Block(self, py3_wrapper=self.py3_wrapper)\n self.add(child)\n return child", "def add_block(self, env, block_color, width, height):\n\n block_size = (0.04, 0.04, 0.04)\n block_urdf = \"stacking/block.urdf\"\n block_pose = self.get_random_pose(env, 
block_size)\n block_id = env.add_object(block_urdf, block_pose)\n pb.changeVisualShape(\n block_id, -1, rgbaColor=utils.COLORS[block_color] + [1])\n # (0, None): 0 means that the block is symmetric.\n # TODO(hagrawal): Not sure what None means. Update. This is kept\n # for CLIPort compatibility. We don't use it.\n self.blocks.append((block_id, (0, None)))\n block_pix = utils.xyz_to_pix(block_pose[0], self.bounds, self.pix_size)\n block_obj_info = {\n \"obj_id\": block_id,\n \"pose\": block_pose,\n \"size\": block_size,\n \"urdf\": block_urdf,\n \"color\": block_color,\n \"unknown_color\": block_color in utils.EVAL_COLORS,\n \"pix\": block_pix,\n \"region\": determine_region(block_pix[0], block_pix[1], width, height),\n }\n return block_obj_info", "def create_hard_block_at(self, x, y):\n cell_size = self.map.get_cell_size()\n obj = HardBlock(\n parent=self.map,\n style={\n 'width': cell_size, \n 'height': cell_size * 2, \n 'z-index': layers['object'] }\n )\n # I am a hard block, I can stop the fire without being destroyed\n fireblocking(block(obj))\n\n self.map.add_node(obj, x, y, 0, -cell_size)\n return obj", "def _start_element(self, name, attrs):\n self.tagstack.append(name)\n if name==\"volume\":\n self.volumeobject = volumeobject_sax()\n self.volumeobject.image = self.imageobject\n return\n if name=='fileobject':\n self.data = None # don't record this\n return\n self.data = u\"\" # new element; otherwise data is ignored", "def get_block(self):\n return Gumtree.gumtree.getBlock()", "def run(self, parent, blocks):\r\n pass", "def put_elem_block(self, elem_blk_id, elem_type, num_elem_this_blk,\n num_nodes_per_elem, num_attr):\n ierr = exolib.py_expelb(self.exoid, elem_blk_id, elem_type,\n num_elem_this_blk, num_nodes_per_elem, num_attr)\n if ierr:\n raise ExodusIIWriterError(\"Error putting element block information\")", "def draw_block(self):\n draw_component = DrawComponent(self.component_spot,self.component_type)\n return draw_component", "def __init__(self, block):\n super(CraftFlowerPot, self).__init__(block)\n self.pot = (block.getWorld()).getTileEntityAt(getX(), getY(), getZ())", "def _adjustBlock(self, b):\n raise NotImplementedError", "def gen_new_block(self):\n block = BasicBlock()\n self.blocks.append(block)\n return block", "def genesis_block(self):\n block = Block(target=self.target, transactions=[])\n self.current_transactions.append(block)", "def __init__(self, parent, blocks=None):\n self.parent = parent\n self.x = int(parent.block_width / 2) ## x coord of base block element\n self.y = parent.block_height\n\n ## Setup rotate variable\n self.rotate = 0\n\n if blocks:\n self.block_elements = blocks ## Relative coords\n else:\n self.block_elements = [(-1,0),(0, 0),(1,0),(2,0)] ## Relative coord", "def block_type(self):\r\n raise NotImplementedError()", "def _get_block(self, pos):\n return _get_mc().getBlock(pos)", "def _end_element(self, name):\n assert(self.tagstack.pop()==name)\n if name==\"volume\":\n self.volumeobject = None\n return\n if name==\"block_size\" and len(self.tagstack) > 1 : \n if self.tagstack[-1] == \"volume\" : \n self.volumeobject.block_size = int(self.data)\n self.data=None\n return\n if name==\"fileobject\":\n if (self.flags & ALLOC_ONLY)==0 or self.fileobject.allocated():\n self.callback(self.fileobject)\n self.fileobject = None\n return\n if name=='hashdigest':\n self.fileobject._tags[self.hashdigest_type.lower()] = self.data\n self.data = None\n return\n if self.fileobject: # in a file object, all tags are remembered\n self.fileobject._tags[name] = 
self.data\n self.data = None\n return\n # Special case: <source><image_filename>fn</image_filename></source>\n # gets put in <imagfile>fn</imagefile>\n if name=='image_filename' and self.tagstack==['xml','fiwalk','source']:\n self.imageobject._tags[name] = self.data\n # Handle lots of XML that was generated wrong\n # This can be removed when XML version 0.3 is gone\n if name=='imagefile' and self.tagstack==['xml','fiwalk','source']:\n self.imageobject._tags['image_filename'] = self.data", "def nextblock(self, parent=None, **kwargs):\n block = self.newblock(parent, **kwargs)\n if not parent and self.block:\n self.block.add_child(block)\n\n self.block = block\n return block", "def __init__(self):\n\n pyxel.init(windowWidth, windowHeight)\n\n # generates randomly ordered list of [0, 1, 2, 3, 4, 5, 6, 7]\n self.bag = sample(list(range(7)), 7)\n\n # generates a block from last element of self.bag into self.blocks\n self.block = Block(blockData[self.bag.pop()])\n\n pyxel.run(self.update, self.draw)", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def add(self, block: Block):\n self._buffer.append(block)", "def create_block(self):\n return poet_transaction_block.PoetTransactionBlock()", "def changed_block(self, old_block, new_block):", "def new_block_call(self, event):\n self.push_job(flush=True)", "def __init__(self):\n self.chain = [Block.genesis()]", "def process(self, request):\n Block.process(self, request)\n content = self.contentElement.process(request)\n if content:\n self.idevice.content = content\n if 'emphasis'+self.id in request.args:\n self.idevice.emphasis = int(request.args['emphasis'+self.id][0])", "def _set_block(self, pos, block_):\n self._changes[deepcopy(pos)] = block", "def to_basic_block(self):\n return _spacegrant_swig.general_burster_2_sptr_to_basic_block(self)", "def block_data(self):\n return self._current_block", "def save_block(self, block):\n # Implementing this is optional.", "def _set_block(self, pos, block_):\n _get_mc().setBlock(pos, block_)", "def parse_xml(cls, node, runtime, keys, id_generator):\r\n xml = etree.tostring(node)\r\n block = cls.from_xml(xml, runtime, id_generator)\r\n return block", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def mine(self):\n # Checking if there is anything to be mined \n if len(self.pool) > 0:\n # Getting data from the pools list and removing it from the list\n data = self.pool.pop()\n # Instantiating the block with the given data and hash of 
the last block in the blocks list\n block = Block(data, self.blocks[-1].hash)\n # mining the block on the given difficulty level\n block.mine(self.difficulty)\n # Adding the block to the chain\n self.add_to_chain(block)\n # Showing block details\n self.verbose(block)", "def draw(self, parent, cr):\n for x, y in self.get_block_coords():\n parent.draw_block_element(cr, x, y)", "def create_genesis(self):\n return Block(0, 0, b'0', b'0', b'')", "def test(self, parent, block):\r\n pass", "def to_basic_block(self) -> \"gr::basic_block_sptr\":\n return _beamforming_swig.doaesprit_sptr_to_basic_block(self)", "def is_block(self):\n\t\treturn self.name in get_elements_collection(self.__res, 'block_level')", "def announce_new_block(block):\n for peer in peers:\n url = \"{}add_block\".format(peer)\n requests.post(url, data=json.dumps(block.__dict__, sort_keys=True))", "def __init__(self, label_event_handler=None, sendee=None, sending=True):\n super(MLFBlockProcessor, self).__init__(sendee, sending=sending)\n self.set_sendee(sendee)\n self._current = None\n self._label_event_handler = label_event_handler", "def visit_Block(self, node: Block) -> None:\n\n for declaration in node.declarations:\n self.visit(declaration)\n self.visit(node.compound_statement)", "def updateBlock(self):\n self.blkno = self.blknoSpinBox.value() - 1\n self.initDataParms()\n self.updateCurveList()\n self.compute()", "def save_block(self, drip_campaign_id, start_time, nodes_id):\n new_block = Block(\n drip_campaign_id=drip_campaign_id,\n start_time=start_time,\n nodes_id=nodes_id\n )\n new_block.save()", "def __ParseBlock(self, ast):\n for node in ast:\n node_name = node[0]\n node_value = node[1]\n if node_name == 'statement':\n self.__ParseStatement(node_value)\n else:\n logging.info('Unknown AST node in message block: %s' % (node_name))", "def block_parent_eid(self, block_id):\n ...", "def announce_new_block(block):\n for peer in peers:\n url = \"{}add_block\".format(peer)\n headers = {'Content-Type': \"application/json\"}\n requests.post(url,\n data=json.dumps(block.__dict__, sort_keys=True),\n headers=headers)", "def put_block(self):\n self.blocks[self.editor_cursor_position[1]][\n self.editor_cursor_position[0]] = self.available_block_types[self.current_block_type]", "def render(self): # pragma: no cover\n from graphviz import Digraph\n dot = Digraph(name=\"top\")\n for block in self.blocks:\n if isinstance(block, Branch):\n label = \"if \" + astor.to_source(block.cond)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"invhouse\"})\n elif isinstance(block, Yield):\n label = astor.to_source(block.value)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"oval\"})\n elif isinstance(block, BasicBlock):\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n elif isinstance(block, HeadBlock):\n label = \"Initial\"\n dot.node(str(id(block)) + \"_start\", label.rstrip(), {\"shape\": \"doublecircle\"})\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.initial_statements)\n # label += \"\\nLive Ins : \" + 
str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n dot.edge(str(id(block)) + \"_start\", str(id(block)))\n else:\n raise NotImplementedError(type(block))\n # for source, sink, label in self.edges:\n for sink, label in block.outgoing_edges:\n dot.edge(str(id(block)), str(id(sink)), label)\n\n\n file_name = tempfile.mktemp(\"gv\")\n dot.render(file_name, view=True)\n # with open(\"cfg.dot\", \"w\") as file:\n # file.write(dot.source)\n # exit()", "def announce_new_block(block):\n for peer in peers:\n url = \"{}/add_block\".format(peer)\n headers = {'Content-Type': \"application/json\"}\n requests.post(url,\n data=json.dumps(block.__dict__, sort_keys=True),\n headers=headers)", "def to_basic_block(self):\n return _spacegrant_swig.ax25_pdu_packer_sptr_to_basic_block(self)", "def block(self, text, head_offset=0):\n if not self.lite:\n tre = '|'.join(self.btag)\n else:\n tre = '|'.join(self.btag_lite)\n text = text.split('\\n\\n')\n\n tag = 'p'\n atts = cite = graf = ext = ''\n c1 = ''\n\n out = []\n\n anon = False\n for line in text:\n pattern = r'^(%s)(%s%s)\\.(\\.?)(?::(\\S+))? (.*)$' % (\n tre, self.align_re, self.c\n )\n match = re.search(pattern, line, re.S)\n if match:\n if ext:\n out.append(out.pop() + c1)\n\n tag, atts, ext, cite, graf = match.groups()\n h_match = re.search(r'h([1-6])', tag)\n if h_match:\n head_level, = h_match.groups()\n tag = 'h%i' % max(1, min(int(head_level) + head_offset, 6))\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, graf)\n # leave off c1 if this block is extended,\n # we'll close it at the start of the next block\n\n if ext:\n line = \"%s%s%s%s\" % (o1, o2, content, c2)\n else:\n line = \"%s%s%s%s%s\" % (o1, o2, content, c2, c1)\n\n else:\n anon = True\n if ext or not re.search(r'^\\s', line):\n o1, o2, content, c2, c1, eat = self.fBlock(tag, atts, ext,\n cite, line)\n # skip $o1/$c1 because this is part of a continuing\n # extended block\n if tag == 'p' and not self.hasRawText(content):\n line = content\n else:\n line = \"%s%s%s\" % (o2, content, c2)\n else:\n line = self.graf(line)\n\n line = self.doPBr(line)\n if self.html_type == 'xhtml':\n line = re.sub(r'<br>', '<br />', line)\n\n if self.html_type == 'html':\n line = re.sub(r'<br />', '<br>', line)\n\n if ext and anon:\n out.append(out.pop() + \"\\n\" + line)\n elif not eat:\n out.append(line)\n\n if not ext:\n tag = 'p'\n atts = ''\n cite = ''\n graf = ''\n\n if ext:\n out.append(out.pop() + c1)\n return '\\n\\n'.join(out)", "def name(self):\r\n return self.block_id", "def to_basic_block(self):\n return _spacegrant_swig.udp_debug_sptr_to_basic_block(self)", "def addBlock(self, aBlock: gp.Block):\n\n if self.blocks[aBlock.y][aBlock.x] != None:\n raise MovementError('game board space not empty')\n self.blocks[aBlock.y][aBlock.x] = aBlock\n self.groundSprites.append(aBlock.sprite)", "def populate_block(self, block, additional_options=None):\n raise NotImplementedError(\n \"SurrogateModel class has not implemented populate_block method.\"\n )", "def check_block(self, block):\n pass", "def list_blocks(self, _):\n print(self.data.name)", "def from_etree(elem):\n assert elem.tag == \"block\", elem.tag\n\n # Create the block with basic attributes\n block = Block(name=elem.attrib[\"name\"], instance=elem.attrib[\"instance\"], mode=elem.get(\"mode\", \"default\"))\n\n # Parse ports\n rotation_maps = {}\n for 
tag in [\"inputs\", \"outputs\", \"clocks\"]:\n port_type = tag[:-1]\n\n xml_ports = elem.find(tag)\n if xml_ports is not None:\n for xml_port in xml_ports:\n # Got a port rotation map\n if xml_port.tag == \"port_rotation_map\":\n port_name = xml_port.attrib[\"name\"]\n rotation = xml_port.text\n\n # Parse the map\n rotation_map = {}\n for i, j in enumerate(rotation.strip().split()):\n if j != \"open\":\n rotation_map[i] = int(j)\n\n # Store it to be later associated with a port\n rotation_maps[port_name] = rotation_map\n\n # Got a port\n else:\n port = Port.from_etree(xml_port, port_type)\n block.ports[port.name] = port\n\n # Associate rotation maps with ports\n for port_name, rotation_map in rotation_maps.items():\n assert port_name in block.ports, port_name\n block.ports[port_name].rotation_map = rotation_map\n\n # Recursively parse sub-blocks\n for xml_block in elem.findall(\"block\"):\n sub_block = Block.from_etree(xml_block)\n\n sub_block.parent = block\n block.blocks[sub_block.instance] = sub_block\n\n # Parse attributes and parameters\n for tag, data in zip([\"attributes\", \"parameters\"], [block.attributes, block.parameters]):\n # Find the list\n xml_list = elem.find(tag)\n if xml_list is not None:\n # Only a leaf block can have attributes / parameters\n assert block.is_leaf, \"Non-leaf block '{}' with {}\".format(block.instance, tag)\n\n # Parse\n sub_tag = tag[:-1]\n for xml_item in xml_list.findall(sub_tag):\n data[xml_item.attrib[\"name\"]] = xml_item.text\n\n return block", "def block_offsets(self):\n ...", "def __init__(self, pool_guid, txg, timestamp):\n super(ZFSUberBlockEvent, self).__init__(pool_guid, txg, \\\n \"ZFS-uberblock\", self.DATA_TYPE, timestamp)", "def begin():\n return BeginBlock()", "def add_blocker(self, face):\n new_blocker = blocker_string(face, self.relative_level)\n if new_blocker not in self.blockers:\n self.blockers.append(new_blocker)", "def __write_block(self, block, data):\n if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_EASY_FRAMING, True) < 0:\n raise Exception(\"Error setting Easy Framing property\")\n if len(data) > 16:\n raise ValueError(\"Data value to be written cannot be more than 16 characters.\")\n abttx = (ctypes.c_uint8 * 18)()\n abttx[0] = self.MC_WRITE\n abttx[1] = block\n abtrx = (ctypes.c_uint8 * 250)()\n for i in range(16):\n abttx[i + 2] = ord((data + \"\\x00\" * (16 - len(data)))[i])\n return nfc.nfc_initiator_transceive_bytes(self.__device, ctypes.pointer(abttx), len(abttx),\n ctypes.pointer(abtrx), len(abtrx), 0)", "def createFirstBlock(self):\n firstBlock = Block(0, self.__currentTransactionsList, 0, '00')\n self.__chain.append(firstBlock)", "def create_entry_basic_block(self):\n bb = BasicBlock(self)\n self.basic_blocks.insert(0, bb)\n return bb", "def _create_xblock(self, parent_loc, xblock_desc):\r\n create_payload = {\r\n 'category': xblock_desc.category,\r\n 'display_name': xblock_desc.display_name,\r\n }\r\n\r\n if parent_loc is not None:\r\n create_payload['parent_locator'] = parent_loc\r\n\r\n # Create the new XBlock\r\n response = self.session.post(\r\n STUDIO_BASE_URL + '/xblock/',\r\n data=json.dumps(create_payload),\r\n headers=self.headers,\r\n )\r\n\r\n if not response.ok:\r\n msg = \"Could not create {0}. 
Status was {1}\".format(xblock_desc, response.status_code)\r\n raise CourseFixtureError(msg)\r\n\r\n try:\r\n loc = response.json().get('locator')\r\n\r\n except ValueError:\r\n raise CourseFixtureError(\"Could not decode JSON from '{0}'\".format(response.content))\r\n\r\n # Configure the XBlock\r\n response = self.session.post(\r\n STUDIO_BASE_URL + '/xblock/' + loc,\r\n data=xblock_desc.serialize(),\r\n headers=self.headers,\r\n )\r\n\r\n if response.ok:\r\n return loc\r\n else:\r\n raise CourseFixtureError(\r\n \"Could not update {0}. Status code: {1}\".format(\r\n xblock_desc, response.status_code))", "def put_elem_blk_info(self, id, elemType, numElems, numNodesPerElem,\n numAttrsPerElem):\n assert numElems <= self._f.dimensions[\"num_elem\"], \\\n \"Canont have more elements in the block then globally set.\"\n assert numAttrsPerElem == 0, \"Must be 0 for now.\"\n\n # So the logic is as follows. `eb_status` keeps track of which\n # element ids have already been assigned. We find the first that is\n # not zero and that is the actual index of the the element block.\n status = self._f._variables[\"eb_status\"][:]\n assert 0 in status, \"All element blocks already set.\"\n idx = np.argwhere(status == 0)[0][0] + 1\n\n num_el_name = \"num_el_in_blk%i\" % idx\n num_node_per_el_name = \"num_nod_per_el%i\" % idx\n var_name = \"connect%i\" % idx\n\n self._f.dimensions[num_el_name] = numElems\n self._f.dimensions[num_node_per_el_name] = numNodesPerElem\n\n self._f.create_variable(\n var_name, (num_el_name, num_node_per_el_name),\n dtype=np.int32, **self._comp_opts)\n self._f.variables[var_name].attrs['elem_type'] = np.string_(elemType)\n\n # Set the status and thus \"claim\" the element block id.\n self._f.variables['eb_status'][idx - 1] = 1\n # For some reason this is always eb_prop1.\n self._f.variables['eb_prop1'][idx - 1] = id", "def __init__(self, edid, start_index):\n self._block = edid[start_index:(start_index + 3)]", "def render_block(data):\n\tsnippet = data[2] \n\ttitle = data[0]['name']\n\tdescription = data[0]['description']\n\tblock_type = data[0]['type']\n\t\n\n\t# change the panel outline for\n\t# warnings and detections\n\tblock_border = 'yellow' if block_type == 'warning' else 'red1'\n\n\tcode_snippet = Syntax(\n\t\t\t\t\t\tsnippet, \n\t\t\t\t\t\tSYNTAX, \n\t\t\t\t\t\ttheme=THEME, \n\t\t\t\t\t\tline_numbers=True, \n\t\t\t\t\t\tstart_line=data[1]\n\t\t\t\t\t)\n\n\tdescription_txt = Markdown(\n\t\t\tf\"\"\" ## Explanation \\n {description} \"\"\",\n\t\t\tinline_code_lexer=SYNTAX,\n\t\t\tinline_code_theme=THEME,\n\t\t)\n\t\n\tcomponents = RenderGroup(\n\t\t\t\t\tcode_snippet,\n\t\t\t\t\tdescription_txt\n\t\t\t\t)\n\t\n\tblock = Panel(\n\t\t\tcomponents,\n\t\t\ttitle=f'[b white]{title}',\n\t\t\twidth=60,\n\t\t\tborder_style=block_border\n\t\t)\n\n\t# render\n\tprint('\\n')\n\tprint(block)", "def move_block():\n animate(block, 'bounce_end', duration=1, pos=next(block_positions))", "def blockSignal(self, sig):\n block = sig[\"message\"][\"block\"]\n acct = self.selectedAccount\n for newTx in block[\"Tx\"]:\n txid = newTx[\"TxID\"]\n # only grab the tx if its a transaction we care about.\n if acct.caresAboutTxid(txid):\n tx = self.blockchain.tx(txid)\n acct.confirmTx(tx, self.blockchain.tipHeight)\n # \"Spendable\" balance can change as utxo's mature, so update the \n # balance at every block.\n self.signals.balance(acct.calcBalance(self.blockchain.tipHeight))", "def to_basic_block(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_to_basic_block(self)", "def fBlock(self, tag, atts, 
ext, cite, content):\n att = atts\n atts = self.pba(atts)\n o1 = o2 = c2 = c1 = ''\n eat = False\n\n if tag == 'p':\n # is this an anonymous block with a note definition?\n notedef_re = re.compile(r\"\"\"\n ^note\\# # start of note def marker\n ([^%%<*!@#^([{ \\s.]+) # !label\n ([*!^]?) # !link\n (%s) # !att\n \\.? # optional period.\n [\\s]+ # whitespace ends def marker\n (.*)$ # !content\"\"\" % (self.c), re.X)\n notedef = notedef_re.sub(self.fParseNoteDefs, content)\n\n # It will be empty if the regex matched and ate it.\n if '' == notedef:\n return o1, o2, notedef, c2, c1, True\n\n m = re.search(r'fn(\\d+)', tag)\n if m:\n tag = 'p'\n if m.group(1) in self.fn:\n fnid = self.fn[m.group(1)]\n else:\n fnid = m.group(1)\n\n # If there is an author-specified ID goes on the wrapper & the\n # auto-id gets pushed to the <sup>\n supp_id = ''\n\n # if class has not been previously specified, set it to \"footnote\"\n if atts.find('class=') < 0:\n atts = atts + ' class=\"footnote\"'\n\n # if there's no specified id, use the generated one.\n if atts.find('id=') < 0:\n atts = atts + ' id=\"fn%s\"' % fnid\n else:\n supp_id = ' id=\"fn%s\"' % fnid\n\n if att.find('^') < 0:\n sup = self.formatFootnote(m.group(1), supp_id)\n else:\n fnrev = '<a href=\"#fnrev%s\">%s</a>' % (fnid, m.group(1))\n sup = self.formatFootnote(fnrev, supp_id)\n\n content = sup + ' ' + content\n\n if tag == 'bq':\n cite = self.checkRefs(cite)\n if cite:\n cite = ' cite=\"%s\"' % cite\n else:\n cite = ''\n o1 = \"\\t<blockquote%s%s>\\n\" % (cite, atts)\n o2 = \"\\t\\t<p%s>\" % atts\n c2 = \"</p>\"\n c1 = \"\\n\\t</blockquote>\"\n\n elif tag == 'bc':\n o1 = \"<pre%s>\" % atts\n o2 = \"<code%s>\" % atts\n c2 = \"</code>\"\n c1 = \"</pre>\"\n content = self.shelve(self.encode_html(content.rstrip(\"\\n\") +\n \"\\n\"))\n\n elif tag == 'notextile':\n content = self.shelve(content)\n o1 = o2 = ''\n c1 = c2 = ''\n\n elif tag == 'pre':\n content = self.shelve(self.encode_html(content.rstrip(\"\\n\") +\n \"\\n\"))\n o1 = \"<pre%s>\" % atts\n o2 = c2 = ''\n c1 = '</pre>'\n\n elif tag == '###':\n eat = True\n\n else:\n o2 = \"\\t<%s%s>\" % (tag, atts)\n c2 = \"</%s>\" % tag\n\n if not eat:\n content = self.graf(content)\n else:\n content = ''\n return o1, o2, content, c2, c1, eat", "def position_at_beginning(self, block):\n self._curblock = block\n self._lastop = 'head'", "def createBlock(self, block: ghidra.program.model.mem.MemoryBlock, name: unicode, start: ghidra.program.model.address.Address, length: long) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def __str__(self):\n return 'MLBlock - {}'.format(self.name)", "def mark_position(self, node):\n if self.block:\n src_descr = self.source_descr\n pos = (src_descr,) + getpos(node)\n self.block.positions.add(pos)", "def nextblock(self, parent=None):\n block = ControlBlock()\n self.blocks.add(block)\n if parent:\n parent.add_child(block)\n elif self.block:\n self.block.add_child(block)\n self.block = block\n return self.block", "def add_peg(self, peg):\n if self.current_index >= 4:\n return\n self.line[self.current_index] = peg\n self.current_index += 1" ]
[ "0.6299285", "0.62494606", "0.5948382", "0.5869707", "0.5861398", "0.57957786", "0.5764005", "0.57609606", "0.5754582", "0.57317865", "0.56770635", "0.56770635", "0.56311584", "0.55958897", "0.55761683", "0.5558237", "0.5510801", "0.55058366", "0.54752433", "0.54671407", "0.54580176", "0.5440626", "0.54314053", "0.5418523", "0.5417509", "0.53666776", "0.53588283", "0.53572375", "0.53554875", "0.53485113", "0.5312467", "0.5310091", "0.5307282", "0.5300478", "0.52724284", "0.52634656", "0.5237517", "0.5234177", "0.522796", "0.5223758", "0.5223132", "0.5214156", "0.5205767", "0.5202647", "0.51996964", "0.51885223", "0.51519054", "0.51457924", "0.5138529", "0.51371", "0.5124933", "0.51209635", "0.51160455", "0.51156235", "0.5112892", "0.51120806", "0.5108971", "0.5104064", "0.5101326", "0.50979936", "0.5094127", "0.5090005", "0.50714815", "0.5067705", "0.5065084", "0.5064832", "0.50565875", "0.5054447", "0.5047773", "0.5047074", "0.5037199", "0.50306743", "0.5030403", "0.5021113", "0.50182164", "0.5016517", "0.50104105", "0.49919406", "0.49883837", "0.49845362", "0.4981705", "0.49793184", "0.49758282", "0.49725565", "0.49684528", "0.49669594", "0.4962709", "0.49621442", "0.49618486", "0.4955999", "0.49546498", "0.49498942", "0.4947383", "0.4946685", "0.4939259", "0.4932841", "0.49286446", "0.4926461", "0.49230978", "0.49223688", "0.49207133" ]
0.0
-1
World containing a box with a lid, and a peg to open it.
def __init__(self, env, xml_config, box_lid_pos=0., peg_pos=[0.75, 0.1, 0.1], peg_delta_range=[0., 0.]): self._env = env self._xml_config = xml_config self._box_lid_pos = box_lid_pos self._peg_pos = peg_pos self._peg_delta_range = peg_delta_range # Manually specify what objects exist for each xml config #TODO: add support for dm_control.mjcf self._boxes = [] self._pegs = [] if self._xml_config == 'default' or self._xml_config == 'task': box = BoxWithLid( env=self._env, initial_lid_pos=self._box_lid_pos, name='box') self._boxes.append(box) peg = BlockPeg( env=self._env, initial_pos=self._peg_pos, random_delta_range=self._peg_delta_range, name='peg') self._pegs.append(peg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def room_with_box():\n\troom_material = pra.Material(energy_absorption=0.6, scattering=None)\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# define obstacle\n\tobstacle_faces = make_polygon(\n\t\tcentre=[2.5,0,2.5],\n\t\tradius=1.8,\n\t\theight=3,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4],\n\t\treverse_normals=True\n\t)\n\tobstacle_material = pra.Material(energy_absorption=0.1, scattering=0.1)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_material))\n\twalls.extend(create_walls(obstacle_faces, obstacle_material))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)\n\n\troom.add_source([0, 0, 2.])\n\troom.add_microphone([0, 0.2, 2.1])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.compute_rir()\n\n\treturn room", "def create_room(room):\n global map\n for x in range(room.x1+1, room.x2):\n for y in range(room.y1+1, room.y2):\n map[x][y].blocked = False\n map[x][y].block_sight = False", "def house(x, y):\r\n # roof\r\n triangle(x + 15, y, x, y + 15, x + 30, y + 15)\r\n # bottom of the house\r\n rect(x, y + 15, 30, 30)\r\n # door\r\n rect(x + 12, y + 30, 10, 15)", "def door_world(self):\n maze = self.create_maze_world(height= 6, width = 9)\n for i in range(maze.dims[0]):\n if i is not 3:\n maze.add_wall( (i, 6), \"W\")\n for j in range(6):\n if j is not 0:\n maze.add_wall( (2 , j), \"N\")\n maze.add_wall((2,2), \"E\")\n maze.add_wall((0,2), \"E\")\n return maze", "def world():\n bgcolor('black')\n path.color('blue')\n\n for index in range(len(tiles)):\n tile = tiles[index]\n \"\"\"\n Si estamos en un cuadro valido lo dibujamos en azul \n y ponemos el punto blanco\n \"\"\"\n if tile > 0:\n x = (index % 20) * 20 - 200\n y = 180 - (index // 20) * 20\n square(x, y)\n\n if tile == 1:\n path.up()\n path.goto(x + 10, y + 10)\n path.dot(2, 'white')", "def make_world(self):\n raise NotImplementedError()", "def createWireBox(node):\n\n #create an empty bounding box\n bb = osg.BoundingBox()\n\n #if we have a geode, expand by the drawables bounding box, else use the bounding sphere\n geode = osg.NodeToGeode(node)\n if geode:\n print \"geode found\" \n for i in range(geode.getNumDrawables()):\n dwb = geode.getDrawable(0)\n bb.expandBy(dwb.getBound());\n else:\n bb.expandBy(node.getBound()) \n \n center = node.getBound().center()\n \n #create a geode for the wirebox\n wbgeode = osg.Geode()\n wbgeode.setName(\"ExtentsGeode\")\n\n #create a stateset for the wirebox\n stateset = osg.StateSet()\n wbgeode.setStateSet(stateset)\n# stateset.thisown = 0 \n\n #create a polygonmode state attribute\n polyModeObj = osg.PolygonMode()\n polyModeObj.setMode(osg.PolygonMode.FRONT_AND_BACK, osg.PolygonMode.LINE)\n stateset.setAttribute(polyModeObj)\n \n #create a linewidth state attribute\n lw = osg.LineWidth()\n lw.setWidth(2.0)\n stateset.setAttribute(lw)\n \n stateset.setAttribute(selmat)\n\n #create a drawablw box with the right position and size\n lx = bb._max.x() - bb._min.x()\n ly = bb._max.y() - bb._min.y()\n lz = bb._max.z() - bb._min.z()\n box = osg.Box(center, lx, ly, lz)\n shape = osg.ShapeDrawable(box)\n #shape.setColor(osg.Vec4(1.0, 0.0, 0.0, 1.0))\n \n #add the drawable to the wirebox geode\n wbgeode.addDrawable(shape)\n\n for pointer in [stateset, box, polyModeObj, lw, shape]:\n pointer.thisown = False\n\n #return the wirebox geode\n return wbgeode", "def place_object(self, thing):\n color = [i * 255 for i in thing.color.rgb]\n size = (20, 20)\n if 
thing.name == \"luna\":\n size = (5, 5)\n if self.is_visible(thing.position, max(size)):\n position = self.get_position(thing.position, size)\n pygame.draw.ellipse(self.screen, color, (position, size))", "def front_wall(self):\n self.place = \"bed\"\n print(\"You are infront of the bed.\"\n \"You look under it and find a notebook.\")\n nb = Notebook('notebook')\n nb.clue()", "def klyubin_world(self):\n maze = self.create_maze_world(10,10)\n # wall A\n for i in range(6):\n maze.add_wall( (1, i), \"N\" )\n # wall B & D\n for i in range(2):\n maze.add_wall( (i+2, 5), \"E\")\n maze.add_wall( (i+2, 6), \"E\")\n # wall C\n maze.add_wall( (3, 6), \"N\")\n # wall E\n for i in range(2):\n maze.add_wall( (1, i+7), \"N\")\n # wall F\n for i in range(3):\n maze.add_wall( (5, i+2), \"N\")\n # wall G\n for i in range(2):\n maze.add_wall( (i+6, 5), \"W\")\n # walls HIJK\n maze.add_wall( (6, 4), \"N\")\n maze.add_wall( (7, 4), \"N\")\n maze.add_wall( (8, 4), \"W\")\n maze.add_wall( (8, 3), \"N\")\n return maze", "def add_box(self, l, w, h, x, y, z, comment=\"\"):\n self.data['shape']['compound'].append({'box': {'#': comment, 'pose': {'x': x, 'y': y, 'z': z},\n 'size': {'x': l, 'y': w, 'z': h}}})", "def define_box_location(self):\n self.contents['Box_ID'] = np.ones(self.numatom) * self.num_box", "def create_box(world, space, density, lx, ly, lz):\r\n\r\n\t\t# Create body\r\n\t\tbody = ode.Body(world)\r\n\t\tM = ode.Mass()\r\n\t\tM.setBox(density, lx, ly, lz)\r\n\t\tbody.setMass(M)\r\n\r\n\t\t# Set parameters for drawing the body\r\n\t\tbody.shape = \"box\"\r\n\t\tbody.boxsize = (lx, ly, lz)\r\n\r\n\t\t# Create a box geom for collision detection\r\n\t\tgeom = ode.GeomBox(space, lengths = body.boxsize)\r\n\t\tgeom.setBody(body)\r\n\r\n\t\treturn body, geom", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def build_blocks():\n block_1 = GRect(375, 80, x=20, y=330)\n block_1.filled = True\n block_1.color = 'firebrick'\n block_1.fill_color = 'firebrick'\n window.add(block_1)\n block_2 = GRect(375, 80, x=405, y=330)\n block_2.filled = True\n block_2.color = 'steelblue'\n block_2.fill_color = 'steelblue'\n window.add(block_2)\n block_3 = GRect(375, 80, x=20, y=420)\n block_3.filled = True\n block_3.color = 'goldenrod'\n block_3.fill_color = 'goldenrod'\n window.add(block_3)\n block_4 = GRect(375, 80, x=405, y=420)\n block_4.filled = True\n block_4.color = 'forestgreen'\n block_4.fill_color = 'forestgreen'\n window.add(block_4)\n block_5 = GRect(60, 40, x=720, y=120)\n block_5.filled = True\n block_5.color = 'dodgerblue'\n block_5.fill_color = 'dodgerblue'\n window.add(block_5)\n circle_1 = GOval(90, 90, x=20, y=170)\n circle_1.filled = True\n circle_1.color = 'blueviolet'\n circle_1.fill_color = 'blueviolet'\n window.add(circle_1)", "def __init__(self, player, screen):\n\n # Call the parent constructor\n Level.__init__(self, player, screen)\n\n self.level_x_limit = -1380\n self.level_y_limit = 270\n\n\n # Array with type of platform, and x, y location of the platform.\n level = [[platforms.SAND_LONG_GROUND, 0, 500],\n [platforms.SAND_LONG_GROUND, 1431, 500],\n\n [platforms.SAND_PYRAMID_LONG, 900, 386],\n [platforms.SAND_PYRAMID_LONG, 1100, 273],\n [platforms.SAND_PYRAMID_LONG, 2200, 160],\n [platforms.SAND_PYRAMID_LONG, 2200, 57],\n [platforms.SAND_PYRAMID_LONG, 
1400, -55],\n [platforms.SAND_PYRAMID_LONG, 1850, -168],\n [platforms.SAND_PYRAMID_LONG, 1850, -281],\n\n #be sure to place this in nonwalljump group\n [platforms.SAND_PYRAMID_LONG, 2178, 386],\n [platforms.SAND_PYRAMID_LONG, 2378, 273],\n [platforms.SAND_PYRAMID_LONG, 1500, -394]\n\n\n ]\n\n # Go through the array above and add platforms\n for platform in level:\n block = platforms.hubSandBits(platform[0])\n block.rect.x = platform[1]\n block.rect.y = platform[2]\n block.player = self.player\n self.platform_list.add(block)\n\n\n\n\n choosePort =[[platforms.PORTAL, -30, 350, 0],\n [platforms.PORTAL, 1556, -120, 2]\n ]\n\n for port in choosePort:\n wego = platforms.ChooseLev(port[0], port[3])\n wego.rect.x = port[1]\n wego.rect.y = port[2]\n wego.player = self.player\n self.platform_choose.add(wego)\n\n\n\n background = platforms.backgroundSandHub()\n background.rect.x = 0\n background.rect.y = 0\n self.decor.add(background)", "def spawn_orb(self):\n x_pos = random.randint(0, self.config.arena_size[0] - 1)\n y_pos = random.randint(0, self.config.arena_size[1] - 1)\n self.arena[x_pos][y_pos] = Tile.ORB", "def __init__(self, walker, box, box_index=0, detection_region=None):\n super(MoveWalkerToBox, self).__init__(walker)\n self._box = box\n self._detection_region = detection_region\n self._box_index = box_index\n self._walker_geoms = None", "def interaction_box(self) -> None:\n assert(0 <= self.target.x_obj+self.d_x <= self.grid.width and 0 <=\n self.target.y_obj+self.d_y <= self.grid.height)\n x_beyond_target = self.target.x_obj + self.d_x\n y_beyond_target = self.target.y_obj + self.d_y\n beyond_target = self.grid.obj_list[ # Object on which we could push the box\n x_beyond_target, y_beyond_target]\n if isinstance(beyond_target, ob.Void): # Simply pushing the box\n self.grid.obj_list.swap_obj(beyond_target, self.target)\n self.grid.obj_list.swap_obj(beyond_target, self.moving_character)\n elif isinstance(beyond_target, ob.Hole):\n if beyond_target.depth == 1:\n # Destroying box and hole\n void1 = ob.Void(self.target.x_obj, self.target.y_obj)\n void2 = ob.Void(x_beyond_target, y_beyond_target)\n self.grid.obj_list[self.target] = void1\n self.grid.obj_list[beyond_target] = void2\n # Then moving character\n self.grid.obj_list.swap_obj(void1, self.moving_character)\n else:\n # Reducing depth of the hole\n beyond_target.reduce_depth()\n # Destructing the box\n void = ob.Void(self.target.x_obj, self.target.y_obj)\n self.grid.obj_list[self.target] = void", "def __init__(self, c):\n self.coords = c[:4]\n self.center = c[4]\n\n # init width and height of block\n widthAndHeight(self)\n\n self.x = randrange((pyxel.width - self.width)/4)\n self.y = 0\n self.vy = 32\n self.falling = True\n\n # init random color\n self.color = randrange(2, 15)\n\n # Add block to posMap\n mapAdd(self, theFallen)\n\n self.frame = pyxel.frame_count", "def create_level(self, name):\n \n # Create a level object\n level = Level()\n size_y=8\n size_x=10\n # Separates static and non static parts\n # This will speed up network games, since only the non static part will be\n # sent on the network\n level_static = soya.World(level)\n \n # Load 3 materials (= textures) for files ./materials{grass|ground|snow}.data\n \n ground = soya.Material.get(\"block2\")\n \n \n # Creates a landscape, from the heighmap \"./images/map.png\"\n # The landscape is in the static part (=level_static), because it won't change along the game.\n land = soya.Land(level_static)\n land.y =0.0\n land.from_image(soya.Image.get(\"floor.png\"))\n \n # Sets how 
high is the landscape\n land.multiply_height(-0.0)\n \n # These values are trade of between quality and speed\n land.map_size = 8\n land.scale_factor = 1.5\n land.texture_factor = 1.0\n \n # Set the texture on the landscape, according to the height\n # (i.e. height 0.0 to 15.0 are textured with grass, ...)\n \n land.set_material_layer(ground, 0.0, 25.0)\n \n # squares where the player starts\n # Note that this is stored in physical, not abstract, coordinates.\n always_clear=[(-1,-1),(-2,-1),(0,-1),(-1,-2),(-1,0)]\n cube = soya.Shape.get(\"cube\")\n \n # r and c represent the cube positions in the grid,\n # while x and y represent the physical coordinates in the world.\n # Note the simple formula: r = x + self.size_x , c = y + self.size_y\n border_row, border_col = 2*size_x - 2, 2*size_y - 2\n for r, x in enumerate(range(-size_x,size_x-1)):\n for c, y in enumerate(range(-size_y,size_y-1)):\n bx = x +128\n by = y +128 \n if (r % 2 == 0 and c % 2 == 0) or \\\n (r == 0 or c == 0 or r == border_row or c == border_col ):\n # This is a wall block\n block = soya.Volume(level_static, cube)\n block.scale(1.0, 1.0, 1.0)\n block.set_xyz(bx, 0.5, by) \n elif random() < 0.8 and not (x, y) in always_clear:\n # A soft block\n block = SoftBox()\n level.add_mobile(block)\n block.scale(1.0, 1.0,1.0)\n block.set_xyz(bx, 0.5, by)\n \n # Creates a light in the level, similar to a sun (=a directional light)\n sun = soya.Light(level_static)\n sun.directional = 1\n sun.diffuse = (1.0, 0.8, 0.4, 1.0)\n sun.rotate_vertical(-45.0)\n \n # Creates a sky atmosphere, with fog\n atmosphere = soya.SkyAtmosphere()\n atmosphere.ambient = (0.3, 0.3, 0.4, 1.0)\n atmosphere.fog = 1\n atmosphere.fog_type = 0\n atmosphere.fog_start = 40.0\n atmosphere.fog_end = 50.0\n atmosphere.fog_color = atmosphere.bg_color = (0.2, 0.5, 0.7, 1.0)\n atmosphere.skyplane = 1\n atmosphere.sky_color = (1.5, 1.0, 0.8, 1.0)\n \n # Set the atmosphere to the level\n level.atmosphere = atmosphere\n \n # Save the level as \"./worlds/level_demo.data\" (remember, levels are subclasses of worlds)\n level_static.filename = level.name = name+\"_bbomber_static\"\n level_static.save()\n level.filename = level.name = name+\"_bbomber\"\n level.save()", "def test_point_in_room(rectangle,big_area):\n new_room = Room(rectangle, 0, 1, 1, big_area, 'bathroom')\n point = (0, 2, 2)\n assert new_room.contains_point(point[0], point[1], point[2]) is True", "def mk_dungeon(self, bounding_box, depth=0):\n print \"%s\" % repr(bounding_box)\n edge_buffer = self.edge_min + self.room_min\n room = None\n if ((depth >= self.max_depth)\n or (bounding_box.top + edge_buffer\n > bounding_box.bottom - edge_buffer)\n or (bounding_box.left + edge_buffer\n > bounding_box.right - edge_buffer)):\n room = self.mk_room(bounding_box)\n return room\n\n is_vertical = bool(random.randint(0, 1))\n if is_vertical:\n split = random.randint(bounding_box.left + edge_buffer,\n bounding_box.right - edge_buffer)\n box_1 = Box(bounding_box.top, bounding_box.left,\n bounding_box.bottom, split)\n box_2 = Box(bounding_box.top, split, bounding_box.bottom,\n bounding_box.right)\n else:\n # horizontal split\n split = random.randint(bounding_box.top + edge_buffer,\n bounding_box.bottom - edge_buffer)\n box_1 = Box(bounding_box.top, bounding_box.left, split,\n bounding_box.right)\n box_2 = Box(split, bounding_box.left, bounding_box.bottom,\n bounding_box.right)\n # Room 2 will always be down or right from room 1\n room_1 = self.mk_dungeon(box_1, depth + 1)\n room_2 = self.mk_dungeon(box_2, depth + 1)\n\n # Now we 
have two \"rooms\" (which may be sub-rooms connected by a\n # corridor), and we need to connect them.\n\n # First see if they share an edge\n\n # print self\n try:\n self.line_connection(room_1, room_2, split, is_vertical)\n except ValueError:\n self.bend_connection(room_1, room_2)\n # print self\n return Box(\n min(room_1.top, room_2.top),\n min(room_1.left, room_2.left),\n max(room_1.bottom, room_2.bottom),\n max(room_1.right, room_2.right)\n )", "def __init__(self, size):\n self.world = [[None for y in range(size)] for x in range(size)]", "def __init__(self, world, location, elevation):\n LandCell.__init__(self, world, location, elevation)", "def __init__(self, game, world_file):\n self.game = game\n self.world_file = world_file\n self.floor_batch = game.floor_batch\n self.wall_batch = game.wall_batch\n self.lightmap = LightMap()\n self.tiles = {}\n self.load_world()\n self.load_tileset()\n self.player_light = self.lightmap.add_light(0,0,15)", "def theRoof(pos, blockTypeMain = wool , mainColor=wPurple, replaceGlass = wGlass):\n \n # try again the same trick to add the roof\n # Middle part\n for i in range(0,12,1):\n iy = i\n if i >= 6:\n iy=11-i\n #print i, iy\n mc.setBlocks(pos.x-4+i, pos.y+10+iy, pos.z+4,\n pos.x-4+i, pos.y+10+iy, pos.z+29, blockTypeMain, mainColor)\n\n # RIGHT SIDE of the house\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+5+ii,\n pos.x-13+ii, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+8,\n pos.x-11+ii, pos.y+9+ii, pos.z+26-ii, material)\n \n # and LEFT side of the house\n xAdjust = 21\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5-ii+xAdjust, pos.y+9+ii, pos.z+5+ii,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-7-ii+xAdjust, pos.y+9+ii, pos.z+8,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+26-ii, material)", "def lobby():\n return UncheckedPlace(\"Lobby\")", "def neopentane():\n coords = [\n [0.000000, 0.0, 0.0],\n [0.881905, 0.881905, 0.881905],\n [-0.881905, -0.881905, 0.881905],\n [0.881905, -0.881905, -0.881905],\n [-0.881905, 0.881905, -0.881905],\n [-1.524077, 0.276170, -1.524077],\n [1.524077, 1.524077, 0.276170],\n [1.524077, -0.276170, -1.524077],\n [1.524077, 0.276170, 1.524077],\n [-1.524077, -0.276170, 1.524077],\n [1.524077, -1.524077, -0.276170],\n [-0.276170, 1.524077, -1.524077],\n [0.276170, 1.524077, 1.524077],\n [0.276170, -1.524077, -1.524077],\n [-0.276170, -1.524077, 1.524077],\n [-1.524077, 1.524077, -0.276170],\n [-1.524077, -1.524077, 0.276170],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def main():\n window = GWindow(width=670, height=650, title='Minion Sticker')\n\n # the left hair\n hair_1 = GLine(275, 120, 290, 170)\n hair_1.color = 'black'\n window.add(hair_1)\n\n # the middle hair\n hair_2 = GLine(330, 100, 332, 170)\n hair_2.color = 'black'\n window.add(hair_2)\n\n # the right hair\n hair_3 = GLine(390, 120, 360, 170)\n hair_3.color = 'black'\n window.add(hair_3)\n\n # the head\n head = GOval(200, 200, x=230, y=150)\n head.filled = 
True\n head.fill_color = 'yellow'\n head.color = 'yellow'\n window.add(head)\n\n # the overalls\n pants = GOval(200, 200, x=230, y=320)\n pants.filled = True\n pants.fill_color = 'cornflowerblue'\n pants.color = 'cornflowerblue'\n window.add(pants)\n\n # the body\n body = GRect(200, 200, x=230, y=230)\n body.filled = True\n body.fill_color = 'yellow'\n body.color = 'yellow'\n window.add(body)\n\n # the pocket of the overalls\n pocket = GRect(120, 80, x=270, y=380)\n pocket.filled = True\n pocket.fill_color = 'cornflowerblue'\n pocket.color = 'cornflowerblue'\n window.add(pocket)\n\n # the left sling of the overalls\n l_sling = GPolygon()\n l_sling.add_vertex((230, 350))\n l_sling.add_vertex((270, 380))\n l_sling.add_vertex((270, 400))\n l_sling.add_vertex((230, 370))\n l_sling.filled = True\n l_sling.fill_color = 'cornflowerblue'\n l_sling.color = 'cornflowerblue'\n window.add(l_sling)\n\n # the right sling of the overalls\n r_sling = GPolygon()\n r_sling.add_vertex((390, 380))\n r_sling.add_vertex((430, 350))\n r_sling.add_vertex((430, 370))\n r_sling.add_vertex((390, 400))\n r_sling.filled = True\n r_sling.fill_color = 'cornflowerblue'\n r_sling.color = 'cornflowerblue'\n window.add(r_sling)\n\n # the left upper limb\n l_uphand = GPolygon()\n l_uphand.add_vertex((185, 390))\n l_uphand.add_vertex((230, 373))\n l_uphand.add_vertex((230, 388))\n l_uphand.add_vertex((185, 400))\n l_uphand.filled = True\n l_uphand.fill_color = 'yellow'\n l_uphand.color = 'yellow'\n window.add(l_uphand)\n\n # the left lower limb\n l_lowhand = GPolygon()\n l_lowhand.add_vertex((185, 390))\n l_lowhand.add_vertex((230, 425))\n l_lowhand.add_vertex((230, 440))\n l_lowhand.add_vertex((185, 400))\n l_lowhand.filled = True\n l_lowhand.fill_color = 'yellow'\n l_lowhand.color = 'yellow'\n window.add(l_lowhand)\n\n # the right upper limb\n r_uphand = GPolygon()\n r_uphand.add_vertex((430, 373))\n r_uphand.add_vertex((475, 390))\n r_uphand.add_vertex((475, 400))\n r_uphand.add_vertex((430, 388))\n r_uphand.filled = True\n r_uphand.fill_color = 'yellow'\n r_uphand.color = 'yellow'\n window.add(r_uphand)\n\n # the right lower limb\n r_lowhand = GPolygon()\n r_lowhand.add_vertex((430, 425))\n r_lowhand.add_vertex((475, 390))\n r_lowhand.add_vertex((475, 400))\n r_lowhand.add_vertex((430, 440))\n r_lowhand.filled = True\n r_lowhand.fill_color = 'yellow'\n r_lowhand.color = 'yellow'\n window.add(r_lowhand)\n\n # the left button on the overalls\n l_button = GOval(10, 10, x=270, y=385)\n l_button.filled = True\n window.add(l_button)\n\n # the right button on the overalls\n r_button = GOval(10, 10, x=380, y=385)\n r_button.filled = True\n window.add(r_button)\n\n # the left frame of the glasses\n l_frame = GRect(43, 25, x=230, y=240)\n l_frame.filled = True\n window.add(l_frame)\n\n # the right frame of the glasses\n r_frame = GRect(43, 25, x=387, y=240)\n r_frame.filled = True\n window.add(r_frame)\n\n # the glasses\n glasses = GOval(120, 120, x=270, y=190)\n glasses.filled = True\n glasses.fill_color = 'gray'\n glasses.color = 'gray'\n window.add(glasses)\n\n # the white eye\n white_eye = GOval(100, 100, x=280, y=200)\n white_eye.filled = True\n white_eye.fill_color = 'white'\n white_eye.color = 'white'\n window.add(white_eye)\n\n # the black eye\n black_eye = GOval(65, 65, x=300, y=220)\n black_eye.filled = True\n black_eye.fill_color = 'black'\n black_eye.color = 'black'\n window.add(black_eye)\n\n # the mouth\n mouth = GOval(100, 50, x=280, y=312)\n mouth.filled = True\n window.add(mouth)\n\n # the oval that covers the 
mouth to make the shape of mouse\n x_mouth = GOval(110, 35, x=280, y=311)\n x_mouth.color = 'yellow'\n x_mouth.filled = True\n x_mouth.fill_color = 'yellow'\n window.add(x_mouth)\n\n # the first left blush of the left blush\n l_blush1 = GLine(240, 300, 245, 315)\n l_blush1.color = 'deeppink'\n window.add(l_blush1)\n\n # the middle blush of the left blush\n l_blush2 = GLine(250, 300, 255, 315)\n l_blush2.color = 'deeppink'\n window.add(l_blush2)\n\n # the right blush of the left blush\n l_blush3 = GLine(260, 300, 265, 315)\n l_blush3.color = 'deeppink'\n window.add(l_blush3)\n\n # the first left blush of the right blush\n r_blush1 = GLine(395, 300, 400, 315)\n r_blush1.color = 'deeppink'\n window.add(r_blush1)\n\n # the middle blush of the right blush\n r_blush2 = GLine(405, 300, 410, 315)\n r_blush2.color = 'deeppink'\n window.add(r_blush2)\n\n # the right blush of the right blush\n r_blush3 = GLine(415, 300, 420, 315)\n r_blush3.color = 'deeppink'\n window.add(r_blush3)\n\n # the left leg\n l_leg = GPolygon()\n l_leg.add_vertex((275, 500))\n l_leg.add_vertex((315, 500))\n l_leg.add_vertex((310, 530))\n l_leg.add_vertex((280, 530))\n l_leg.color = \"cornflowerblue\"\n l_leg.filled = True\n l_leg.fill_color = \"cornflowerblue\"\n window.add(l_leg)\n\n # the right leg\n r_leg = GPolygon()\n r_leg.add_vertex((335, 500))\n r_leg.add_vertex((380, 500))\n r_leg.add_vertex((375, 530))\n r_leg.add_vertex((345, 530))\n r_leg.color = \"cornflowerblue\"\n r_leg.filled = True\n r_leg.fill_color = \"cornflowerblue\"\n window.add(r_leg)\n\n # the left shoe\n l_shoe = GRect(30, 20, x=280, y=530)\n l_shoe.filled = True\n l_shoe.fill_color = 'black'\n l_shoe.color = 'black'\n window.add(l_shoe)\n\n # the right shoe\n r_shoe = GRect(30, 20, x=345, y=530)\n r_shoe.filled = True\n r_shoe.fill_color = 'black'\n r_shoe.color = 'black'\n window.add(r_shoe)\n\n # the left foot\n l_feet = GOval(30, 20, x=264, y=530)\n l_feet.filled = True\n l_feet.fill_color = 'black'\n l_feet.color = 'black'\n window.add(l_feet)\n\n # the right foot\n r_feet = GOval(30, 20, x=360, y=530)\n r_feet.filled = True\n r_feet.fill_color = 'black'\n r_feet.color = 'black'\n window.add(r_feet)\n\n # the words show on the overalls\n stancode = GLabel('stanCode')\n stancode.color = 'white'\n stancode.font = '-20'\n window.add(stancode, 290, 420)\n\n # the greeting words!\n label = GLabel('Hi!')\n label.color = 'black'\n label.font = 'Dialog-50-bold-italic'\n window.add(label, 450, 220)", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n 
self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def create_world(self):\n for row in range(self.cell_row):\n for col in range(self.cell_col):\n x1 = col * self.cell_size\n y1 = row * self.cell_size\n x2 = x1 + self.cell_size\n y2 = y1 + self.cell_size\n\n if (self.world_status.now[row, col]):\n self.world[row, col] = self.canvas.create_rectangle(\n x1, y1, x2, y2,\n fill = self.color_alive,\n outline = \"gray\",\n tags = \"rect\")\n else:\n self.world[row, col] = self.canvas.create_rectangle(\n x1, y1, x2, y2,\n fill = self.color_dead,\n outline = \"gray\",\n tags = \"rect\")", "def open_tile(self, y, x):\n # Find the letter index and convert into a y-coordinate.\n # Checks if it is a mine\n if [y, x] in self.mine_locations:\n # explode\n self.show_answer_board([y, x])\n print \"Boomz.\"\n return Minesweeper.IS_A_BOMB\n else:\n # strip(?)tease to the user (oh damn sexy numbers)\n self.tease_user(y, x)\n return Minesweeper.NOT_A_BOMB", "def draw_box(self) -> None:\n from math import pi, sin, cos\n import pymol\n from pymol import cmd\n\n # Convert angle\n angle1 = (self.angle1.value() / 180.0) * pi\n angle2 = (self.angle2.value() / 180.0) * pi\n\n # Get positions of box vertices\n # P1\n x1 = -self.min_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y1 = -self.min_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z1 = self.min_x.value() * sin(angle2) + self.min_y.value() * sin(angle1) * cos(angle2) - self.min_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P2\n x2 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y2 = (-self.min_y.value()) * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z2 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P3\n x3 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y3 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z3 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P4\n x4 = (-self.min_x.value()) * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y4 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z4 = -(-self.min_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * 
cos(angle1) * cos(angle2) + self.z\n\n # P5\n x5 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y5 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z5 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P6\n x6 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y6 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z6 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P7\n x7 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n\n y7 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n\n z7 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P8\n x8 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y8 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z8 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # Create box object\n pymol.stored.list = []\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.iterate(\"box\", \"stored.list.append((name, color))\", quiet=1)\n list_color = pymol.stored.list\n cmd.delete(\"box\")\n if len(list_color) > 0:\n for item in list_color:\n at_name = item[0]\n at_c = item[1]\n cmd.set_color(at_name + \"color\", cmd.get_color_tuple(at_c))\n else:\n for at_name in [\"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v1x\", \"v1y\", \"v1z\", \"v2x\", \"v3y\", \"v4z\"]:\n cmd.set_color(at_name + \"color\", [0.86, 0.86, 0.86])\n\n # Create vertices\n cmd.pseudoatom(\"box\", name=\"v2\", pos=[x2, y2, z2], color=\"v2color\")\n cmd.pseudoatom(\"box\", name=\"v3\", pos=[x3, y3, z3], color=\"v3color\")\n cmd.pseudoatom(\"box\", name=\"v4\", pos=[x4, y4, z4], color=\"v4color\")\n cmd.pseudoatom(\"box\", name=\"v5\", pos=[x5, y5, z5], color=\"v5color\")\n cmd.pseudoatom(\"box\", name=\"v6\", pos=[x6, y6, z6], color=\"v6color\")\n cmd.pseudoatom(\"box\", name=\"v7\", pos=[x7, y7, z7], color=\"v7color\")\n cmd.pseudoatom(\"box\", name=\"v8\", pos=[x8, y8, z8], color=\"v8color\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n 
cmd.pseudoatom(\"box\", name=\"v1x\", pos=[x1, y1, z1], color='red')\n cmd.pseudoatom(\"box\", name=\"v2x\", pos=[x2, y2, z2], color='red')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1y\", pos=[x1, y1, z1], color='forest')\n cmd.pseudoatom(\"box\", name=\"v3y\", pos=[x3, y3, z3], color='forest')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v4z\", pos=[x4, y4, z4], color='blue')\n cmd.pseudoatom(\"box\", name=\"v1z\", pos=[x1, y1, z1], color='blue')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def __init__(self, width, height):\n self.width =width\n self.height = height\n self.box_width = width/self._BOXES_WIDE\n print 'box width: ', self.box_width\n self.box_height = height/self._BOXES_TALL\n\n self.tiles = []\n self.changes = set()\n y = 0\n for i in range(World._BOXES_TALL):\n y += self.box_height\n x = 0\n self.tiles.append([])\n for j in range(World._BOXES_WIDE):\n x += self.box_width\n tile = Tile(self.changes, x, y, self.box_width, self.box_height)\n self.tiles[i].append(tile)", "def box(self, x, y, w, h):\n\t\tpass", "def example_world():\n get_triangles = metis.geometry.box2d_triangles_from_shapely\n\n obstacle_geometry = shapely.geometry.box(0, 0, 10, 10)\n obstacle_geometry = obstacle_geometry.difference(\n obstacle_geometry.buffer(-.2))\n obstacle_geometry = obstacle_geometry.union(\n shapely.geometry.LineString([(5, 0), (5, 10)]).buffer(.1, cap_style=2))\n obstacle_geometry = obstacle_geometry.difference(\n shapely.geometry.Point(5, 2.5).buffer(1, cap_style=1))\n obstacle_geometry = obstacle_geometry.difference(\n shapely.geometry.Point(5, 7.5).buffer(1, cap_style=1))\n\n world = b2.world()\n obstacles = world.CreateStaticBody()\n for triangle in get_triangles(obstacle_geometry):\n _ = obstacles.CreateFixture(shape=triangle)\n\n agent = world.CreateDynamicBody()\n agent_geometry = shapely.geometry.Polygon([\n (2./3., 0.), (-1./3., .4), (-1./3., -.4)])\n for triangle in get_triangles(agent_geometry):\n _ = agent.CreateFixture(shape=triangle)\n\n boxes = [world.CreateDynamicBody() for _ in xrange(2)]\n for box in boxes:\n box.CreateFixture(shape=b2.polygonShape(box=(.8, .8)))\n\n bodies = {'robot': agent, 'box1': boxes[0], 'box2': boxes[1]}\n sample_configuration = {\n 'robot': (1, 2, 0), 'box1': (3, 2, -.2), 'box2': (5, 2.5, 0.1)}\n\n return world, bodies, sample_configuration", "def create_room(self):\n # iterate through array of room types\n rooms = []\n prob_block_5_list = []\n prob_block_6_list = []\n\n for row in self.room_type:\n for col in row:\n rooms.append(self.import_template(col))\n # iterate through rooms to fill screen\n # this number will be part of how we find location of top left corner of room\n # based on 5x5 grid of rooms\n for pos in range(25):\n # this will iterate through the number of columns in array\n # the number y will be part of how we find where to place the block on the y axis (according to pygame.draw)\n for y in range(self.blocks_per_room_y):\n # this will iterate through the number of rows in array\n # the number x will be part of how we find where to place the block on the x axis (according to pygame.draw)\n for x in range(self.blocks_per_room_x):\n # if cell is a 1 add a platform sprite\n if rooms[pos][y][x] is 1:\n #check if platform has another above it for graphics\n if rooms[pos][y - 1][x] in (0, 3, 4, 7) and y - 1 >= 
0:\n # the cases checked in each of these conditionals are the basic case that check surrounding blocks\n # to see what platform we should be using, the edge cases, such as if a block is at the edge of\n # the room, in which case we need to check the neighboring room (array in this case)\n\n #check conditions to see if we are using the sprite with with rounded edges on the bottom right and top right\n if ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'right', self.theme)\n #check conditionals to see if we are using the sprite with rounded edges on the bottom left and top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1)\\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corners on top left and top right\n elif ((x + 1) < self.blocks_per_room_x and (x - 1) >= 0 and rooms[pos][y][x + 1] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4))\\\n or (x is 0 and pos > 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] in (0, 3, 4) and rooms[pos][y][x + 1] in (0, 3, 4))\\\n or (x is self.blocks_per_room_x - 1 and pos < 24 and rooms[pos + 1][y][0] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4)):\n block = Platform(self.block_width, self.block_height, 'round top', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1) \\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 1 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top right\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top right', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n 
coord_x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.x = coord_x\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n #if the space above this block is empty see if we spawn an enemy on the spot above current block\n if rooms[pos][y-1][x] is 0 and y - 1 >= 0:\n self.enemy_generation(coord_x, self.block_height + (pos // 5) * self.room_side_length_y + (y - 1) * self.block_height)\n # if the cell is a 3 then it will be an item pickup\n elif rooms[pos][y][x] is 3:\n rand = random.randrange(0, 4)\n if rand == 0:\n #calculate coordinates of the bag\n bag = pickupSprite('rope')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 1:\n #calculate coordinates of the bag\n bag = pickupSprite('knife')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 2:\n bag = pickupSprite('health')\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n\n\n # if the cell is a 4 then it will be either a spike, if the space is on the bottom of the room,\n # otherwise it is a randomized block or nothing\n elif rooms[pos][y][x] is 4:\n # if the cell is at the bottom of the level, randomly choose whether to place a spike or not\n rand = random.randrange(0, 3)\n rand2 = random.randrange(0, 2)\n if y is 6 and rand is 1:\n spike = enemies.Spikes()\n spike.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n spike.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n spike.player = self.player\n self.enemy_list.add(spike)\n # elif y is 6 and rand is 2:\n # dart = enemies.Darts(self.theme, 'up')\n # dart.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n # dart.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n # dart.player = self.player\n # self.enemy_list.add(dart)\n elif y != 6 and rand2 is 0:\n if rooms[pos][y - 1][x] is 0:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n block.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n elif y != 6 and rand2 is 1:\n if x-1 >= 0 and x+1 <= self.blocks_per_room_x and y-1 >= 0 and y+1 < self.blocks_per_room_y:\n if rooms[pos][y][x-1] is 0:\n direction = 'left'\n blockType = 'middle'\n elif rooms[pos][y][x+1] is 0:\n direction = 'right'\n blockType = 'middle'\n elif rooms[pos][y-1][x] is 0:\n direction = 'up'\n blockType 
= 'top'\n elif rooms[pos][y+1][x] is 0:\n direction = 'down'\n blockType = 'middle'\n else:\n direction = None\n if direction is not None:\n # use for both block and dart\n rectX = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n rectY = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n\n block = Platform(self.block_width, self.block_height, blockType, self.theme)\n block.rect.x = rectX\n block.rect.y = rectY\n block.player = self.player\n self.platform_list.add(block)\n\n dart = enemies.Darts(self.theme, direction)\n dart.rect.x = rectX\n dart.rect.y = rectY\n dart.player = self.player\n self.enemy_list.add(dart)\n # this is the starting and ending points of the level\n elif rooms[pos][y][x] is 7:\n # exit of the game on the top row of the level\n if pos // 5 is 0:\n #calculate coordinates of the exit\n self.exit_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.exit_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n exit = exit_door_sprite(self.block_width, self.block_height)\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n exit.rect.x = self.exit_coords['x']\n exit.rect.y = self.exit_coords['y']\n exit.player = self.player\n self.exit_sprite.add(exit)\n #entance of the game on the bottom row of the level\n elif pos // 5 is 4:\n #calculate coordinates of the entrance\n self.entrance_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.entrance_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height", "def add_box(self, position_x=None, position_y=None, width=None,\n height=None, static=False, kinematic=False,\n density=None, 密度=None,\n 位置x=None, 位置y=None, 寬=None, 高=None, \n 固定=False, random_flag=False):\n\n if static or 固定 :\n box_body = pymunk.Body(body_type=pymunk.Body.STATIC)\n elif kinematic:\n box_body = pymunk.Body(body_type=pymunk.Body.KINEMATIC)\n else:\n box_body = pymunk.Body(body_type=pymunk.Body.DYNAMIC) \n\n \n tmp_width = 寬 if 寬 is not None else width\n if not random_flag:\n tmp_width = tmp_width if tmp_width is not None else self.config.SIZE_WIDTH\n else:\n tmp_width = tmp_width if tmp_width is not None else randint(*self.config.RAMDOM_SIZE_RANGE)\n\n if tmp_width <= 0: raise BoxException('新增方塊錯誤','寬(width)要大於0')\n \n tmp_height = 高 if 高 is not None else height\n if not random_flag:\n tmp_height = tmp_height if tmp_height is not None else self.config.SIZE_HEIGHT\n else:\n tmp_height = tmp_height if tmp_height is not None else randint(*self.config.RAMDOM_SIZE_RANGE)\n\n if tmp_height <= 0: raise BoxException('新增方塊錯誤','高(height)要大於0')\n\n box_shape = pymunk.Poly.create_box(box_body, (tmp_width, tmp_height) )\n\n tmp_density = 密度 if 密度 is not None else density\n if tmp_density is None:\n tmp_density = self.config.DENSITY\n box_shape.density = tmp_density\n \n box_shape.friction = self.config.FRICTION\n box_shape.elasticity = self.config.ELASTICITY\n \n box_shape.color = color.random() \n \n\n\n tmp_x = 位置x if 位置x is not None else position_x\n if not random_flag:\n tmp_x = tmp_x if tmp_x is not None else self.config.X\n else:\n tmp_x = tmp_x if tmp_x is not None else randint(*self.config.RANDOM_X_RANGE)\n\n tmp_y = 位置y if 位置y is not None else position_y\n if not random_flag:\n tmp_y = tmp_y if tmp_y is not None else self.config.Y\n else:\n tmp_y = tmp_y if tmp_y is not None else 
randint(*self.config.RANDOM_Y_RANGE)\n\n box_body.position = (tmp_x, tmp_y)\n\n if not random_flag:\n box_body.angle = 0\n else:\n box_body.angle = 3.1416 * 2 * random()\n\n if not random_flag:\n box_body.velocity = (0, 0)\n else:\n box_body.velocity = ( randint(*self.config.RANDOM_VELOCITY_RANGE),\n randint(*self.config.RANDOM_VELOCITY_RANGE) ) \n\n self.space.add(box_body, box_shape)\n return BodyShapeWrapper(box_body, box_shape)", "def home(xh, yh, h):\n rect(screen, (150, 75, 0), (xh, yh, 150 * h, 100 * h), 0) # house\n polygon(screen, (255, 0, 0), [(xh + 150 * h / 2, yh - 100 * h / 2), (xh, yh), (xh + 150 * h, yh)], 0) # roof\n rect(screen, (0, 191, 255), (xh + 50 * h, yh + 30 * h, 50 * h, 30 * h), 0) # window", "def __init__(self, player, screen):\n\n # Call the parent constructor\n Level.__init__(self, player, screen)\n\n self.level_x_limit = -1380\n self.level_y_limit = 270\n\n\n # Array with type of platform, and x, y location of the platform.\n level = [\n\n\n ]\n\n # Go through the array above and add platforms\n for platform in level:\n block = platforms.hubSandBits(platform[0])\n block.rect.x = platform[1]\n block.rect.y = platform[2]\n block.player = self.player\n self.platform_list.add(block)\n\n\n\n\n choosePort =[\n ]\n\n for port in choosePort:\n wego = platforms.ChooseLev(port[0], port[3])\n wego.rect.x = port[1]\n wego.rect.y = port[2]\n wego.player = self.player\n self.platform_choose.add(wego)\n\n\n\n background = platforms.backgroundSandHub()\n background.rect.x = 0\n background.rect.y = 0\n self.decor.add(background)", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n 
for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def test_point_not_in_room(rectangle, big_area):\n new_room = Room(rectangle, 0, 1, 1, big_area, 'bathroom')\n point = (0, 15, 15)\n assert new_room.contains_point(point[0], point[1], point[2]) is False", "def __init__(self,\n env,\n initial_lid_pos,\n name,\n resource='objects/box_with_lid.xml'):\n self._env = env\n self._initial_lid_pos = initial_lid_pos\n self._name = name\n self._resource = resource", "def build_world(self, width, height, entrance, agent, objects):\n env = WumpusEnvironment(width, height, entrance)\n if self.trace:\n agent = wumpus_environment.TraceAgent(agent)\n agent.register_environment(env)\n env.add_thing(agent, env.entrance)\n for (obj, loc) in objects:\n env.add_thing(obj, loc)\n print env.to_string()\n print self.objects \n return env", "def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)", "def __init__(self, island, x = 0, y = 0, s=\"A\"):\n self.island = island\n self.name = s\n self.x = x\n self.y = y", "def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = 
lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet", "def box(self) -> None:\n self.screen.box()", "def create_simulation_box(Lx,Ly,Lz,nmol,molname,vdwradius,boxfilename, topfilename=None):\n\n os.system('gmx insert-molecules \\\n -box %s %s %s \\\n -nmol %s \\\n -ci %s.pdb \\\n -radius %s \\\n -o %s &> out.log'%(Lx,Ly,Lz,nmol,molname,vdwradius,boxfilename))\n\n \n # Actual nmol added\n with open('out.log', 'r') as f:\n for line in f:\n if 'Added' in line:\n nmol = int(line.split()[1])\n \n # read <molname>.top\n with open('%s.top'%molname, 'r') as f:\n data = ''\n for line in f:\n if len(line.split())!=0 and line.split()[0]=='%s'%molname:\n data+='%s'%molname + ' '*(9-len(molname)) + '%s\\n'%nmol\n else:\n data+=line\n \n if topfilename==None:\n # Update number of PA molecules in <molname>.top file \n with open('%s.top'%molname, 'w') as f:\n f.write(data)\n else:\n # Create <newtopfilename> file \n with open('%s'%topfilename, 'w') as f:\n f.write(data)", "def box(cls, radius, young_mod, shear_mod, w, h, t):\r\n\r\n area = 2*(w+t/2)*t + 2*(h-t/2)*t\r\n\r\n # Torsion constant\r\n I11 = 2*t*(w*h)**2 / (w + h)\r\n\r\n # Moments of area\r\n I33 = 2*(t*(h+t)**3)/12 + 2*((w-t)*t**3/12 + (w-t)*t*(h/2)**2)\r\n I22 = 2*(t*(w+t)**3)/12 + 2*((h-t)*t**3/12 + (h-t)*t*(w/2)**2)\r\n\r\n # Warping constant\r\n Iw = 0.0 # closed thin-walled section\r\n\r\n r = cls(radius=radius,\r\n area=area, I11=I11, I22=I22, I33=I33, Iw=Iw,\r\n young_mod=young_mod, shear_mod=shear_mod,\r\n sec_type='box', sec_params={'closed': True,\r\n 'w': w, 'h': h, 't': t})\r\n\r\n return r", "def makeTheHouse(pos, blockTypeMain= wool, blockTypeSecond= wool,\n mainColor= wMagenta, secondColor= wWhite,\n myDoor= wDoorWood):\n\n ### FRONT (& BACK )###\n for Front in range(0,22,21): #This is the trick for the back copy...\n \n mc.setBlocks(pos.x-4, pos.y,pos.z+6+Front,\n pos.x+7, pos.y+9, pos.z+6+Front, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-3, pos.y+1,pos.z+6+Front,\n pos.x+6, pos.y+8, pos.z+6+Front, blockTypeSecond, secondColor)\n # FRONT - Remove blocks\n # Small trick to remove the 6 empty space by a loop\n #[[x,y],[x,y],[x,y],...]\n for i in [[-1,+1],[5,+1],[+2,0],[-1,+5],[2,+5],[5,+5]]:\n mc.setBlocks(pos.x+i[0], pos.y+i[1],pos.z+6+Front,\n pos.x+i[0]-1, pos.y+i[1]+2, pos.z+6+Front, air)\n #let's put the Glasses (that's almost the same than remove actually...)\n for i in [[-1,+1],[5,+1],[-1,+5],[2,+5],[5,+5]]:\n mc.setBlocks(pos.x+i[0], pos.y+i[1],pos.z+6+Front,\n pos.x+i[0]-1, pos.y+i[1]+2, pos.z+6+Front, wGlass_Pane)\n # The door at Entrance\n mc.setBlock(pos.x+1, pos.y, pos.z+6+Front, myDoor,4)\n mc.setBlock(pos.x+1, pos.y+1, pos.z+6+Front, myDoor,8)\n mc.setBlock(pos.x+2, pos.y, pos.z+6+Front, myDoor,1)\n mc.setBlock(pos.x+2, pos.y+1, pos.z+6+Front, myDoor,8)\n \n # ************\n \n # FRONT - Small top\n mc.setBlocks(pos.x-3, pos.y+10,pos.z+6+Front,\n pos.x+6, pos.y+14, pos.z+6+Front, blockTypeSecond, secondColor)\n mc.setBlocks(pos.x-1, pos.y+10,pos.z+6+Front,\n pos.x+4, pos.y+13, pos.z+6+Front, blockTypeMain, mainColor)\n mc.setBlocks(pos.x, pos.y+10,pos.z+6+Front,\n pos.x+3, pos.y+12, pos.z+6+Front, blockTypeSecond, secondColor)\n # 
FRONT-Small top Remove Blocks\n mc.setBlocks(pos.x+1, pos.y+11,pos.z+6+Front,\n pos.x+2, pos.y+12, pos.z+6+Front, air)\n # small trick to remove as \"stairs\" - funny ? no ?\n for i in range(0,10,1):\n iy = i\n if i > 5:\n iy=9-i\n #print i, iy\n mc.setBlocks(pos.x-3+i, pos.y+11+iy,pos.z+6+Front,\n pos.x-3+i, pos.y+15, pos.z+6+Front, air)\n # FRONT-Small Top put Glass\n mc.setBlocks(pos.x+1, pos.y+11,pos.z+6+Front,\n pos.x+2, pos.y+12, pos.z+6+Front, wGlass_Pane)\n\n\n # FRONT-Right & Left side \n for i in range(0,19,18):\n #print i\n mc.setBlocks(pos.x-4+i, pos.y,pos.z+7+Front,\n pos.x-11+i, pos.y+8, pos.z+7+Front, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-5+i, pos.y+1,pos.z+7+Front,\n pos.x-10+i, pos.y+7, pos.z+7+Front, blockTypeSecond, secondColor)\n # blocks removal\n mc.setBlocks(pos.x-6+i, pos.y+1,pos.z+7+Front,\n pos.x-9+i, pos.y+7, pos.z+7+Front, wGlass_Pane)\n # the line\n mc.setBlocks(pos.x-5+i, pos.y+4,pos.z+7+Front,\n pos.x-11+i, pos.y+4, pos.z+7+Front, blockTypeMain, mainColor)\n \n #remove 2 extra columns\n mc.setBlocks(pos.x-4, pos.y, pos.z+7,\n pos.x-4, pos.y+8, pos.z+7, air)\n mc.setBlocks(pos.x-4+11, pos.y, pos.z+7,\n pos.x-4+11, pos.y+8, pos.z+7, air)\n\n\n ### MAIN WALLS RIGHT & LEFT SIDE ###\n for wall in range(0,26,25):\n mc.setBlocks(pos.x-11+wall, pos.y, pos.z+8,\n pos.x-11+wall, pos.y+8, pos.z+28, blockTypeMain, mainColor)\n\n mc.setBlocks(pos.x-11+wall, pos.y+1, pos.z+8,\n pos.x-11+wall, pos.y+7, pos.z+27, blockTypeSecond, secondColor)\n\n for i in range(0,15,7):\n mc.setBlocks(pos.x-11+wall, pos.y+1,pos.z+9+i,\n pos.x-11+wall, pos.y+7, pos.z+12+i, wGlass_Pane)\n \n # the 3 lines\n mc.setBlocks(pos.x-11+wall, pos.y, pos.z+14,\n pos.x-11+wall, pos.y+8, pos.z+14, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-11+wall, pos.y, pos.z+21,\n pos.x-11+wall, pos.y+8, pos.z+21, blockTypeMain, mainColor)\n mc.setBlocks(pos.x-11+wall, pos.y+4, pos.z+8,\n pos.x-11+wall, pos.y+4, pos.z+28, blockTypeMain, mainColor)\n\n\n \n\n #same \n #removeBlocks(pos.x-1, pos.y+2, pos.z+6, 2, \n pass", "def __init__(self, world, x, y, direction):\n self.ID = world.__register__(x, y, direction)", "def draw_building():\n\n gerardo.penup()\n gerardo.backward(135)\n gerardo.pendown()\n gerardo.begin_fill()\n for i in range(2): # this loop draws out the rectangle for the building\n gerardo.forward(200)\n gerardo.right(90)\n gerardo.forward(100)\n gerardo.right(90)\n gerardo.end_fill()\n gerardo.hideturtle()", "def create_floor(self):\n def _on_enter(obj):\n return lambda: obj.play_blink(duration=1, loop=True)\n def _on_leave(obj):\n return lambda: obj.play_fadeout(duration=1)\n\n cell_size = self.map.get_cell_size()\n for x in xrange(0, self.map_size[0]):\n for y in xrange(0, self.map_size[1]):\n obj = Floor(\n parent=self.map,\n style={\n 'width': cell_size,\n 'height': cell_size,\n 'z-index': layers['floor'] }\n )\n # Make it blinking when the player stays on it\n make_trackingfloor(self, obj, x, y, \n on_enter=_on_enter(obj),\n on_leave=_on_leave(obj)\n )\n self.map.add_node(obj, x, y)", "def __init_ground(self, timeout=4):\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = self._robot.get_planning_frame()\n box_pose.pose.position.z = -0.025\n\n if self._ground_box_name not in self._scene.get_known_object_names():\n rospy.sleep(2)\n self._scene.add_box(self._ground_box_name, box_pose, size=(2, 2, 0.02))\n return self._wait_for_state_update(name=self._ground_box_name, is_known=True, timeout=timeout) \n else:\n self._loginfo(\"Box already in the Planning scene, 
skipping\")\n return True", "def run_world(self):\n self.world_alive = True\n self.world_setable = False", "def build_wall(self, type, pos1, pos2, thickness=1):\n raise NotImplementedError", "def __init__(self, world, location, elevation):\n self.world = world\n self.location = location # (row, col)\n self.elevation = elevation\n if elevation < 0:\n self.water_level = -elevation\n else:\n self.water_level = 0\n self.pollution = 0\n self.crab = None", "def __init__(self, room_number, door_position, next_room_number, open):\n\n self.room_number = room_number\n # each door has fixed dimensions (236 pixels), so we calculate the left and right sides\n self.x_left_door = door_position - (236 / 2)\n self.x_right_door = door_position + (236 / 2)\n self.next_room_number = next_room_number\n self.open = open", "def tunnel_world(self):\n maze = self.create_maze_world(height= 5, width = 9)\n # vertical walls\n for i in range(maze.dims[0]):\n if i is not 2:\n maze.add_wall( (i, 6), \"W\")\n maze.add_wall( (i, 2), \"W\")\n # tunnel walls\n for j in range(2,6):\n maze.add_wall( (2 , j), \"N\")\n maze.add_wall( (2, j), \"S\")\n return maze", "def do_lobby(self):\n\t\tevent = pygame.event.Event(be.E_STATE, {\"state\":be.S_LOBBY})\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.show_lobby()\n\t\tself.renderer.color = (255, 255, 255, 0)", "def main():\n # background\n background = background_maker()\n\n # face\n face = face_maker()\n\n # eye\n eye_l = eye_maker()\n eye_r = eye_maker()\n\n # mouth\n mouth = mouth_maker()\n mouth_1 = GArc(60, 60, 290, 60)\n mouth_2 = GArc(60, 60, 190, 60)\n\n # nose\n nose = GOval(10, 10)\n nose.filled = True\n\n # ear\n ear_l = ear_maker()\n ear_r = ear_maker()\n ear_ll = ear2_maker()\n ear_rr = ear2_maker()\n\n # body\n body = body_maker()\n body2 = body2_maker()\n body3 = body3_maker()\n\n # label\n label = label_maker('Rilakkuma', 70)\n label2 = label_maker('Min', 10, font='Dialog')\n\n # arm\n arm_l = arm1_maker()\n arm_r = arm2_maker()\n\n # leg\n leg = leg_maker()\n leg2 = leg_maker()\n\n # show my draw\n window.add(background)\n window.add(leg, (window.width - leg.width) / 2 - body.width/3.7, (window.height - leg.height) / 2 + body.height*1.1)\n window.add(leg2, (window.width - leg2.width) / 2 + body.width / 3.7,\n (window.height - leg2.height) / 2 + body.height * 1.1)\n window.add(body, (window.width - body.width) / 2, (window.height - body.height) / 2 + face.height/1.4)\n window.add(body2, (window.width - body2.width) / 2,\n (window.height - body2.height) / 2 + face.height/1.4 + body.height/3.3)\n window.add(body3, (window.width - body3.width) / 2, (window.height - body3.height) / 2 + face.height/1.2)\n window.add(arm_l, (window.width - arm_l.width) / 2 - body.width / 2.9,\n (window.height - arm_l.height) / 2 + face.height / 1.5)\n window.add(arm_r, (window.width - arm_r.width) / 2 + body.width / 2.9,\n (window.height - arm_r.height) / 2 + face.height / 1.5)\n window.add(label, (window.width-label.width)/2, window.height/4)\n window.add(ear_l, (window.width - ear_l.width) / 2 - face.width / 2.25,\n (window.height - ear_l.height) / 2 - face.height / 3)\n window.add(ear_ll, (window.width - ear_ll.width) / 2 - face.width / 2.25,\n (window.height - ear_ll.height) / 2 - face.height / 3.5)\n window.add(ear_r, (window.width - ear_r.width) / 2 + face.width / 2.25,\n (window.height - ear_r.height) / 2 - face.height / 3)\n window.add(ear_rr, (window.width - ear_rr.width) / 2 + face.width / 2.25,\n (window.height - ear_rr.height) / 2 - face.height / 3.5)\n 
window.add(face, (window.width - face.width) / 2, (window.height - face.height) / 2)\n window.add(eye_l, (window.width - eye_l.width) / 2 - face.width / 5, (window.height - eye_l.height) / 2)\n window.add(eye_r, (window.width - eye_r.width) / 2 + face.width / 5, (window.height - eye_r.height) / 2)\n window.add(mouth, (window.width - mouth.width) / 2, (window.height - mouth.height) / 2 + face.height / 8)\n window.add(nose, (window.width - nose.width) / 2, (window.height - nose.height) / 2 + face.height / 12)\n window.add(mouth_1, (window.width - mouth_1.width) / 2 - face.width / 20,\n (window.height - mouth_1.height) / 2 + face.height / 11)\n window.add(mouth_2, (window.width - mouth_2.width) / 2 + face.width / 20,\n (window.height - mouth_2.height) / 2 + face.height / 11)\n window.add(label2, window.width-label2.width, window.height)\n\n # kuma2\n kuma2_color = '0xFFEEDD'\n face2 = face_maker(140, color=kuma2_color)\n\n eye2_l = eye_maker(size=15)\n eye2_r = eye_maker(size=15)\n\n mouth2 = mouth_maker(size=40)\n mouth2_1 = GArc(60, 60, 290, 60)\n mouth2_2 = GArc(60, 60, 190, 60)\n\n nose2 = GOval(8, 8)\n nose2.filled = True\n\n ear2_l = ear_maker(size=50, color=kuma2_color)\n ear2_r = ear_maker(size=50, color=kuma2_color)\n ear2_ll = ear2_maker(size=30, color='0xFFC1E0')\n ear2_rr = ear2_maker(size=30, color='0xFFC1E0')\n\n body_2 = body_maker(size=100, color=kuma2_color)\n body2_2 = body2_maker(size=85, color=kuma2_color)\n body3_2 = body3_maker(size=60)\n\n arm2_l = arm1_maker(size=40, color=kuma2_color)\n arm2_r = arm2_maker(size=40, color=kuma2_color)\n\n leg_2 = leg_maker(size=25, color=kuma2_color)\n leg2_2 = leg_maker(size=25, color=kuma2_color)\n\n buttons = GOval(15, 15)\n buttons.filled = True\n buttons.fill_color = 'red'\n\n window.add(leg_2, (window.width - leg_2.width) / 2 - face.width / 1.05 - body_2.width/3.3,\n (window.height - leg_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(leg2_2, (window.width - leg2_2.width) / 2 - face.width / 1.05 + body_2.width/3.3,\n (window.height - leg2_2.height) / 2 + face.height / 1.4 + body2.height * 0.82)\n window.add(body_2, (window.width - body_2.width) / 2 - face.width/1.05,\n (window.height - body_2.height) / 2 + face.height / 1.4)\n window.add(body2_2, (window.width - body2_2.width) / 2 - face.width/1.05,\n (window.height - body2_2.height) / 2 + face.height / 1.4 + body_2.height / 3.3)\n window.add(body3_2, (window.width - body3_2.width) / 2 - face.width/1.05,\n (window.height - body3_2.height) / 2 + face.height / 1.2)\n window.add(arm2_l, (window.width - arm2_l.width) / 2 - face.width / 1.05 - body_2.width/2.9,\n (window.height - arm2_l.height) / 2 + face2.height / 1.06)\n window.add(arm2_r, (window.width - arm2_r.width) / 2 - face.width / 1.05 + body_2.width/2.9,\n (window.height - arm2_r.height) / 2 + face2.height / 1.06)\n window.add(ear2_l, (window.width - ear2_l.width) / 2 - face.width / 0.8,\n (window.height - ear2_l.height) / 2 - face2.height / 9)\n window.add(ear2_ll, (window.width - ear2_ll.width) / 2 - face.width / 0.8,\n (window.height - ear2_ll.height) / 2 - face2.height / 15)\n window.add(ear2_r, (window.width - ear2_r.width) / 2 - face.width / 1.5,\n (window.height - ear2_r.height) / 2 - face2.height / 9)\n window.add(ear2_rr, (window.width - ear2_rr.width) / 2 - face.width / 1.52,\n (window.height - ear2_rr.height) / 2 - face2.height / 15)\n window.add(face2, (window.width-face2.width)/2 - face.width/1.05, (window.height-face2.height)/2 + face2.height/4)\n window.add(eye2_l, (window.width - 
eye2_l.width) / 2 - face.width / 0.9,\n (window.height - eye2_l.height) / 2 + face2.height/4)\n window.add(eye2_r, (window.width - eye2_r.width) / 2 - face.width / 1.25,\n (window.height - eye2_r.height) / 2 + face2.height/4)\n window.add(mouth2, (window.width - mouth2.width) / 2 - face.width/1.05,\n (window.height - mouth2.height) / 2 + face2.height / 2.4)\n window.add(nose2, (window.width - nose2.width) / 2 - face.width/1.05,\n (window.height - nose2.height) / 2 + face2.height / 2.5)\n window.add(mouth2_1, (window.width - mouth2_1.width) / 2 - face.width / 1,\n (window.height - mouth2_1.height) / 2 + face2.height / 2.5)\n window.add(mouth2_2, (window.width - mouth2_2.width) / 2 - face.width / 1.1,\n (window.height - mouth2_2.height) / 2 + face2.height / 2.5)\n window.add(buttons, (window.width-buttons.width)/2 - face.width/1.05,\n (window.height-buttons.height)/2 + face.height/1.62)", "def ZoneBuilder():\n\n # Part 1: Zone Dimensions\n matrix, xaxis, yaxis, zaxis = dimensions()\n\n # Part 2: Assigning Room Existance.\n matrix = existance(matrix, xaxis, yaxis, zaxis)\n \n # Part 3: Creating room walls.\n \n # First, generate walls adjacent to void spaces.\n matrix = enclose_rooms(matrix, xaxis, yaxis, zaxis)\n \n matrix = select_walls(matrix, xaxis, yaxis, zaxis)", "def north_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n nwall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y + height / 2, n), 0)\r\n self.walls.append(nwall)\r\n model = Plane(w=nwall.w()*2, h=nwall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, nwall.x(), nwall.y(), nwall.z())\r\n\r\n\r\n wallnum += 1", "def uncleScrooge(pos):\n bulldozer(pos)\n #print mc.postToChat(\"We made some free place. done !\")\n mc.postToChat(\"We made some free place. 
done !\")\n \n ground(pos, mainColor= wWhite, secondColor=wBlack)\n mc.setBlock(pos.x, pos.y, pos.z, 40)\n mc.setBlock(pos.x-1, pos.y, pos.z, 40)\n mc.postToChat(\"Ground done !\")\n\n pos.z += 5\n makeTheHouse(pos, blockTypeMain = wExtraWhite, blockTypeSecond = wGold, mainColor=1 , secondColor=0, myDoor= wDoorIron)\n mc.postToChat(\"House done !\")\n \n theRoof(pos, blockTypeMain = wDiamond_Block, mainColor=wBlack)\n mc.postToChat(\"The roof is done !\")\n\n makeTheDeco(pos, flowers = wFlower_Yellow)\n mc.postToChat(\"ALL Work done !\")\n\n # Ends Uncle Scrooge House", "def south_wall(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, y+height / 2, s), 0)\r\n self.walls.append(swall)\r\n model = Plane(w=swall.w()*2, h=swall.h()*2, name=name+str(wallnum))\r\n mergeshape.add(model, swall.x(),swall.y(),swall.z(), rx=0.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def __init__(self,\n walker,\n first_box,\n second_box,\n first_box_index=0,\n second_box_index=1,\n detection_region=None):\n super(MoveBoxToBox, self).__init__(walker)\n self._first_box = first_box\n self._second_box = second_box\n self._detection_region = detection_region\n self._first_box_index = first_box_index\n self._second_box_index = second_box_index\n self._walker_geoms = None", "def mk_room(self, bounding_box):\n if (bounding_box.top + self.edge_min + self.room_min\n > bounding_box.bottom):\n raise ValueError(\"Region too small to make room\")\n if (bounding_box.left + self.edge_min + self.room_min\n > bounding_box.right):\n raise ValueError(\"Region too small to make room\")\n h_max = bounding_box.bottom - bounding_box.top - self.edge_min\n w_max = bounding_box.right - bounding_box.left - self.edge_min\n height = random.randint(self.room_min, h_max)\n width = random.randint(self.room_min, w_max)\n\n # we now have a room height and width that fit within our bounding box.\n # Just need to decide where to put the top left corner\n y_start = random.randint(bounding_box.top + self.edge_min,\n bounding_box.bottom - height)\n x_start = random.randint(bounding_box.left + self.edge_min,\n bounding_box.right - width)\n room = Box(y_start, x_start, y_start + height - 1, x_start + width - 1)\n for i in range(y_start, y_start + height):\n for j in range(x_start, x_start + width):\n self.set_tile(Point(j, i))\n return room", "def add_spawns_outside_boss_doors(self: WWRandomizer):\n \n rooms_to_add_new_spawns_to = [\n (\"M_NewD2\", 10, TGDR, None, 11),\n #(\"kindan\", 16, TGDR, None, 13), # Already has a spawn, ID 1.\n (\"Siren\", 18, TGDR, None, 13),\n (\"sea\", 1, ACTR, 1, 56),\n (\"M_Dai\", 15, TGDR, None, 17),\n (\"kaze\", 12, TGDR, None, 13),\n ]\n \n for stage_name, room_number, chunk, layer, boss_door_index in rooms_to_add_new_spawns_to:\n new_spawn_id = 27\n \n dzs = self.get_arc(\"files/res/Stage/%s/Stage.arc\" % stage_name).get_file(\"stage.dzs\", DZx)\n dzr = self.get_arc(\"files/res/Stage/%s/Room%d.arc\" % (stage_name, room_number)).get_file(\"room.dzr\", DZx)\n \n if chunk == TGDR:\n dzx_for_door = dzs\n else:\n dzx_for_door = dzr\n \n door = dzx_for_door.entries_by_type_and_layer(chunk, layer=layer)[boss_door_index]\n spawn_dist_from_door = 200\n y_rot = door.y_rot\n if door.from_room_num != room_number and door.from_room_num != 63:\n y_rot = (y_rot + 0x8000) % 0x10000\n y_rot_degrees = y_rot * 
(90.0 / 0x4000)\n x_offset = math.sin(math.radians(y_rot_degrees)) * spawn_dist_from_door\n z_offset = math.cos(math.radians(y_rot_degrees)) * spawn_dist_from_door\n x_pos = door.x_pos + x_offset\n y_pos = door.y_pos\n z_pos = door.z_pos + z_offset\n \n if stage_name in [\"M_Dai\", \"kaze\"]:\n # Earth and Wind temple spawns must be in the stage instead of the room or the game will crash.\n dzx_for_spawn = dzs\n else:\n dzx_for_spawn = dzr\n \n spawns = dzx_for_spawn.entries_by_type(PLYR)\n assert len([spawn for spawn in spawns if spawn.spawn_id == new_spawn_id]) == 0\n \n new_spawn = dzx_for_spawn.add_entity(PLYR)\n new_spawn.spawn_type = 0\n new_spawn.room_num = room_number\n new_spawn.x_pos = x_pos\n new_spawn.y_pos = y_pos\n new_spawn.z_pos = z_pos\n new_spawn.y_rot = y_rot\n new_spawn.spawn_id = new_spawn_id\n \n dzx_for_spawn.save_changes()", "def play_land(hand, battlefield, graveyard, library):\n\thand['Gemstone Mine'] -= 1\n\tbattlefield['Gemstone Mine'] += 1\n\tlog(\"We played Gemstone Mine.\")\n\tdescribe_game_state(hand, battlefield, graveyard, library)", "def __init__(self, name):\n self.in_wall = False", "def __init__(self, block):\n super(CraftFlowerPot, self).__init__(block)\n self.pot = (block.getWorld()).getTileEntityAt(getX(), getY(), getZ())", "def __init__(self, world, location, elevation):\n LandCell.__init__(self, world, location, elevation)\n self.plant = 0\n self.reset_food_level()", "def generate(self, level):\n # TODO The dungeon's instances are spawned and loaded here.\n # fill map with \"blocked\" tiles\n level.maze = [[Tile(x, y, True) for y in range(level.height)] for x in range(level.width)]\n\n for r in range(level.max_rooms):\n # random width and height\n w = random.randint(level.min_room_size, level.max_room_size)\n h = random.randint(level.min_room_size, level.max_room_size)\n\n # random position without going out of the boundaries of the map\n x = random.randint(0, level.width - w - 1)\n y = random.randint(0, level.height - h - 1)\n\n # \"DungeonRoom\" class makes rectangles easier to work with\n new_room = Room(x, y, w, h)\n level.rooms.append(new_room)\n\n # run through the other rooms and see if they intersect with this one\n failed = False\n for other_room in level.rooms:\n if other_room is not new_room and new_room.intersect(other_room):\n failed = True\n break\n\n if not failed:\n # this means there are no intersections, so this room is valid\n\n # \"paint\" it to the map's tiles\n self._create_room(level, new_room)\n\n # center coordinates of new room, will be useful later\n new_x, new_y = new_room.center()\n\n if level.num_rooms > 0:\n # connect it to the previous room with a tunnel\n # center coordinates of previous room\n (prev_x, prev_y) = level.rooms[level.num_rooms - 1].center()\n\n # draw a coin (random number that is either 0 or 1)\n if random.randint(0, 1) == 1:\n # first move horizontally, then vertically\n self._create_h_tunnel(level, prev_x, new_x, prev_y)\n self._create_v_tunnel(level, prev_y, new_y, new_x)\n else:\n # first move vertically, then horizontally\n self._create_v_tunnel(level, prev_y, new_y, prev_x)\n self._create_h_tunnel(level, prev_x, new_x, new_y)\n\n # finally, append the new room to the list\n level.rooms.append(new_room)\n level.num_rooms += 1\n\n # connect them with a tunnel\n self._create_h_tunnel(level, 25, 55, 23)", "def grow_fungi(self, wall):\n if self.direction == 1:\n ledge_fungus = FirstLedge(self.rect.centery, self.room, wall, 'right')\n self.room.can_climb.add(ledge_fungus)\n else:\n ledge_fungus = 
FirstLedge(self.rect.centery, self.room, wall, 'left')\n self.room.can_climb.add(ledge_fungus)", "def make_wander_box(self):\n x = int(self.location[0])\n y = int(self.location[1])\n box_list = []\n box_rects = []\n\n for i in range(x-3, x+4):\n box_list.append([i, y-3])\n box_list.append([i, y+3])\n\n for i in range(y-2, y+3):\n box_list.append([x-3, i])\n box_list.append([x+3, i])\n\n for box in box_list:\n left = box[0]*32\n top = box[1]*32\n box_rects.append(pg.Rect(left, top, 32, 32))\n\n return box_rects", "def draw_house_walls(x, y, width, height):\n print('Типа рисую стены...', x, y, width, height)", "def box_mesh_with_hole(point1=Point(0,0,0), point2=Point(2,1,1), cyl_cent1 = Point(1, -10, 0.5), \n cyl_cent2= Point(1, 10, 0.5), cyl_rad=0.25, numpts=15):\n Router = mshr.Box(point1, point2)\n Rinner = mshr.Cylinder(cyl_cent1, cyl_cent2, cyl_rad, cyl_rad)\n domain = Router - Rinner\n\n mesh = mshr.generate_mesh(domain, numpts)\n print_mesh_stats(mesh)\n \n return mesh", "def create_hard_block_at(self, x, y):\n cell_size = self.map.get_cell_size()\n obj = HardBlock(\n parent=self.map,\n style={\n 'width': cell_size, \n 'height': cell_size * 2, \n 'z-index': layers['object'] }\n )\n # I am a hard block, I can stop the fire without being destroyed\n fireblocking(block(obj))\n\n self.map.add_node(obj, x, y, 0, -cell_size)\n return obj", "def __init__(self, nx, ny, ix=0, iy=0):\r\n self.__nx = nx\r\n self.__ny = ny\r\n self.__ix = ix\r\n self.__iy = iy\r\n self.__current_room = 0, 0\r\n self.__maze = [[Room(x, y) for y in range(ny)] for x in range(nx)]\r\n self.__entrance_room = 0, 0\r\n self.__exit_room = 0, 0\r\n self.__pillar_a = 0, 0\r\n self.__pillar_e = 0, 0\r\n self.__pillar_i = 0, 0\r\n self.__pillar_p = 0, 0\r\n self.count = 0\r\n self.original_map = \"\"", "def blacksmithing(forge, metalshop):\n blacksmithing = UncheckedPlace(\"Blacksmithing Area\")\n blacksmithing.contain(forge)\n blacksmithing.contain(metalshop)\n return blacksmithing", "def draw_door():\n\n jose.penup()\n jose.goto(38, -137)\n jose.pendown()\n jose.begin_fill()\n for i in range(2): # this loop draws a rectangle for the door of the building.\n jose.forward(40)\n jose.right(90)\n jose.forward(20)\n jose.right(90)\n jose.end_fill()\n jose.hideturtle()", "def init_game():\n return BoardRenderer('LifeSim', GRID_SIZE, BLOCK_SIZE), World(GRID_SIZE, LAKE_SIZE, FOREST_WIDTH)", "def test_can_instantiate_room(rectangle, test_area):\n assert Room\n new_room = Room(rectangle, 0, 1, 1, test_area, 'bathroom')\n assert isinstance(new_room, Room)", "def __init__(self, width, height):\n roomDict = {}\n for w in range(width):\n for h in range(height):\n roomDict[Position(w, h)] = 'dirty'\n self.tiles = roomDict\n self.width = width\n self.height = height", "def load_boxes(self, data):\r\n\r\n # worldbox represents the total map area\r\n self.worldbox = self.Box((0, 0), (len(data[0]) * self.cellwidth, len(data) * self.cellwidth))\r\n\r\n # create a box corresponding to each character/cell in the map file\r\n tl_x = 0\r\n tl_y = 0\r\n for row in data:\r\n for cell in row:\r\n if cell == \".\":\r\n self.wallboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n elif cell == \"x\":\r\n self.targetboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n tl_x += self.cellwidth\r\n tl_x = 0\r\n tl_y += self.cellwidth", "def __init__(self, rows, cols, mines):\n self.rows = rows\n self.cols = cols\n self.mines = mines\n self.opened = 0\n self.game_won = False\n self.game_lost = 
False\n self.board = self.__init__minefield__()\n self.tiles = self.__init__tiles__()", "def add_box(self):\n self.scenes[self.current_scene].add_object(Box())\n self.redraw()", "def __init__(self):\n super(Grasshopper, self).__init__()\n # self.world.gravity = (0.0,0.0)\n\n # Initialize all of the objects\n ground = self.world.CreateBody(position=(0, 20))\n ground.CreateEdgeChain(\n [ (-20,-20),\n (-20, 20),\n ( 20, 20),\n ( 20,-20),\n (-20,-20) ]\n )\n\n # Initialize sliders\n self.settings.altitude_p = pid_values['altitude'].p * 100\n self.settings.altitude_d = pid_values['altitude'].d * 100\n self.settings.lateral_p = pid_values['lateral drift'].p * 20\n self.settings.lateral_d = pid_values['lateral drift'].d * 20\n self.settings.attitude_p = pid_values['attitude'].p * 100\n self.settings.attitude_d = pid_values['attitude'].d * 100\n\n # Rocket\n self.ship=self.world.CreateDynamicBody(\n position=(0,6), angle=0.1,\n angularDamping=0, linearDamping=0)\n\n # And add a box fixture onto it (with a nonzero density, so it will move)\n box=self.ship.CreatePolygonFixture(box=self.ship_dimensions, density=self.ship_mass/(self.ship_dimensions[0]*self.ship_dimensions[1]), friction=0.3)", "def create_the_world(cls):\n from muddery.server.mappings.element_set import ELEMENT\n world = ELEMENT(\"WORLD\")()\n world.setup_element(\"\")\n cls._world_data = world", "def draw_house(x, y, width, height):\n print('Типа рисую дом...', x, y, width, height)\n foundation_height = 0.05 * height\n walls_height = 0.5 * width\n walls_width = 0.9 * width\n roof_height = height - foundation_height - walls_height\n draw_house_foundation(x, y, width, foundation_height)\n draw_house_walls(x, y - foundation_height, walls_width, walls_height)\n draw_house_roof(x, y - foundation_height - walls_height, width, roof_height)", "def add_box(self, timeout=4):\n\n # Side length of the box\n box_size = 0.16\n\n # Set pose of the box\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = 'world'\n box_pose.pose.orientation.w = 1.0\n box_pose.pose.position.x = 0.0\n box_pose.pose.position.y = 0.45\n box_pose.pose.position.z = 1.92\n\n # Add box to scene\n self.scene.add_box(self.box_name, box_pose, size=(box_size,\n box_size,\n box_size))\n\n # Wait for update and return status\n return self.wait_for_state_update(box_is_known=True,\n timeout=timeout)", "def main():\r\n\r\n print(\"Welcome to 'Box Eats Plants' by 'DtjiSoftwareDeveloper'.\")\r\n print(\"In this game, you need to move the box around the board to eat plants and avoid rocks.\")\r\n print(\"Enter 'Y' for yes.\")\r\n print(\"Enter anything else for no.\")\r\n continue_playing: str = input(\"Do you want to continue playing 'Box Eats Plants'? 
\")\r\n while continue_playing == \"Y\":\r\n # Clearing the command line window\r\n clear()\r\n\r\n # Setting up the game\r\n score: int = 0 # initial value\r\n game_over: bool = False # initial value\r\n board: Board = Board()\r\n box: Box = Box(0, 0) # initial value\r\n rocks: list = []\r\n plants: list = []\r\n while board.num_rocks() < 5:\r\n rocks.append(board.spawn_rock())\r\n\r\n while board.num_plants() < 5:\r\n plants.append(board.spawn_plant())\r\n\r\n while board.num_boxes() < 1:\r\n box = board.spawn_box()\r\n\r\n while not game_over:\r\n # Clearing the command line window\r\n clear()\r\n\r\n print(\"Below is the current representation of the board.\\n\" + str(board))\r\n print(\"Current coordinates of box: (\" + str(box.x) + \", \" + str(box.y) + \").\")\r\n print(\"Your score: \" + str(score))\r\n allowed: list = [\"UP\", \"DOWN\", \"LEFT\", \"RIGHT\"]\r\n print(\"Enter 'UP' to move box up.\")\r\n print(\"Enter 'DOWN' to move box down.\")\r\n print(\"Enter 'LEFT' to move box left.\")\r\n print(\"Enter 'RIGHT' to move box right.\")\r\n direction: str = input(\"Where do you want to move the box to? \")\r\n while direction not in allowed:\r\n print(\"Enter 'UP' to move box up.\")\r\n print(\"Enter 'DOWN' to move box down.\")\r\n print(\"Enter 'LEFT' to move box left.\")\r\n print(\"Enter 'RIGHT' to move box right.\")\r\n direction = input(\"Sorry, invalid input! Where do you want to move the box to? \")\r\n\r\n if direction == \"UP\":\r\n box.move_up(board)\r\n elif direction == \"DOWN\":\r\n box.move_down(board)\r\n elif direction == \"LEFT\":\r\n box.move_left(board)\r\n else:\r\n box.move_right(board)\r\n\r\n box_tile: Tile = board.get_tile_at(box.x, box.y)\r\n if box_tile.rock is not None:\r\n game_over = True\r\n\r\n if isinstance(box_tile.plant, Plant):\r\n score += 1\r\n plants.remove(box_tile.plant)\r\n box_tile.remove_plant()\r\n plants.append(board.spawn_plant())\r\n\r\n rock_direction: str = allowed[random.randint(0, 3)]\r\n if rock_direction == \"UP\":\r\n for rock in rocks:\r\n rock.move_up(board)\r\n rock_tile: Tile = board.get_tile_at(rock.x, rock.y)\r\n if isinstance(rock_tile.box, Box):\r\n game_over = True\r\n\r\n elif rock_direction == \"DOWN\":\r\n for rock in rocks:\r\n rock.move_down(board)\r\n rock_tile: Tile = board.get_tile_at(rock.x, rock.y)\r\n if isinstance(rock_tile.box, Box):\r\n game_over = True\r\n\r\n elif rock_direction == \"LEFT\":\r\n for rock in rocks:\r\n rock.move_left(board)\r\n rock_tile: Tile = board.get_tile_at(rock.x, rock.y)\r\n if isinstance(rock_tile.box, Box):\r\n game_over = True\r\n\r\n else:\r\n for rock in rocks:\r\n rock.move_right(board)\r\n rock_tile: Tile = board.get_tile_at(rock.x, rock.y)\r\n if isinstance(rock_tile.box, Box):\r\n game_over = True\r\n\r\n plant_direction: str = allowed[random.randint(0, 3)]\r\n if plant_direction == \"UP\":\r\n for plant in plants:\r\n plant.move_up(board)\r\n plant_tile: Tile = board.get_tile_at(plant.x, plant.y)\r\n if isinstance(plant_tile.box, Box):\r\n score += 1\r\n plants.remove(plant)\r\n plant_tile.remove_plant()\r\n plants.append(board.spawn_plant())\r\n\r\n elif plant_direction == \"DOWN\":\r\n for plant in plants:\r\n plant.move_down(board)\r\n plant_tile: Tile = board.get_tile_at(plant.x, plant.y)\r\n if isinstance(plant_tile.box, Box):\r\n score += 1\r\n plants.remove(plant)\r\n plant_tile.remove_plant()\r\n plants.append(board.spawn_plant())\r\n\r\n elif plant_direction == \"LEFT\":\r\n for plant in plants:\r\n plant.move_left(board)\r\n plant_tile: Tile = 
board.get_tile_at(plant.x, plant.y)\r\n if isinstance(plant_tile.box, Box):\r\n score += 1\r\n plants.remove(plant)\r\n plant_tile.remove_plant()\r\n plants.append(board.spawn_plant())\r\n\r\n else:\r\n for plant in plants:\r\n plant.move_right(board)\r\n plant_tile: Tile = board.get_tile_at(plant.x, plant.y)\r\n if isinstance(plant_tile.box, Box):\r\n score += 1\r\n plants.remove(plant)\r\n plant_tile.remove_plant()\r\n plants.append(board.spawn_plant())\r\n\r\n # Clearing the command line window\r\n clear()\r\n\r\n print(\"GAME OVER! Your score is \" + str(score))\r\n print(\"Enter 'Y' for yes.\")\r\n print(\"Enter anything else for no.\")\r\n continue_playing = input(\"Do you want to continue playing 'Box Eats Plants'? \")\r\n sys.exit()", "def load_building_blocks(path):\t\t\n\t#TODO : automatization\n\tbenzene = Building_Block(abbrev=\"B\", num_atoms=6,origin=0, para_pos=3, para_angle=0, meta_pos=4 , meta_angle = -np.pi/3., ortho_pos=5, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/benzene.xyz\")\n\tnapthtalene = Building_Block(abbrev=\"N\", num_atoms=18,origin=0, para_pos=12, para_angle=0., meta_pos=11 , meta_angle = -np.pi/3., ortho_pos=10, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/naphtalene.xyz\")\n\tdbPc1 = Building_Block(abbrev=\"dbPc1\", num_atoms=32,origin=13, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = +np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc1_block.xyz\")\n\tdbPc4 = Building_Block(abbrev=\"dbPc4\", num_atoms=55,origin=22, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc4.xyz\")\n\tdbPc6 = Building_Block(abbrev=\"dbPc6\", num_atoms=52,origin=17, para_pos=0, para_angle=0, meta_pos=1 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc6.xyz\")\n\tdbPc5 = Building_Block(abbrev=\"dbPc5\", num_atoms=58,origin=12, para_pos=26, para_angle=0, meta_pos=20 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc5.xyz\")\n\tpseudo_para_naph_PCP = Building_Block(abbrev=\"pseudo-para_naph_PCP\", num_atoms=44,origin=0, para_pos=18, para_angle=0, meta_pos=16 , meta_angle = -np.pi/3, ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/pseudo-para_naph_PCP.xyz\")\n\tline =Building_Block(abbrev=\"line\", num_atoms=4,origin=0, para_pos=1, para_angle=0, meta_pos=1 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/line.xyz\")\n\t#rot=Building_Block(abbrev=\"line\", num_atoms=47,origin=6, para_pos=16, para_angle=0, meta_pos=20 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/rot.xyz\")\n\t#stacked_anth=Building_Block(abbrev=\"stacked_anth\", num_atoms=62,origin=3, para_pos=22, para_angle=0, meta_pos=30 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/stacked_anth.xyz\")\n\t\n\tbuilding_blocks = [benzene,napthtalene,dbPc1,dbPc4,dbPc6, dbPc5,pseudo_para_naph_PCP, line]\n\n\treturn building_blocks", "def init_blocks(self):\n length = self.physics.len_blocks\n rect = Rectangle(self.rpos, self.rpos + Vector(length, length))\n self.rects.append(rect)\n self.physics.add_block(rect, 'bomberman')", "def house ():\n\n poly (3,300,\"red\")\n penup()\n setposition(0,-300)\n pendown()\n poly (4,300,\"brown\")\n 
penup()\n setposition(100,-300)\n pendown()\n poly(4,100,\"green\") \n\n return None", "def delete_box(self) -> None:\n from pymol import cmd\n\n # Reset all box variables\n self.x = 0\n self.y = 0\n self.z = 0\n # self.min_x_set = 0.0\n # self.max_x_set = 0.0\n # self.min_y_set = 0.0\n # self.max_y_set = 0.0\n # self.min_z_set = 0.0\n # self.max_z_set = 0.0\n # self.angle1_set = 0.0\n # self.angle2_set = 0.0\n # self.padding_set = 3.5\n\n # Delete Box and Vertices objects in PyMOL\n cmd.delete(\"vertices\")\n cmd.delete(\"box\")\n\n # Set Box variables in the interface\n self.min_x.setValue(self._default.min_x)\n self.max_x.setValue(self._default.max_x)\n self.min_y.setValue(self._default.min_y)\n self.max_y.setValue(self._default.max_y)\n self.min_z.setValue(self._default.min_z)\n self.max_z.setValue(self._default.max_z)\n self.angle1.setValue(self._default.angle1)\n self.angle2.setValue(self._default.angle2)\n\n # Change state of buttons in the interface\n self.button_draw_box.setEnabled(True)\n self.button_redraw_box.setEnabled(False)\n self.min_x.setEnabled(False)\n self.min_y.setEnabled(False)\n self.min_z.setEnabled(False)\n self.max_x.setEnabled(False)\n self.max_y.setEnabled(False)\n self.max_z.setEnabled(False)\n self.angle1.setEnabled(False)\n self.angle2.setEnabled(False)", "def add_to_world(self, thing):\n\t\tthing.set_world_info(self.current_id, self)\n\t\tself.gameObjects.append(thing)\n\t\tself.current_id += 1", "def roof(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None, makeroof=True, makeceiling=True):\r\n global wallnum\r\n\r\n roof = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, y+height+self.ceilingthickness / 2, z), 0)\r\n self.walls.append(roof)\r\n roofmodel = Plane(w=length, h=width, name=name+str(wallnum))\r\n mergeshape.add(roofmodel,x,y+height+self.ceilingthickness,z,rx=90.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1" ]
[ "0.6026777", "0.5739972", "0.57147884", "0.5655483", "0.56292", "0.55451924", "0.552503", "0.5509055", "0.5503862", "0.5486842", "0.54634327", "0.5458408", "0.54434514", "0.54172194", "0.5397755", "0.5392325", "0.5368659", "0.5365851", "0.5354393", "0.5353537", "0.5352438", "0.53369254", "0.53333443", "0.53324777", "0.53119767", "0.5305906", "0.5293444", "0.5270544", "0.5263082", "0.52553815", "0.52372414", "0.5217876", "0.52069616", "0.51993144", "0.5194889", "0.51939654", "0.51765656", "0.5175504", "0.51647645", "0.514697", "0.51441866", "0.51424813", "0.51342815", "0.51296705", "0.51234525", "0.51228225", "0.5113999", "0.51125765", "0.51092035", "0.51028275", "0.51018846", "0.50880426", "0.50874704", "0.5079826", "0.50789666", "0.5074729", "0.50735915", "0.5065318", "0.5063842", "0.50615007", "0.50602925", "0.50574183", "0.505712", "0.5054755", "0.50477993", "0.5045624", "0.5045349", "0.50450927", "0.5037683", "0.50350595", "0.5029766", "0.5028798", "0.5026286", "0.50194836", "0.50183606", "0.50152606", "0.50121826", "0.5007915", "0.49965376", "0.49893996", "0.49874872", "0.49853367", "0.49809742", "0.4977458", "0.49765354", "0.4967841", "0.49628997", "0.49599624", "0.49545085", "0.492788", "0.49270236", "0.49265265", "0.49262106", "0.4914601", "0.49054128", "0.49048966", "0.49021062", "0.48965222", "0.48960152", "0.48959202", "0.4894069" ]
0.0
-1
Update the requirements.txt file and reformat the Pipfile.
def main(requirements_file, skip_requirements_file, pipfile, skip_pipfile):
    # type: (str, bool, str, bool) -> None
    pipfile_path = path.Path(pipfile)
    pf = load_pipfile(pipfile_path)
    if not skip_requirements_file:
        requirements_file_path = path.Path(requirements_file)
        update_requirements(requirements_file_path, pf)
    if not skip_pipfile:
        dump_pipfile(pipfile_path, pf)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def update(self, ctx):\n # read original contents of pipfile\n with open('Pipfile') as f:\n original_pipfile = f.read()\n\n # run git pull. If nothing new is pulled, exit here.\n pull_output = await ctx.invoke(ctx.bot.get_command('pull'))\n\n if 'updating' not in pull_output.lower():\n return\n\n commit_message = subprocess.run(['git', 'log', '-1', '--pretty=%B'], stdout=subprocess.PIPE)\n await ctx.send('```yaml\\n{}```'.format(commit_message.stdout.decode('utf-8')))\n\n # read new contents of pipfile\n with open('Pipfile') as f:\n new_pipfile = f.read()\n\n # if no package changes, we just reload the changed extensions.\n # Unless if the main file was changed, which cannot be reloaded,\n # in which case the bot must be restarted.\n if new_pipfile == original_pipfile:\n pattern = r\" cogs\\/(.*).py *\\| [0-9]{1,9} \\+{0,}-{0,}\\n\"\n names = re.findall(pattern, pull_output)\n if not names or 'main' not in names:\n reload_cmd = ctx.bot.get_command('reload')\n for name in names:\n # first subgroup is either helpers or commandcogs, which we don't care about\n await ctx.invoke(reload_cmd, extension_name=name[0])\n await ctx.send('Up to date.')\n return\n\n else:\n # run pipenv install to get all the latest packages\n await ctx.send('Running `pipenv install`, please hold...')\n # Note: when tested in the wild, the bot seemed to be restarted by systemd hereish\n res = subprocess.run(['pipenv', 'install'])\n if res.returncode != 0:\n await ctx.send(\n 'Uh oh, found an error while running `pipenv install`. Time for you to get on fixing it.')\n return\n\n # give a verbal notice if our service file (which restarts us) is not running\n res = subprocess.run(['systemctl', 'status', 'mothbot'], stdout=subprocess.PIPE)\n if res.returncode != 0:\n await ctx.send('WARNING: Error fetching mothbot.service status. Make sure I get restarted.')\n elif 'Active: active (running)' not in res.stdout.decode('utf-8'):\n await ctx.send('WARNING: mothbot.service does not appear to be running. 
Restart me manually.')\n\n # logout\n await ctx.bot.logout()", "def update_requirements():\n\n with virtualenv(VIRTUALENV_PATH):\n cmd = ['pip install']\n cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))", "def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))", "def pipupdate():\n\n packages = [d for d in pkg_resources.working_set]\n subprocess.call('pip install --upgrade ' + ' '.join(packages))", "def update_requirements():\n\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Update virtualenv requirements based on requirements.txt file?\",\n default=True,\n )\n )\n\n if check_prompt:\n with cd(\"%s\" % env.repo_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"pip install\"\n \" --requirement %s/requirements.txt\" % env.repo_path\n )", "def _patch_pip(obuffer, updates, **options):\n\n seen = []\n\n def patched_parse_requirements(*args, **kwargs):\n if not options['no_recursive']:\n filename = args[0]\n if filename not in seen:\n if os.path.isfile(filename):\n seen.append(filename)\n buf = StringIO()\n _internal_update_requirements(\n buf, updates,\n input_file=filename,\n output_file=options['output_file'],\n force=options['force'],\n interactive=options['interactive'],\n skip=options['skip'],\n only=options['only'],\n minor=options['minor'],\n patch=options['patch'],\n pre=options['pre'],\n dry_run=options['dry_run'],\n no_recursive=options['no_recursive'],\n echo=options['echo'],\n index_urls=options['index_urls'],\n verify=options['verify'],\n )\n if not options['dry_run']:\n if options['output_file']:\n obuffer.write(buf.getvalue())\n else:\n with open(filename, 'w') as output:\n output.write(buf.getvalue())\n buf.close()\n return []\n req_file.parse_requirements = patched_parse_requirements", "def update_requirements():\n with cd(REMOTE_REPO_DIR):\n cmd = ['npm install']\n # cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))", "def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']", "def freeze():\n proc = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)\n with open('requirements.txt', 'wb') as fout:\n fout.write(proc.stdout)", "def update_project():\n with cd(env.code_dir):\n with _virtualenv():\n run('git pull origin master')\n install_requirements()\n perform_migration()\n collect_static()", "def upgrade_dependencies():\n # upgrade pip\n print(\"Upgrading/installing any required dependencies...\")\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--user\",\n \"--upgrade\", \"pip\", \"--no-warn-script-location\"],\n shell=True, check=True)\n print(\"pip package manager has been upgraded to the latest version\")\n\n # upgrade/install dependencies such as robot framework\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--user\",\n \"--upgrade\", \"--no-warn-script-location\", \"-r\",\n os.path.join(os.path.curdir, \"requirements.txt\")],\n shell=True, check=True)\n print(\"Robot framework has been upgraded to the latest version\")\n print(\"PyQT5 has been upgraded to the latest version\")", "def install_deps():\n with open('requirements.txt', 'r') as f:\n packages = f.readlines()\n new_pkgs = []\n for 
resource in packages:\n new_pkgs.append(resource.strip())\n return new_pkgs", "def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n with settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)", "def main() -> None:\n verify_pip_is_installed()\n print('Regenerating \"requirements.txt\" file...')\n install_python_dev_dependencies.compile_pip_requirements(\n 'requirements.in', 'requirements.txt')\n # Adds a note to the beginning of the 'requirements.txt' file to make sure\n # developers understand that they should not append or change this\n # autogenerated file.\n with utils.open_file(\n common.COMPILED_REQUIREMENTS_FILE_PATH, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(\n '# Developers: Please do not modify this auto-generated file. If\\n'\n '# you want to add, remove, upgrade, or downgrade libraries,\\n'\n '# please change the `requirements.in` file, and then follow\\n'\n '# the instructions there to regenerate this file.\\n' + content)\n\n mismatches = get_mismatches()\n if mismatches:\n _rectify_third_party_directory(mismatches)\n validate_metadata_directories()\n else:\n print(\n 'All third-party Python libraries are already installed correctly.')", "def parse_requirements_from_pipfile():\n lineiter = (line.strip() for line in open('Pipfile'))\n requirements_pipfile_style = [line for line in lineiter]\n start_index = requirements_pipfile_style.index('[packages]') + 1\n end_index = requirements_pipfile_style.index('[requires]') - 1\n requirements = list(map(lambda x: x.replace(' = \"', '').replace('\"', ''),\n requirements_pipfile_style[start_index:end_index]))\n return requirements", "def format(session):\n session.install('-rrequirements-dev.txt')\n run_yapf(session, diff=False)", "def update_package_files(self) -> None:\n # create the package folder\n self.package_path.mkdir(parents=True, exist_ok=True)\n\n self.clean() # Delete any previous *.py? 
files\n self.copy_stubs()\n self.create_readme()\n self.create_license()", "def pip(c):\n\n if Path('requirements.txt').exists():\n c.run(\"pip install -r requirements.txt\")\n\n for sp_ns in ns_foreach_task_subdir():\n try:\n sp_ns.tasks.pip(c)\n except UnexpectedExit:\n pass", "def update_requirements(input_file=None, output_file=None, force=False,\n interactive=False, skip=[], only=[], minor=[],\n patch=[], pre=[], dry_run=False,\n no_recursive=False, echo=False, index_urls=[],\n verify=True):\n\n obuffer = StringIO()\n updates = defaultdict(list)\n\n # patch pip for handling nested requirements files\n _patch_pip(obuffer, updates, input_file=input_file, output_file=output_file,\n force=force, interactive=interactive, skip=skip, only=only,\n minor=minor, patch=patch, pre=pre, dry_run=dry_run,\n no_recursive=no_recursive, echo=echo, index_urls=index_urls,\n verify=verify)\n\n _internal_update_requirements(obuffer, updates,\n input_file=input_file,\n output_file=output_file,\n force=force,\n skip=skip,\n only=only,\n minor=minor,\n patch=patch,\n pre=pre,\n interactive=interactive,\n dry_run=dry_run,\n no_recursive=no_recursive,\n echo=echo,\n index_urls=index_urls,\n verify=verify,\n )\n\n if not dry_run:\n if not output_file:\n output_file = input_file\n with open(output_file, 'w') as output:\n output.write(obuffer.getvalue())\n\n obuffer.close()\n\n return updates", "def pip_install_req_file(req_file):\n pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'\n sh(f\"{pip_cmd} -r {req_file}\")", "def pip_lock_file() -> None:\n with open(\"requirements.freeze.all\", \"w\") as ff:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"freeze\"], stdout=ff)\n with open(\"requirements.freeze.tmp\", \"w\") as ff:\n subprocess.check_call([\"grep\", \"inmanta\", \"requirements.freeze.all\"], stdout=ff)\n # pip freeze can produce lines with @ that refer to folders outside the container\n # see also https://github.com/pypa/pip/issues/8174\n # also ignore inmanta-dev-dependencies as this is pinned in the requirements.dev.txt\n with open(\"requirements.freeze\", \"w\") as ff:\n subprocess.check_call(\n [\n \"grep\",\n \"-v\",\n \"-e\",\n \"@\",\n \"-e\",\n \"inmanta-dev-dependencies\",\n \"-e\",\n \"inmanta-module-\",\n \"requirements.freeze.tmp\",\n ],\n stdout=ff,\n )\n yield", "def create_update_pyproject_toml(self) -> None:\n if (self.toml_path).exists():\n # do not overwrite the version of a pre-existing file\n _pyproject = self.pyproject\n assert _pyproject is not None\n # clear out the packages section\n _pyproject[\"tool\"][\"poetry\"][\"packages\"] = []\n # update the dependencies section by readin that from the template file\n with open(CONFIG.template_path / \"pyproject.toml\", \"rb\") as f:\n tpl = tomllib.load(f)\n\n _pyproject[\"tool\"][\"poetry\"][\"dependencies\"] = tpl[\"tool\"][\"poetry\"][\"dependencies\"]\n\n else:\n # read the template pyproject.toml file from the template folder\n try:\n with open(CONFIG.template_path / \"pyproject.toml\", \"rb\") as f:\n _pyproject = tomllib.load(f)\n _pyproject[\"tool\"][\"poetry\"][\"version\"] = self.mpy_version\n except FileNotFoundError as e:\n log.error(f\"Could not find template pyproject.toml file {e}\")\n raise (e)\n\n # update the name , version and description of the package\n _pyproject[\"tool\"][\"poetry\"][\"name\"] = self.package_name\n _pyproject[\"tool\"][\"poetry\"][\"description\"] = self.description\n # write out the pyproject.toml file\n self.pyproject = _pyproject", "def setup(ctx):\r\n 
ctx.run('pip3 install -r requirements.txt')", "def install_requirements():\n run_commands('pip install -r ./requirements/dev.txt')", "def install_requirements_file(self, requirements_file):\n self.run_fabric_task(install_requirements_without_virtualenv(requirements_file))", "def lint_format(session):\n session.install('-rrequirements-dev.txt')\n run_yapf(session, diff=True)", "def install_requirements():\n _git_pull()\n _install_requirements()\n _syncdb()\n _migrate()\n _restart_webserver()", "def deploy():\n git_pull()\n if confirm(\"Install/upgrade requirements with pip?\"):\n install_requeriments()\n django_command('collectstatic')\n django_command('migrate')\n restart()", "def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')", "def upgrade_pip():\n out_info(\"Upgrading pip...\")\n pipexe = [sys.executable, \"-m\", \"pip\"]\n pipexe.extend([\"install\", \"--no-cache-dir\", \"-qq\", \"--upgrade\"])\n if not IS_ADMIN and not IS_VIRTUALENV:\n pipexe.append(\"--user\")\n pipexe.append(\"pip\")\n run(pipexe)", "def format_requirements():\n for filename, requirements in _read_requirements():\n _write_requirements(filename, requirements)", "def update_go_deps(self):\n self.go_version()\n env = self.m.step.get_from_context('env', {})\n env.update(self.go_env)\n with self.m.step.context({'env': env}):\n self.m.run.with_retry(\n self.m.step,\n 'update go pkgs',\n UPDATE_GO_ATTEMPTS,\n cmd=[self.go_exe, 'get', '-u', '-t', '%s/...' % INFRA_GO_PKG])", "def sync():\n for filename, requirements in _sync():\n _write_requirements(filename, requirements)", "def install_requirements():\n req_path = os.path.join(vlogger_dir, \"requirements.txt\")\n subprocess.call([\"pip\", \"install\", \"-r\", req_path])", "def install_requirements(self, rel_path):\n self._log.debug(\"Installing requirements {}\".format(rel_path))\n\n rel_path = rel_path.replace(\"/\", os.path.sep)\n full_path = os.path.join(self._code_dir, rel_path)\n\n with open(full_path, \"rb\") as f:\n data = f.read()\n\n # this takes a fair amount of time sometimes, so if there's an\n # empty requirements.txt file, skip installing it\n actual_req_count = 0\n for line in data.split(\"\\n\"):\n line = line.strip()\n if line == \"\" or line.startswith(\"#\"):\n continue\n actual_req_count += 1\n if actual_req_count == 0:\n self._log.debug(\"Empty requirements.txt, skipping\")\n return\n\n try:\n threading.local().indentation = 0\n pypi_hostname = re.match(r'^.*://([^/]+)/.*$', self.pypi_loc).group(1)\n self._run_pip_main([\n \"install\",\n \"--user\",\n \"--trusted-host\", pypi_hostname,\n \"-i\", self.pypi_loc,\n \"-r\", full_path\n ])\n \n # this is expected - pip.main will *always* exit\n except SystemExit as e:\n # TODO\n raise Exception(\"Is SystemExit normal?\")\n\n threading.local().indentation = 0", "def install_packages():\n with open(\"requirements.txt\", \"w\") as requirements_file:\n subprocess.run([\"pipenv\", \"lock\", \"-r\"], stdout=requirements_file)\n\n subprocess.run(\n [\"pip\", \"install\", \"-r\", \"requirements.txt\", \"--no-deps\", \"-t\", BUILD_DIR]\n )", "def install(self):\n\n self.clean_git_checkout(self.git_repo, '/src')\n\n self.__copy_config_templates();\n\n self.local(\"sudo pip install -r src/requirements.txt --upgrade\")\n\n if not self.is_local():\n PiService.install(self) #copy to remote\n\n self.sudo(\"pip install -r src/requirements.txt 
--upgrade\")", "def build_reqs():\n requirements_path = Path.cwd() / \"src\" / \"requirements.in\"\n if not requirements_path.is_file():\n secho(\"No requirements.in found. Copying contents from requirements.txt...\")\n contents = (Path.cwd() / \"src\" / \"requirements.txt\").read_text()\n requirements_path.write_text(contents)\n python_call(\"piptools\", [\"compile\", str(requirements_path)])\n secho(\n (\n \"Requirements built! Please update requirements.in \"\n \"if you'd like to make a change in your project's dependencies, \"\n \"and re-run build-reqs to generate the new requirements.txt.\"\n )\n )", "def prod_server():\n sh(\"bin/pip freeze -r requirements.txt production/requirements.txt\")", "def pur(**options):\n\n if not options['requirement']:\n options['requirement'] = 'requirements.txt'\n\n format_list_arg(options, 'skip')\n format_list_arg(options, 'only')\n format_list_arg(options, 'minor')\n format_list_arg(options, 'patch')\n format_list_arg(options, 'pre')\n\n options['echo'] = True\n\n global PUR_GLOBAL_UPDATED\n PUR_GLOBAL_UPDATED = 0\n\n update_requirements(\n input_file=options['requirement'],\n output_file=options['output'],\n force=options['force'],\n interactive=options['interactive'],\n skip=options['skip'],\n only=options['only'],\n minor=options['minor'],\n patch=options['patch'],\n pre=options['pre'],\n dry_run=options['dry_run'],\n no_recursive=options['no_recursive'],\n echo=options['echo'],\n index_urls=options['index_url'],\n verify=options['verify'],\n )\n\n if not options['dry_run']:\n _echo('All requirements up-to-date.')\n\n if options['nonzero_exit_code']:\n if PUR_GLOBAL_UPDATED > 0:\n raise ExitCodeException(11)\n raise ExitCodeException(10)", "def freeze_ansible_role_requirements_file(filename=\"\"):\n update_ansible_role_requirements_file(\n filename, branchname=\"master\", milestone_freeze=True\n )", "def upgrade_packages():\n\n require('environment', provided_by=env.environments)\n system.update_apt_sources()\n system.upgrade_apt_packages()", "def restore(c):\n c.run('pip install -r tests/requirements.txt')", "def sync_virtualenv(ctx):\n if not path.isfile(\"./pyenv/bin/pip\"):\n ctx.run(\"virtualenv --no-site-packages --python=/usr/bin/python2.7 pyenv\")\n ctx.run(\"PIP_DOWNLOAD_CACHE=/var/tmp/ ./pyenv/bin/pip install -r requirements.txt\")\n print(\"\"\"\n Installation completed. Please check any error messages above.\n\n If you are going to use `openstack` or ansible directly on the command line, run\n\n . ./pyenv/bin/activate\n\n or even add it to your ~/.bashrc\n \"\"\")", "def gen_requirements(site_packages_dir, requirements_file):\n utils_requirements.lock_requirements(\n requirements_file=requirements_file,\n site_packages_dir=site_packages_dir,\n )", "def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. 
fabric_factory/ve/bin/activate; pip install -r requirements.txt')", "def install_requirements():\n run('source %(env_path)s/bin/activate; pip install -U -r %(repo_path)s/requirements.txt' % env)", "def _pipInstall(self, directory: Directory) -> None:\n\n pipExec = os.path.join(os.path.dirname(sys.executable), \"pip\")\n\n pipArgs = [sys.executable, pipExec] + self.makePipArgs(directory)\n\n # The platform update is tested for dependencies when it's first uploaded\n # PIP has a bug, when you have updated packages for several dependent files\n # and try to install them all at once, some of the packages don't update.\n pipArgs += ['--no-deps']\n\n pipArgs = ' '.join(pipArgs)\n\n try:\n spawnPty(pipArgs)\n logger.info(\"Peek package update complete.\")\n\n except Exception as e:\n logSpawnException(e)\n\n # Update the detail of the exception and raise it\n e.message = \"Failed to install packages from the new release.\"\n raise", "def add_uppers():\n for filename, requirements in _sync():\n LOG.info(\"Obtaining latest versions of packages for %s.\", filename)\n for req in requirements:\n if isinstance(req, Requirement):\n if isinstance(req.version, dict) and not req.version[\"max\"]:\n req.sync_max_version_with_pypy()\n _write_requirements(filename, requirements)", "def install_requirements():\r\n if env.hosts:\r\n run ('cd %(path)s %(command_join)s env/bin/pip install -r current-release/requirements.txt' % env)\r\n else:\r\n local('%spip install -r requirements.txt' % virtualenv_bin, capture=False)", "def bump_client_version(self):\n path = os.path.join(\".\", \"client\", \"package.json\")\n input_file = open(path)\n\n try:\n package = json.loads(input_file.read().decode(\"utf-8\"))\n except (ValueError) as e:\n print(\"Unable to read \" + path + \" \" + e) # noqa: T201\n raise SystemExit(1)\n\n package[\"version\"] = __semver__\n\n try:\n with open(path, \"w\", encoding=\"utf-8\") as f:\n f.write(str(json.dumps(package, indent=2, ensure_ascii=False)))\n except (OSError) as e:\n print( # noqa: T201\n \"Error setting the version for front-end assets: \" + str(e)\n )\n raise SystemExit(1)", "def update(self):\n with settings(user=self.serviceUser):\n self.venv.create()\n\n self.venv.install_twisted()\n self.venv.install(\" \".join(\"\"\"\n psycopg2==2.7.5\n pygments==2.2.0\n spambayes==1.1b3\n trac==1.2.2\n trac-github==2.3\n requests_oauthlib==1.0.0\n svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310\n git+https://github.com/twisted-infra/twisted-trac-plugins.git\n \"\"\".split()))\n\n # This is txacme v2 but is not yet released.\n # Should be replaced on we have txacme v2.\n # See https://github.com/twisted/txacme/pull/158\n self.venv.install(\n \"--index=https://pypi.chevah.com/simple txacme==1.0.0.chevah4\")\n\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)", "def clean_extra_package_managment_files():\n use_pipenv = '{{cookiecutter.use_pipenv}}'\n to_delete = []\n\n if use_pipenv == 'yes':\n to_delete = to_delete + ['requirements.txt', 'requirements']\n else:\n to_delete.append('Pipfile')\n\n try:\n for file_or_dir in to_delete:\n if os.path.isfile(file_or_dir):\n os.remove(file_or_dir)\n else:\n shutil.rmtree(file_or_dir)\n sys.exit(0)\n except OSError as e:\n sys.stdout.write(\n 'While attempting to remove file(s) an error occurred'\n )\n sys.stdout.write('Error: {}'.format(e))", "def install_deps():\n default = open('requirements.txt', 'r').readlines()\n new_pkgs = []\n links = []\n for resource in 
default:\n if 'git+https' in resource:\n pkg = resource.split('#')[-1]\n links.append(resource.strip())\n new_pkgs.append(pkg.replace('egg=', '').rstrip())\n else:\n new_pkgs.append(resource.strip())\n return new_pkgs, links", "def update():\n with cd(env.directory):\n\n # update plone\n result = sudo('git pull', user=env.deploy_user)\n quick_update = 'Already up-to-date.' in result\n\n if quick_update:\n # Plonesite Recipe replaces site on the fly\n print 'UPDATE: No full Buildout required: {0:s}'.format(result)\n # buildout\n stop()\n sudo('./bin/buildout install plonesite', user=env.deploy_user)\n start()\n\n else:\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n\n sudo('rm -rf ./var/blobstorage', user=env.deploy_user)\n sudo('rm -rf ./var/filestorage', user=env.deploy_user)\n sudo('rm .installed.cfg', user=env.deploy_user)\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo('./bin/zeoclient_debug adduser admin admin', user=env.deploy_user) # noqa: E501\n\n # load page twice to fill cache and prevent a bug showing raw html\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501", "def install_requirements():\n with cd(env.code_dir):\n with _virtualenv():\n sudo('pip install -r requirements.txt', pty=True)", "def _setup_pip(self, context):\n # We run ensurepip in isolated mode to avoid side effects from\n # environment vars, the current directory and anything else\n # intended for the global Python environment\n cmd = [context.env_exec_cmd, '-Im', 'ensurepip', '--upgrade',\n '--default-pip']\n subprocess.check_output(cmd, stderr=subprocess.STDOUT)", "def install_requirements():\n require(\"release\", provided_by=[deploy])\n with cd(\"%(path)s\" % env):\n sudo(\"./bin/pip install -r ./releases/%(release)s/requirements.txt\" % env)", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")", "def update():\n require('PROJECT_NAME')\n\n with cd(utils.home('apps', env.PROJECT_NAME)):\n run('hg pull')\n run('hg up')", "def install():\n verun('pip install -r {0}'.format(requirements))", "def requirements(context):\n pip_compile = \"pip-compile --annotate --quiet\"\n\n command = (\n f\"{pip_compile} requirements/base.in \"\n f\"&& {pip_compile} requirements/local.in \"\n f\"&& {pip_compile} requirements/production.in\"\n )\n command = f\"run --rm django bash -c '{command}'\"\n run_command(context, get_local_user(), False, None, None, command)", "def test_vcs_entry_supersedes_non_vcs(pipenv_instance_pypi):\n with pipenv_instance_pypi(chdir=True) as p:\n jinja2_uri = p._pipfile.get_fixture_path(\"git/jinja2\").as_uri()\n with open(p.pipfile_path, \"w\") as f:\n f.write(\n \"\"\"\n[[source]]\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\nFlask = \"*\"\nJinja2 = {{ref = \"2.11.0\", git = \"{0}\"}}\n 
\"\"\".format(jinja2_uri).strip()\n )\n c = p.pipenv(\"install\")\n assert c.returncode == 0\n installed_packages = [\"Flask\", \"Jinja2\"]\n assert all([k in p.pipfile[\"packages\"] for k in installed_packages])\n assert all([k.lower() in p.lockfile[\"default\"] for k in installed_packages])\n assert all([k in p.lockfile[\"default\"][\"jinja2\"] for k in [\"ref\", \"git\"]]), str(p.lockfile[\"default\"])\n assert p.lockfile[\"default\"][\"jinja2\"].get(\"ref\") is not None\n assert (\n p.lockfile[\"default\"][\"jinja2\"][\"git\"]\n == jinja2_uri\n )", "def pylint(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_pylint(session)", "def update_openblock():\n\n tf = tempfile.mktemp(suffix='-openblock')\n local('git clone git://github.com/openplans/openblock.git {0}'.format(tf))\n dest = os.path.join(PROJECT_ROOT, 'requirements', 'sdists')\n for name in ('obadmin', 'ebdata', 'ebpub'):\n package = os.path.join(tf, name)\n os.chdir(package)\n local('pip install -e {source} -d {dest}'.format(source=package,\n dest=dest))\n shutil.rmtree(tf)", "def _pip_install_requirements(\n install_path: str, requirements_path: str\n) -> None:\n verify_pip_is_installed()\n _run_pip_command([\n 'install', '--require-hashes', '--no-deps', '--target',\n install_path, '--no-dependencies', '-r', requirements_path, '--upgrade'\n ])", "def rewrite(self):\n for f in self.files:\n metadata = dict()\n metadata[\"description\"] = f.metadata.get(\"desc\", \"Unknown\")\n metadata[\"script\"] = os.path.basename(f.filename)\n metadata[\"requires\"] = []\n for package, component in f.requires:\n if package == self.key:\n metadata[\"requires\"].append(\"/\" + component)\n else:\n metadata[\"requires\"].append(package + \"/\" + component)\n metadata[\"provides\"] = [ p[1] for p in f.provides ]\n # Resolve symlinks\n real_filename = os.path.realpath(f.filename)\n LOG.info(\"Editing: \" + real_filename)\n new_filename = f.filename + \".new\"\n new = file(new_filename, \"w\")\n new.write(\"/*\\n---\\n\")\n new.write(yaml.dump(metadata))\n new.write(\"\\n...\\n*/\\n\")\n new.write(file(f.filename).read())\n new.close()\n os.rename(new_filename, real_filename)\n\n package_data = dict()\n package_data[\"name\"] = self.key\n package_data[\"sources\"] = []\n package_data[\"version\"] = \"Unknown\"\n package_data[\"copyright\"] = \"Unknown\"\n package_data[\"description\"] = \"Unknown\"\n target_dir = os.path.dirname(self.scripts_json_filename)\n # package.yml is typically in the parent of the scripts.json dir\n if os.path.basename(target_dir) == \"Source\":\n target_dir = os.path.dirname(target_dir)\n target_filename = os.path.join(target_dir, \"package.yml\")\n for f in self.files:\n common = os.path.commonprefix([target_filename, f.filename])\n source_file = f.filename[len(common):]\n package_data[\"sources\"].append(source_file)\n LOG.info(\"Writing: \" + target_filename)\n out = file(target_filename, \"w\")\n out.write(yaml.dump(package_data))\n out.close()", "def update():\n\n # update plone\n with cd(env.directory):\n sudo('git pull', user=env.deploy_user)\n\n with cd(env.directory):\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('rm -rf ./src-mrd', user=env.deploy_user)\n else:\n sudo('./bin/pip install --no-cache-dir -r requirements.txt', user=env.deploy_user) # 
noqa: E501\n\n sudo('rm -rf ./var/blobstorage ./var/filestorage .installed.cfg ', user=env.deploy_user) # noqa: E501\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo(\"sleep 10\")\n\n # create plonesite with addons (uses different ports for py2 and py3)\n if env.latest:\n if env.python3:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py3.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py2.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n\n # load page to warmup\n sudo('/usr/bin/wget -S -qO- {domain} > /dev/null'.format(domain=env.domain), user=env.deploy_user) # noqa: E501", "def install_requirements(self):\n logging.debug('Attempting to pip install requirements to build dir...')\n try:\n pip_install_cmd = Lambda.PIP_INSTALL_REQUIREMENTS_TMPL.format(requirements=self.requirements_file,\n build_dir=self.build_dir)\n completed_process = subprocess.run(pip_install_cmd, shell=True, check=True,\n stdout=subprocess.PIPE)\n\n logging.debug('Successful pip install.')\n logging.debug('stdout: {}'.format(completed_process.stdout))\n logging.debug('stderr: {}'.format(completed_process.stderr))\n except subprocess.CalledProcessError as e:\n logging.error('Failed to install pip requirements to build dir..')\n raise e", "def piprot():\n cli_parser = argparse.ArgumentParser(\n epilog=\"Here's hoping your requirements are nice and fresh!\"\n )\n cli_parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"verbosity, can be supplied more than once \"\n \"(enabled by default, use --quiet to disable)\",\n )\n cli_parser.add_argument(\n \"-l\",\n \"--latest\",\n action=\"store_true\",\n help=\"print the lastest available version for out \" \"of date requirements\",\n )\n cli_parser.add_argument(\n \"-x\",\n \"--verbatim\",\n action=\"store_true\",\n help=\"output the full requirements file, with \"\n \"added comments with potential updates\",\n )\n cli_parser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_true\",\n help=\"be a little less verbose with the output \" \"(<0.3 behaviour)\",\n )\n cli_parser.add_argument(\n \"-o\", \"--outdated\", action=\"store_true\", help=\"only list outdated 
requirements\"\n )\n\n cli_parser.add_argument(\n \"-g\",\n \"--github\",\n help=\"Test the requirements from a GitHub repo. \"\n \"Requires that a `requirements.txt` file \"\n \"exists in the root of the repository.\",\n )\n\n cli_parser.add_argument(\n \"-b\",\n \"--branch\",\n help=\"The branch to test requirements from, used with \"\n \"the Github URL support.\",\n )\n\n cli_parser.add_argument(\n \"-t\",\n \"--token\",\n help=\"Github personal access token to be used with \" \"the Github URL support.\",\n )\n\n cli_parser.add_argument(\n \"-p\", \"--path\", help=\"Path to requirements file in remote repository.\"\n )\n\n cli_parser.add_argument(\n \"-d\",\n \"--delay\",\n help=\"Delay before an outdated package triggers an error.\"\n \"(in days, default to 1).\",\n )\n\n cli_parser.add_argument(\"-u\", \"--url\", help=\"URL to requirements file.\")\n\n # if there is a requirements.txt file, use it by default. Otherwise print\n # usage if there are no arguments.\n nargs = \"+\"\n\n if (\n \"--github\" in sys.argv\n or \"-g\" in sys.argv\n or \"-u\" in sys.argv\n or \"--url\" in sys.argv\n ):\n nargs = \"*\"\n\n default = None\n if os.path.isfile(\"requirements.txt\"):\n nargs = \"*\"\n default = [open(\"requirements.txt\")]\n\n cli_parser.add_argument(\n \"file\",\n nargs=nargs,\n type=argparse.FileType(),\n default=default,\n help=\"requirements file(s), use \" \"`-` for stdin\",\n )\n\n cli_args = cli_parser.parse_args()\n\n if len(cli_args.file) > 1 and cli_args.verbatim:\n sys.exit(\"--verbatim only allowed for single requirements files\")\n\n verbose = True\n if cli_args.quiet:\n verbose = False\n elif cli_args.verbatim:\n verbose = False\n\n # call the main function to kick off the real work\n main(\n req_files=cli_args.file,\n verbose=verbose,\n outdated=cli_args.outdated,\n latest=cli_args.latest,\n verbatim=cli_args.verbatim,\n repo=cli_args.github,\n branch=cli_args.branch,\n path=cli_args.path,\n token=cli_args.token,\n url=cli_args.url,\n delay=cli_args.delay,\n )", "def upgrade(self, dependencies = False):\n pip_args = []\n proxy = environ.get('http_proxy')\n if proxy:\n pip_args.append('--proxy')\n pip_args.append(proxy)\n pip_args.append('install')\n pip_args.append(self.pkg)\n if self.index is not None:\n pip_args.append('-i')\n pip_args.append(\"{}/\".format(self.index))\n if not dependencies:\n pip_args.append(\"--no-deps\")\n if self._get_current() != [-1]:\n pip_args.append(\"--upgrade\")\n a=pip.main(pip_args)\n return a==0", "def read_pipfile() -> List[str]:\n pfile = configparser.ConfigParser()\n pfile.read('Pipfile')\n req_specifiers = []\n for package, version in pfile['packages'].items():\n # normalize strings, since Pipenv likes to add quotes on some things\n package = package.strip('\\'\"')\n version = version.strip('\\'\"')\n spec = package + ('' if version == '*' else version)\n req_specifiers.append(spec)\n return req_specifiers", "def main(\n req_files,\n verbose=False,\n outdated=False,\n latest=False,\n verbatim=False,\n repo=None,\n path=\"requirements.txt\",\n token=None,\n branch=\"master\",\n url=None,\n delay=None,\n):\n requirements = []\n\n if repo:\n github_url = build_github_url(repo, branch, path, token)\n req_file = get_requirements_file_from_url(github_url)\n requirements.extend(parse_req_file(req_file))\n elif url:\n req_file = get_requirements_file_from_url(url)\n requirements.extend(parse_req_file(req_file))\n else:\n for req_file in req_files:\n requirements.extend(parse_req_file(req_file, verbatim=verbatim))\n req_file.close()\n\n 
total_time_delta = 0\n max_outdated_time = 0\n results = []\n\n for req, version, ignore in requirements:\n if verbatim and not req:\n results.append(version)\n elif req:\n results.append(\n {\n \"req\": req,\n \"version\": version,\n \"ignore\": ignore,\n \"latest\": request(get_pypi_url(req)),\n \"specified\": request(get_pypi_url(req, version)),\n }\n )\n\n for result in results:\n if isinstance(result, str):\n print(result.replace(\"\\n\", \"\"))\n continue\n\n if result[\"ignore\"]:\n if verbatim:\n print(\"{}=={} # norot\".format(result[\"req\"], result[\"version\"]))\n else:\n print(\"Ignoring updates for {}. \".format(result[\"req\"]))\n continue\n\n req = result[\"req\"]\n version = result[\"version\"]\n\n latest_version, latest_release_date = get_version_and_release_date(\n req, verbose=verbose, response=result[\"latest\"]\n )\n specified_version, specified_release_date = get_version_and_release_date(\n req, version, response=result[\"specified\"]\n )\n\n if latest_release_date and specified_release_date:\n time_delta = (latest_release_date - specified_release_date).days\n total_time_delta = total_time_delta + time_delta\n max_outdated_time = max(time_delta, max_outdated_time)\n\n if verbose:\n if time_delta > 0:\n print(\n \"{} ({}) is {} days out of date. \"\n \"Latest is {}\".format(req, version, time_delta, latest_version)\n )\n elif version != latest_version:\n print(\n \"{} ({}) is out of date. \"\n \"Latest is {}\".format(req, version, latest_version)\n )\n elif not outdated:\n print(\"{} ({}) is up to date\".format(req, version))\n\n if latest and latest_version != specified_version:\n print(\n \"{}=={} # Updated from {}\".format(\n req, latest_version, specified_version\n )\n )\n elif verbatim and latest_version != specified_version:\n print(\n \"{}=={} # Latest {}\".format(req, specified_version, latest_version)\n )\n elif verbatim:\n print(\"{}=={}\".format(req, specified_version))\n\n elif verbatim:\n print(\"{}=={} # Error checking latest version\".format(req, version))\n\n verbatim_str = \"\"\n if verbatim:\n verbatim_str = \"# Generated with piprot {}\\n# \".format(VERSION)\n\n if total_time_delta > 0 and delay is None:\n print(\n \"{}Your requirements are {} \"\n \"days out of date\".format(verbatim_str, total_time_delta)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time > int(delay):\n print(\n \"{}At least one of your dependencies is {} \"\n \"days out of date which is more than the allowed\"\n \"{} days.\".format(verbatim_str, max_outdated_time, delay)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time <= int(delay):\n print(\n \"{}All of your dependencies are at most {} \"\n \"days out of date.\".format(verbatim_str, delay)\n )\n else:\n print(\n \"{}Looks like you've been keeping up to date, \"\n \"time for a delicious beverage!\".format(verbatim_str)\n )", "def bump_arr():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--file\",\n help=\"path to ansible-role-requirements.yml file\",\n default=\"ansible-role-requirements.yml\",\n )\n parser.add_argument(\n \"os-branch\",\n help=\"Branch to use to find the role SHA for openstack roles. 
Master will also freeze external roles.\",\n )\n args = parser.parse_args()\n releasing.update_ansible_role_requirements_file(filename=args['file'],branchname=args['os-branch'])", "def check_requirements(session):\n session.install('-r', 'testing/requirements.txt')\n\n if 'update' in session.posargs:\n command = 'update-requirements'\n else:\n command = 'check-requirements'\n\n reqfiles = list(_list_files('.', 'requirements*.txt'))\n\n for reqfile in reqfiles:\n session.run('gcp-devrel-py-tools', command, reqfile)", "def update(self, gppkg_filename):\n run_command(\"gppkg --update %s\" % gppkg_filename)\n self.assertTrue(self.check_install(gppkg_filename))", "def test_parse_update_spec_file(self):\n content_init = \"\\n\".join([\n \"Requires: python-pkg1 >=1.0\",\n \"BuildRequires: python-pkg1 >= 1.0\",\n ])\n content_expected = \"\\n\".join([\n \"Requires: python-pkg1 >= 2.0\",\n \"BuildRequires: python-pkg1 >= 1.0\",\n ])\n self.assertEqual(\n content_expected,\n pr.parse_update_spec_file(\n \"testpackage.spec\",\n content_init, {\n \"install_requires\": [\n \"pkg1>=2.0\",\n ],\n }\n )\n )", "def _reinstall_all_dependencies() -> None:\n _pip_install_requirements(\n common.THIRD_PARTY_PYTHON_LIBS_DIR,\n common.COMPILED_REQUIREMENTS_FILE_PATH\n )", "def install_dependencies():\n\n # check python version and verify we are using Python 3\n if sys.version[0] < '3':\n print(\"ERROR: python version 3 required. You are using version \"\n \"{}\".format(sys.version))\n print(\"You must install python 3 from https://www.python.org\")\n print(\"Make sure to check the 'pip' package manager option when\")\n print(\"installing python\")\n return\n try:\n import pip\n except ModuleNotFoundError:\n print(\"The python 'pip' package manager is required.\")\n print(\"Go to https://www.python.org and download Python 3\")\n print(\"When re-installing, select 'modify' and make sure\")\n print(\"to check the 'pip' option\")\n return\n\n print(\"Python 3 and pip is installed\")\n\n # upgrade/install dependencies such as robot framework\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"-q\", \"--user\",\n \"--no-warn-script-location\", \"-r\",\n os.path.join(os.path.curdir, \"requirements.txt\")],\n shell=True, check=True)\n print(\"Robot framework is installed and up to date\")\n print(\"PyQT5 is installed and up to date\")", "def update_from_repo():\n\treturn", "def required():\n pip = path(\"bin/pip\")\n if not pip.exists():\n sh('%s install -E tg2env -r normal-reqs.txt --extra-index-url=http://www.turbogears.org/2.0/downloads/current/index' % pip)\n call_pavement('pavement.py', 'develop')", "def pip_requirements():\n\n require(\n \"virtualenv_path\",\n \"requirements_path\",\n \"http_proxy\",\n \"https_proxy\",\n \"sudo_user\",\n )\n cmd = \"pip install --quiet --requirement %s\" % env.requirements_path\n\n # append packages url if specified\n if env.get(\"packages_url\") is not None:\n cmd += \" -f %s\" % env.get(\"packages_url\")\n\n with context_managers.proxy(env.http_proxy, env.https_proxy):\n with context_managers.virtualenv(env.virtualenv_path):\n sudo(cmd, user=env.sudo_user)", "def deps(ctx):\n header(deps.__doc__)\n with ctx.cd(ROOT):\n ctx.run(\n \"pip install -r requirements/develop.pip -r requirements/doc.pip\", pty=True\n )", "def lint(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_all_linters(session)", "def update_ansible_role_requirements_file(\n filename=\"\", branchname=\"\", milestone_freeze=False\n):\n if branchname not in [\n 
\"master\",\n \"stable/ocata\",\n \"stable/pike\",\n \"stable/queens\",\n \"stable/rocky\",\n \"stable/stein\",\n \"stable/train\",\n \"stable/ussuri\",\n \"stable/victoria\",\n \"stable/wallaby\",\n ]:\n raise ValueError(\"Branch not recognized %s\" % branchname)\n\n openstack_roles, external_roles, all_roles = sort_roles(filename)\n\n clone_root_path = tempfile.mkdtemp()\n\n for role in all_roles:\n trackbranch = role.get(\"trackbranch\")\n if not trackbranch or trackbranch.lower() == \"none\":\n print(\n \"Skipping role %s branch\" % role[\"name\"]\n )\n continue\n\n copyreleasenotes = False\n\n shallow_since = role.get(\"shallow_since\")\n\n # We don't want to copy config_template renos even if it's an openstack\n # role, as it's not branched the same way.\n if role in openstack_roles and (not role[\"src\"].endswith(\"config_template\")):\n copyreleasenotes = True\n\n # Freeze sha by checking its trackbranch value\n # Do not freeze sha if trackbranch is None\n if trackbranch:\n try:\n role_repo = clone_role(\n role[\"src\"], trackbranch, clone_root_path, depth=\"1\"\n )\n # Unfreeze on master, not bump\n if branchname == \"master\" and not milestone_freeze:\n print(\"Unfreeze master role\")\n role[\"version\"] = trackbranch\n # Freeze or Bump\n else:\n role_head = role_repo.head()\n role[\"version\"] = role_head.decode()\n print(\"Bumped role %s to sha %s\" % (role[\"name\"], role[\"version\"]))\n\n if shallow_since:\n head_timestamp = role_repo[role_head].commit_time\n head_datetime = datetime.fromtimestamp(head_timestamp) - timedelta(days=1)\n role[\"shallow_since\"] = head_datetime.strftime('%Y-%m-%d')\n\n # Copy the release notes `Also handle the release notes\n # If frozen, no need to copy release notes.\n if copyreleasenotes:\n print(\"Copying %s's release notes\" % role[\"name\"])\n copy_role_releasenotes(role_repo.path, \"./\")\n finally:\n shutil.rmtree(role_repo.path)\n\n shutil.rmtree(clone_root_path)\n print(\"Overwriting ansible-role-requirements\")\n with open(filename, \"w\") as arryml:\n yaml = YAML() # use ruamel.yaml to keep comments that could appear\n yaml.dump(all_roles, arryml)", "def update_config(\n source,\n target,\n force=True,\n):\n\n source = Path(source)\n target = Path(target)\n\n managed_files = (\n 'requirements.in',\n 'requirements.txt',\n )\n\n if (any([osp.exists(target / f)\n for f in managed_files])\n and not force):\n\n raise OSError(\"Project config exists, not overwriting\")\n\n elif force:\n for f in managed_files:\n f = target / f\n if osp.isdir(f):\n print(f\"Cleaning {f}\")\n shutil.rmtree(f)\n elif osp.isfile(f):\n print(f\"Cleaning {f}\")\n os.remove(f)\n\n print(\"Updating .jubeo/requirements.in\")\n shutil.copyfile(\n source / \"requirements.in\",\n target / \"requirements.in\"\n )\n\n print(\"Updating .jubeo/requirements.txt\")\n shutil.copyfile(\n source / \"requirements.txt\",\n target / \"requirements.txt\"\n )", "def sub_install_python_requirements_aws():\n # Activate the virtualenv\n activate = 'source {0}/{1}/bin/activate'.format(\n env.virtualenv['dir'], env.virtualenv['name'])\n run(activate)\n\n # make sure the directory is there\n run('mkdir -p /home/ubuntu')\n\n # put the local directory '/Users/jenniferchen/github/HS698-project'\n # - it contains files or subdirectories\n # to the ubuntu server\n put('/Users/jenniferchen/github/HS698-project',\n '/home/ubuntu')\n\n # Install Python requirements\n install = 'pip install -r ' \\\n '/home/ubuntu/HS698-project/Flask_app/requirements.txt'\n\n # Join and execute the commands\n 
sudo(install)\n # Run the file app.py to start the Flask app\n dev_server = 'python HS698-project/Flask_app/app.py'\n run(dev_server)", "def _update_properties_file(self, lines, filename):\n found_version_line = False\n if filename.endswith('cogent-requirements.txt'):\n for lineno, line in enumerate(lines):\n if 'packages/source/c/cogent' in line:\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n http_base = lines[lineno].rsplit('/',1)[0]\n lines[lineno] = '%s/PyCogent-%s.tgz\\n' % (http_base, self.Version)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "async def pull(ctx, pip=None):\n dev = ctx.message.author\n if bot.botdev_role in dev.roles or bot.owner_role in dev.roles:\n await ctx.send(\"`Pulling changes...`\")\n call([\"git\", \"stash\", \"save\"])\n call([\"git\", \"pull\"])\n call([\"git\", \"stash\", \"clear\"])\n pip_text = \"\"\n if pip == \"-p\" or pip == \"--pip\" or pip == \"-Syu\":\n await ctx.send(\"`Updating python dependencies...`\")\n call([\"python3.6\", \"-m\", \"pip\", \"install\", \"--user\", \"--upgrade\", \"-r\",\n \"requirements.txt\"])\n pip_text = \" and updated python dependencies\"\n await ctx.send(\"Pulled changes{}! Restarting...\".format(pip_text))\n call([\"python3.6\", \"SchmuckBot.py\"])\n else:\n if \"pacman\" in ctx.message.content:\n await ctx.send(\"`{} is not in the sudoers file. This incident will be reported.`\".format(ctx.message.author.display_name))\n else:\n await ctx.send(\"Only bot devs and / or owners can use this command\")", "def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError", "def update_packages(self, config_file):\n entries = yacman.load_yaml(config_file)\n self.update(entries)\n return True", "def sub_install_python_requirements():\n # Activate the virtualenv\n activate = 'source {0}/{1}/bin/activate'.format(\n env.virtualenv['dir'], env.virtualenv['name'])\n run(activate)\n\n # Install Python requirements\n install = 'pip install -r /vagrant/Flask_app/requirements.txt'\n\n # Join and execute the commands\n run(activate + '; ' + install)", "def _metadata_update_pulp():\n items = db.firmware.get_all()\n files_to_scan = []\n files_to_scan.append('firmware.xml.gz')\n files_to_scan.append('firmware.xml.gz.asc')\n for item in items:\n if item.target != 'stable':\n continue\n files_to_scan.append(item.filename)\n\n # for each file in stable plus metadata\n data = []\n download_dir = app.config['DOWNLOAD_DIR']\n for f in files_to_scan:\n fn = os.path.join(download_dir, f)\n if not os.path.exists(fn):\n continue\n\n # filename,sha256,size\n sha256 = _hashfile(open(fn, 'rb'), hashlib.sha256())\n fn_sz = os.path.getsize(fn)\n data.append('%s,%s,%i\\n' % (f, sha256, fn_sz))\n\n # write file\n filename = os.path.join(download_dir, 'PULP_MANIFEST')\n f = open(filename, 'w')\n f.writelines(data)\n f.close()\n\n # upload to CDN\n blob = open(filename, 'rb').read()\n _upload_to_cdn(filename, blob)\n return", "def update(filepath, github_account, discovery_documents):\n repo = _git.clone_from_github(\n _REPO_PATH, join(filepath, _REPO_NAME), github_account=github_account)\n venv_filepath = join(repo.filepath, 'venv')\n check_output(['virtualenv', venv_filepath, '-p', 'python2.7'])\n # The PHP client library generator is published in the\n # \"google-apis-client-generator\" package.\n check_output([join(venv_filepath, 'bin/pip'),\n 'install',\n 'google-apis-client-generator==1.7.0'])\n 
added, updated = _generate_and_commit_all_clients(\n repo, venv_filepath, discovery_documents)\n commit_count = len(added) + len(updated)\n if commit_count == 0:\n return\n _run_tests(repo)\n repo.soft_reset('HEAD~{}'.format(commit_count))\n commitmsg = _commit_message.build(added, None, updated)\n repo.commit(commitmsg, github_account.name, github_account.email)\n repo.push()", "def update(filepath, github_account):\n repo = _git.clone_from_github(\n _REPO_PATH, join(filepath, _REPO_NAME), github_account=github_account)\n _install_dependencies(repo)\n added, deleted, updated = _generate_all_clients(repo)\n if not any([added, deleted, updated]):\n return\n _run_tests(repo)\n commitmsg = _commit_message.build(added, deleted, updated)\n repo.add(['api_names_out.yaml', 'generated'])\n repo.commit(commitmsg, github_account.name, github_account.email)\n repo.push()", "def update_os_packages(self):\n self.summarize_operation(\"Updating OS Packages\")\n print subprocess.call(shlex.split(\"sudo apt-get update -y\"))", "def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def _freeze(requirements, python):\n output = []\n try:\n version_out = subprocess.check_output(\n [python, \"--version\"], stderr=subprocess.STDOUT)\n output.append(version_out)\n version_all = version_out.decode('utf-8').split()[1]\n version = '.'.join(version_all.split('.')[:2])\n with fixtures.TempDir() as temp:\n output.append(subprocess.check_output(\n [python, '-m', 'venv', temp.path]))\n pip_bin = os.path.join(temp.path, 'bin', 'pip')\n output.append(subprocess.check_output(\n [pip_bin, 'install', '-U', 'pip', 'setuptools', 'wheel']))\n output.append(subprocess.check_output(\n [pip_bin, 'install', '-r', requirements]))\n freeze = subprocess.check_output(\n [pip_bin, 'freeze'])\n output.append(freeze)\n return (version, _parse_freeze(freeze.decode('utf-8')))\n except Exception as exc:\n if isinstance(exc, subprocess.CalledProcessError):\n output.append(exc.output)\n raise Exception(\n \"Failed to generate freeze: %s %s\"\n % (b'\\n'.join(output).decode('utf-8'), exc))", "def update_readme():\n\n temp = \"\"\"<head>\n <title>Unittest Results</title>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css\" integrity=\"sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7\" crossorigin=\"anonymous\">\n</head>\"\"\"\n\n with open(\"README_proxy.md\", \"r\") as old_readme_file:\n old_readme_txt = old_readme_file.read()\n\n with open(\"reports/test_result.html\", \"r\") as html_file:\n html = html_file.read().splitlines()[0:-21]\n html = \"\\n\".join(html).replace(temp, \"\")\n\n with open(\"README.md\", \"w\") as new_readme_file:\n new_readme_file.write(old_readme_txt + \"\\n\\n\\n\" + html + \"</body></html>\")" ]
[ "0.7084875", "0.70844316", "0.69630224", "0.69021636", "0.6898123", "0.6449469", "0.6324992", "0.62681717", "0.6202624", "0.6088608", "0.6079661", "0.6035315", "0.6010844", "0.6007739", "0.5954219", "0.5889616", "0.58804846", "0.58313686", "0.58312327", "0.5784639", "0.5762652", "0.57445145", "0.5733484", "0.57174057", "0.5714839", "0.5712307", "0.57108456", "0.5705552", "0.5701553", "0.56840646", "0.5676569", "0.56477475", "0.56360936", "0.5600089", "0.5592228", "0.5583702", "0.557452", "0.5558097", "0.55560654", "0.55388534", "0.5502892", "0.5485736", "0.5483998", "0.54786086", "0.5445733", "0.5434736", "0.5433873", "0.54292834", "0.5429065", "0.5414648", "0.53900987", "0.5381863", "0.5367683", "0.5351757", "0.5350366", "0.53464335", "0.5335741", "0.5333382", "0.5329442", "0.5329442", "0.5290385", "0.5287519", "0.5273974", "0.52692515", "0.5258439", "0.5255288", "0.52482224", "0.52324176", "0.5227999", "0.5227996", "0.5207782", "0.5201119", "0.51920986", "0.5179852", "0.5164468", "0.5147454", "0.5143614", "0.513734", "0.5123799", "0.5121262", "0.51127386", "0.5108106", "0.5107676", "0.510319", "0.5090679", "0.5083027", "0.50817853", "0.5077806", "0.5076981", "0.5072885", "0.5057207", "0.5053475", "0.5048093", "0.5044631", "0.5043591", "0.5042937", "0.5030585", "0.4994573", "0.4987151", "0.49823278" ]
0.68897945
5
Calc 2d spline course with interpolation
def calc_2d_spline_interpolation(x, y, num=100):
    sp = Spline2D(x, y)
    s = np.linspace(0, sp.s[-1], num+1)[:-1]

    r_x, r_y, r_yaw, r_k = [], [], [], []
    for i_s in s:
        ix, iy = sp.calc_position(i_s)
        r_x.append(ix)
        r_y.append(iy)
        r_yaw.append(sp.calc_yaw(i_s))
        r_k.append(sp.calc_curvature(i_s))

    travel = np.cumsum([np.hypot(dx, dy)
                        for dx, dy in zip(np.diff(r_x), np.diff(r_y))]).tolist()
    travel = np.concatenate([[0.0], travel])

    return r_x, r_y, r_yaw, r_k, travel
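A minimal self-contained sketch of the same 2D spline-course idea follows. The document above relies on a `Spline2D` helper that is not shown in this record (presumably defined elsewhere in the same module), so this sketch swaps in SciPy's `CubicSpline`; the function and variable names here are illustrative only.

```python
# Hedged sketch: 2D spline course (position, yaw, curvature) via SciPy,
# standing in for the Spline2D helper assumed by the record above.
import numpy as np
from scipy.interpolate import CubicSpline

def spline_course_scipy(x, y, num=100):
    # Parameterize by cumulative chord length (an approximation of arc length).
    s = np.concatenate([[0.0], np.cumsum(np.hypot(np.diff(x), np.diff(y)))])
    sx, sy = CubicSpline(s, x), CubicSpline(s, y)

    ss = np.linspace(0.0, s[-1], num + 1)[:-1]
    dx, dy = sx(ss, 1), sy(ss, 1)      # first derivatives along the parameter
    ddx, ddy = sx(ss, 2), sy(ss, 2)    # second derivatives

    yaw = np.arctan2(dy, dx)                                    # heading along the course
    curvature = (ddy * dx - ddx * dy) / (dx**2 + dy**2) ** 1.5  # signed curvature
    return sx(ss), sy(ss), yaw, curvature, ss

# Usage: interpolate a course through four waypoints.
rx, ry, ryaw, rk, travel = spline_course_scipy([0.0, 1.0, 2.5, 4.0],
                                               [0.0, 2.0, 1.0, 3.0])
```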
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolateSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [6*x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [6*x2, 2, 0, 0]\n \n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def interpolatePeriodicSpline(x, y) :\n n = len(x)\n\n dim = 4 * (n - 1)\n b = np.zeros((dim, 1))\n A = np.zeros((dim, dim))\n\n for i in range(n-1):\n x1 = x[i]\n x2 = x[i+1]\n y1 = y[i]\n y2 = y[i+1]\n b[i*4:(i+1)*4, 0] = [y1, y2, 0, 0]\n\n A[i*4, i*4:(i+1)*4] = [pow(x1,3), pow(x1,2), x1, 1] \n A[i*4+1, i*4:(i+1)*4] = [pow(x2,3), pow(x2,2), x2, 1]\n if (i != n-2):\n A[i*4+2, i*4:(i+2)*4] = [3*pow(x2,2), 2 * x2, 1, 0, -3*pow(x2,2), -2 * x2, -1, 0, ]\n A[i*4+3, i*4:(i+2)*4] = [6*x2, 2, 0, 0, -6*x2, -2, 0, 0]\n else: \n A[i*4+2, 0:4] = [3 * pow(x[0],2), 2 * x[0], 1, 0]\n A[i*4+2, i*4:(i+1)*4] = [-3 * pow(x2,2), -2 * x2, -1, 0]\n A[i*4+3, 0:4] = [6 * x[0], 2, 0, 0]\n A[i*4+3, i*4:(i+1)*4] = [-6 * x2, -2, 0, 0]\n # solve linear system for the coefficients of the spline\n coeffs = np.linalg.solve(A, b)\n\n # extract local pieces\n spline = []\n for k in range(n-1):\n spline.append(np.poly1d(coeffs[k*4:(k+1)*4, 0]))\n\n return spline", "def d2_spline(self, x):\n return (-1 + 3 * x ** 2 - (5 * x ** 3) / 3) * (self.unit_step(x) - self.unit_step(x - 1)) + \\\n (-7 / 2 + (15 * x) / 2 - (9 * x ** 2) / 2 + (5 * x ** 3) / 6) * (\n self.unit_step(x - 1) - self.unit_step(x - 2)) + \\\n (9 / 2 - (9 * x) / 2 + (3 * x ** 2) / 2 - x ** 3 / 6) * (\n self.unit_step(x - 2) - self.unit_step(x - 3))", "def spline_interp(h,yy,yy_diff2,x) :\n assert type(yy)==numpy.ndarray\n #print(__name__, type(h))\n assert type(h)!=numpy.ndarray\n \n n=yy.shape[0]\n nlo=max(int(x/h),0)\n if nlo>n-1: return(0.0)\n nhi=min(nlo+1,n-1)\n a=nhi-x/h # This is checked... 
different to Fortran version due to 0-based arrays\n b=1.0-a\n y=a*yy[nlo]+b*yy[nhi]+((a**3-a)*yy_diff2[nlo]+(b**3-b)*yy_diff2[nhi])*(h**2)/6.0\n return y", "def cspline_params(self):\n b = np.zeros(self.n)\n c = np.zeros(self.n-1)\n d = np.zeros(self.n-1)\n B = np.zeros(self.n)\n Q = np.ones(self.n-1)\n D = 2 * np.ones(self.n)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = (self.y[j+1] - self.y[j]) / dx[j]\n\n # Fill B\n B[0] = 3 * p[0]\n for i in range(self.n-2):\n B[i+1] = 3 * (p[i] + p[i+1] * dx[i] / dx[i+1])\n B[-1] = 3 * p[-2]\n \n # Fill D\n for i in range(self.n-2):\n D[i+1] = 2 * dx[i] / dx[i+1] + 2\n\n # Fill Q\n for i in range(self.n-2):\n Q[i+1] = dx[i] / dx[i+1]\n\n # Gauss elimination\n for i in range(1, self.n):\n D[i] = D[i] - Q[i-1] / D[i-1]\n B[i] = B[i] - B[i-1] / D[i-1]\n\n # Back-substitution\n b[-1] = B[-1] / D[-1]\n list = range(self.n-1)\n for i in list[::-1]:\n b[i] = (B[i] - Q[i] * b[i+1]) / D[i]\n\n # Calculate c and d\n for i in range(self.n-1):\n c[i] = (3 * p[i] - 2 * b[i] - b[i+1]) / dx[i]\n d[i] = (b[i] + b[i+1] - 2 * p[i]) / dx[i]\n c[-1] = -3 * d[-1] * dx[-1]\n\n return b, c, d", "def cubicSpline(x,y,x_int):\n\n #region \"learn\" the coefficients of the cubic polynomials that interpolate intervals in x.\n # amount of intervals/splines\n n = len(x)-1\n\n # a_i = y_i\n a = y[:-1]\n\n # h_i = x_{i+1} - x_i for i in 0..n-1\n h = x[1:]-x[:-1]\n\n # 2 * h_i + h_{i+1}\n diagA = 2*(h[1:]+h[:-1])\n \n # h_1..h_n-2\n hInA = h[1:-1]\n\n A = np.eye(n-1)*diagA\n # distribute h_1..h_n-2 above and underneath the diagonal\n A += np.diag(hInA,1)\n A += np.diag(hInA,-1)\n\n # construct RHS\n z = 3/h[1:] * (y[2:] - y[1:-1]) - 3/h[:-1] * (y[1:-1] - y[:-2])\n\n # c_0 = c_{n} = 0\n c = np.zeros(n+1)\n\n c[1:-1] = np.linalg.solve(A,z)\n \n b = (y[1:]-y[:-1])/h - h/3*(c[1:] + 2*c[:-1])\n\n d = 1/(3*h)*(c[1:]-c[:-1])\n #endregion\n\n #region interpolate all points in x_int\n y_int = x_int.copy()\n # for all intervals\n for i in range(len(x)-1):\n # find points to interpolate within given interval\n idx = np.where(np.logical_and(x[i]<= x_int,x_int < x[i+1]))[0]\n xx = x_int[idx]\n yy = np.polyval(np.array([d[i],c[i],b[i],a[i]]), xx-x[i])\n y_int[idx] = yy\n print(f'interpolating in interval [{x[i]},{x[i+1]}[')\n print(xx)\n print(yy)\n print('\\n')\n\n # edgecase where x_int contains exactly last interval border\n #find indicies if x_int contains dupes\n idx = np.where(x_int == x[len(x)-1])[0] \n # interpolate with last interval polynomial\n i = len(a)-1\n y_int[idx] = np.polyval(np.array([d[i],c[i],b[i],a[i]]), x_int[idx]-x[i])\n #endregion\n return y_int", "def get_spline(points):\n import numpy\n import scipy.linalg\n\n # sort points by x value\n points = sorted(points, key=lambda point: point[\"x\"])\n\n n = len(points) - 1\n\n # Set up a system of equations of form Ax=b\n A = numpy.zeros(shape=(4*n, 4*n))\n b = numpy.zeros(shape=(4*n, 1))\n\n for i in range(0, n):\n # 2n equations from condtions (S2)\n A[i][4*i+0] = points[i][\"x\"]**3\n A[i][4*i+1] = points[i][\"x\"]**2\n A[i][4*i+2] = points[i][\"x\"]\n A[i][4*i+3] = 1\n b[i] = points[i][\"y\"]\n\n A[n+i][4*i+0] = points[i+1][\"x\"]**3\n A[n+i][4*i+1] = points[i+1][\"x\"]**2\n A[n+i][4*i+2] = points[i+1][\"x\"]\n A[n+i][4*i+3] = 1\n b[n+i] = points[i+1][\"y\"]\n\n # 2n-2 equations for (S3):\n if i == 0:\n continue\n # point i is an inner point\n A[2*n+(i-1)][4*(i-1)+0] = 3*points[i][\"x\"]**2\n 
A[2*n+(i-1)][4*(i-1)+1] = 2*points[i][\"x\"]\n A[2*n+(i-1)][4*(i-1)+2] = 1\n A[2*n+(i-1)][4*(i-1)+0+4] = -3*points[i][\"x\"]**2\n A[2*n+(i-1)][4*(i-1)+1+4] = -2*points[i][\"x\"]\n A[2*n+(i-1)][4*(i-1)+2+4] = -1\n b[2*n+(i-1)] = 0\n\n A[3*n+(i-1)][4*(i-1)+0] = 6*points[i][\"x\"]\n A[3*n+(i-1)][4*(i-1)+1] = 2\n A[3*n+(i-1)][4*(i-1)+0+4] = -6*points[i][\"x\"]\n A[3*n+(i-1)][4*(i-1)+1+4] = -2\n b[3*n+(i-1)] = 0\n # Natural spline:\n A[3*n-1+0][0+0] += 6*points[0][\"x\"]\n A[3*n-1+0][0+1] += 2\n b[3*n-1+0] += 0\n\n A[3*n+n-1][4*(n-1)+0] += 6*points[n][\"x\"]\n A[3*n+n-1][4*(n-1)+1] += 2\n b[3*n+n-1] += 0\n\n x = scipy.linalg.solve(A, b)\n spline = []\n for i in range(0, n):\n spline.append({\"u\": points[i][\"x\"], \"v\": points[i+1][\"x\"],\n \"a\": float(x[4*i+0]),\n \"b\": float(x[4*i+1]),\n \"c\": float(x[4*i+2]),\n \"d\": float(x[4*i+3])})\n return spline", "def y(df,x):\r\n x_p=np.array(df['Vertices'])\r\n y_p=np.array(df['DIxPRE 252'])\r\n cs = scipy.interpolate.splrep(x_p,y_p)\r\n return scipy.interpolate.splev(x,cs)", "def b_spline(x,y,deg,N):\n tck,u = interpolate.splprep([x,y],k=deg,s=0)\n X = np.linspace(1.0,0.0,N)\n spline = interpolate.splev(X,tck)\n return spline[0],spline[1]", "def spline(self):\n self.rho = np.linspace(0,1,self.nrho)\n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n try:\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n except:\n self.vt = np.zeros(self.nrho, dtype=float)\n self.zeff = self._spline(self.rho_in, self.zeff_in, self.rho)\n\n self._extrapolate()", "def spline_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tD = d_coefficients(x1,x2,x3,C)\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,C,D)\n\tA = a_coefficients(y1,y2)\n\treturn(A,B,C[:2],D)", "def fitspline2d(x,y,z,nx,ny,order=3,s=1000):\n print()\n print(\"fitting 2d spline. 
go refresh your coffee...\\n\")\n xnew_edges, ynew_edges = np.mgrid[0:nx+1:complex(nx+1), 0:ny+1:complex(ny+1)]\n xnew = xnew_edges[:-1, :-1] + np.diff(xnew_edges[:2, 0])[0] / 2.\n ynew = ynew_edges[:-1, :-1] + np.diff(ynew_edges[0, :2])[0] / 2.\n tck = interpolate.bisplrep(x, y, z, s=s,kx=order,ky=order)\n znew = interpolate.bisplev(xnew[:,0], ynew[0,:], tck)\n zim = np.transpose(znew)\n # testing to see if transpose is not necessary\n #zim = znew\n print(\"returning to your regular program...\")\n return zim", "def interpolate_spline(y, N):\n l = len(y)\n x = np.linspace(0, l, l)\n spline = interpolate.InterpolatedUnivariateSpline(x,y)\n xnew = np.linspace(0, l, N*l)\n ynew = spline(xnew)\n return ynew", "def _prepare_spline_interpolator(self):\n\n # TODO Replace by scipy.ndimage.interpolation.map_coordinates\n\n from scipy.interpolate import RectBivariateSpline\n\n x = self.offset.value\n y = np.log10(self.energy.value)\n\n self._spline = RectBivariateSpline(x, y, self.eff_area.value)", "def interp_spline(x0, x, y, z):\n\n size = len(x)\n\n # find index\n index = np.asarray(x.searchsorted(x0), dtype=bool)\n np.clip(index, 1, size - 1, index)\n\n xi1, xi0 = x[index], x[index - 1]\n yi1, yi0 = y[index], y[index - 1]\n zi1, zi0 = z[index], z[index - 1]\n hi1 = xi1 - xi0\n # print(xi0, xi1, yi0, yi1, zi0, zi1)\n\n # calculate cubic\n f0 = zi0 / (6 * hi1) * (xi1 - x0) ** 3 + zi1 / (6 * hi1) * (x0 - xi0) ** 3 + (yi1 / hi1 - zi1 * hi1 / 6) * (\n x0 - xi0) + (yi0 / hi1 - zi0 * hi1 / 6) * (xi1 - x0)\n\n return f0", "def evaluate_spline_coeffs(self, i):\n width = self.bounds[i] - self.bounds[i - 1] # region size, lambda\n si = self.slopes[i]\n sim1 = self.slopes[i - 1]\n rsi = self.region_slopes[i]\n gim1 = self.gis[i - 1]\n\n a = (3.0 * (3.0 * si + sim1) - 12.0 * rsi) / (2.0 * width ** 3)\n b = (12.0 * rsi - 3.0 * (si + 3.0 * sim1)) / (2.0 * width ** 3)\n c = 2 * rsi - 0.5 * si - 0.5 * sim1\n d = gim1 - width * (0.5 * rsi - 0.125 * si - 0.375 * sim1)\n\n return a, b, c, d", "def _smooth(self):\n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n self._extrapolate()", "def interpolate_2d(x, y, z, x_new, y_new):\n fun = RectBivariateSpline(x, y, z, kx=1, ky=1) # linear interpolation\n return fun(x_new, y_new)", "def create_spline(x, y):\n\n x = np.asfarray(x)\n y = np.asfarray(y)\n\n # remove non finite values\n indexes = np.isfinite(x)\n x = x[indexes]\n y = y[indexes]\n\n # check if sorted\n if np.any(np.diff(x) < 0):\n indexes = np.argsort(x)\n x = x[indexes]\n y = y[indexes]\n\n size = len(x)\n\n xdiff = np.diff(x)\n ydiff = np.diff(y)\n\n # allocate buffer matrices\n Li = np.empty(size)\n Li_1 = np.empty(size - 1)\n z = np.empty(size)\n\n # fill diagonals Li and Li-1 and solve [L][y] = [B]\n Li[0] = np.sqrt(2 * xdiff[0])\n Li_1[0] = 0.0\n B0 = 0.0 # natural boundary\n z[0] = B0 / Li[0]\n\n for i in range(1, size - 1, 1):\n Li_1[i] = xdiff[i - 1] / Li[i - 1]\n Li[i] = np.sqrt(2 * (xdiff[i - 1] + xdiff[i]) - Li_1[i - 1] * Li_1[i - 1])\n Bi = 6 * (ydiff[i] / xdiff[i] - ydiff[i - 1] / xdiff[i - 1])\n z[i] = (Bi - Li_1[i - 1] * z[i - 1]) / Li[i]\n\n i = size - 1\n Li_1[i - 1] = xdiff[-1] / Li[i - 1]\n Li[i] = np.sqrt(2 * xdiff[-1] - Li_1[i - 1] * Li_1[i - 1])\n Bi = 0.0 # natural boundary\n z[i] = (Bi - Li_1[i - 1] * 
z[i - 1]) / Li[i]\n\n # solve [L.T][x] = [y]\n i = size - 1\n z[i] = z[i] / Li[i]\n for i in range(size - 2, -1, -1):\n z[i] = (z[i] - Li_1[i - 1] * z[i + 1]) / Li[i]\n\n return x, y, z", "def smooth(self):\n self.te=self._spline(self.te_in[0,:], self.te_in[1,:], self.rho)\n self.ne=self._spline(self.ne_in[0,:], self.ne_in[1,:], self.rho)\n self.ti=self._spline(self.ti_in[0,:], self.ti_in[1,:], self.rho)\n self.vt=self._spline(self.vt_in[0,:], self.vt_in[1,:], self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.ni_in[i,0,:], self.ni_in[i,1,:], self.rho)\n self._extrapolate()", "def parametric_combined_spline(x, y, k=3, resolution=100, kv=None, s=None):\n x = np.array(x)\n y = np.array(y)\n\n nt = np.linspace(0, 1, resolution)\n\n # Prepare linear combination of splines with given knot vector\n tckp,u = scipy.interpolate.splprep([x,y],k=(k or 3),t=kv,s=s)\n x2, y2 = scipy.interpolate.splev(np.linspace(0,1,400), tckp)\n\n return x2, y2", "def splineint(a, b, y, mu1, munp1):\n n = len(y) - 1\n h = (b - a)/float(n)\n c, d = ones(n - 2), 4*ones(n - 1)\n l ,u = trifactor(c, d, c)\n b1 = (6/h**2)*(y[2:(n + 1)] - 2*y[1:n] + y[:(n - 1)])\n b1[0] -= mu1\n b1[n - 2] -= munp1\n trisolve(l, u, c, b1)\n mu2ton = b1\n mu = zeros(n + 1)\n mu[0] = mu1\n mu[1:n] = mu2ton\n mu[n] = munp1\n \n C = array(zeros((n, 4))) # Apply lemma 1.3 to obtain the coefficient matrix\n C[:, 0] = y[:n]\n C[:, 1] = (y[1:(n+1)] - y[:n])/h - h*mu[:n]/3-h*mu[1:(n+1)]/6\n C[:, 2] = mu[:n]/2\n C[:, 3] = (mu[1:(n + 1)] - mu[0:n])/(6*h)\n C = matrix(C)\n return linspace(a, b - h, n), C", "def compute_spline(self, initial_state, final_state):\r\n a, b, c, s = self._initialize_spline(initial_state, final_state)\r\n final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n\r\n converge = self._check_converge(final_state, final_state_pred)\r\n total_iter = 0\r\n # pdb.set_trace()\r\n while (total_iter < self.max_iter) & (converge is not True): # (total_iter < self.max_iter) \r\n \r\n \r\n correction = self._compute_correction(initial_state, final_state, a, b, c, s)\r\n a = a - correction[0]\r\n b = b - correction[1]\r\n # c = c - correction[2]\r\n s = s - correction[2]\r\n \r\n final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n\r\n converge = self._check_converge(final_state, final_state_pred)\r\n total_iter = total_iter +1\r\n\r\n # print(total_iter)\r\n # print(final_state_pred)\r\n # print(s)\r\n\r\n # sometimes it converge to negative s (travel distance) which \r\n # is invalid..., need to figure it out...\r\n if (converge == True) & (s > 0):\r\n final_state_pred, point_list = self._path_sampling_one_shot(initial_state, a, b, c, s)\r\n else:\r\n point_list = [[-1,-1]]\r\n\r\n return point_list", "def interp_n2(t, x, y):\n\n return y[:, 0] + (t - x[0]) * (y[:, 1] - y[:, 0]) / (x[1] - x[0])", "def interp_2d(_x, _y, _x_min, _x_step, _nx, _y_min, _y_step, _ny, _ar_f, _ord=3, _ix_per=1, _ix_ofst=0):\r\n if(_ord == 1): #bi-linear interpolation based on 4 points\r\n ix0 = int(trunc((_x - _x_min)/_x_step + 1.e-09))\r\n if(ix0 < 0):\r\n ix0 = 0\r\n elif(ix0 >= _nx - 1):\r\n ix0 = _nx - 2\r\n ix1 = ix0 + 1\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n \r\n iy0 = int(trunc((_y - _y_min)/_y_step + 1.e-09))\r\n if(iy0 < 0):\r\n iy0 = 0\r\n elif(iy0 >= _ny - 1):\r\n iy0 = _ny - 2\r\n iy1 = iy0 + 1\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n ix0_ix_per_p_ix_ofst = 
ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = f10 - a00\r\n a01 = f01 - a00\r\n a11 = a00 - f01 - f10 + f11\r\n return a00 + tx*(a10 + ty*a11) + ty*a01\r\n\r\n elif(_ord == 2): #bi-quadratic interpolation based on 6 points\r\n ix0 = int(round((_x - _x_min)/_x_step))\r\n if(ix0 < 1):\r\n ix0 = 1\r\n elif(ix0 >= _nx - 1):\r\n ix0 = _nx - 2\r\n ixm1 = ix0 - 1\r\n ix1 = ix0 + 1\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n\r\n iy0 = int(round((_y - _y_min)/_y_step))\r\n if(iy0 < 1):\r\n iy0 = 1\r\n elif(iy0 >= _ny - 1):\r\n iy0 = _ny - 2\r\n iym1 = iy0 - 1\r\n iy1 = iy0 + 1\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iym1_nx_ix_per = iym1*nx_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n ixm1_ix_per_p_ix_ofst = ixm1*_ix_per + _ix_ofst\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n fm10 = _ar_f[iy0_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f0m1 = _ar_f[iym1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = 0.5*(f10 - fm10)\r\n a01 = 0.5*(f01 - f0m1)\r\n a11 = a00 - f01 - f10 + f11\r\n a20 = 0.5*(f10 + fm10) - a00\r\n a02 = 0.5*(f01 + f0m1) - a00\r\n return a00 + tx*(a10 + tx*a20 + ty*a11) + ty*(a01 + ty*a02)\r\n \r\n elif(_ord == 3): #bi-cubic interpolation based on 12 points\r\n ix0 = int(trunc((_x - _x_min)/_x_step + 1.e-09))\r\n if(ix0 < 1):\r\n ix0 = 1\r\n elif(ix0 >= _nx - 2):\r\n ix0 = _nx - 3\r\n ixm1 = ix0 - 1\r\n ix1 = ix0 + 1\r\n ix2 = ix0 + 2\r\n tx = (_x - (_x_min + _x_step*ix0))/_x_step\r\n\r\n iy0 = int(trunc((_y - _y_min)/_y_step + 1.e-09))\r\n if(iy0 < 1):\r\n iy0 = 1\r\n elif(iy0 >= _ny - 2):\r\n iy0 = _ny - 3\r\n iym1 = iy0 - 1\r\n iy1 = iy0 + 1\r\n iy2 = iy0 + 2\r\n ty = (_y - (_y_min + _y_step*iy0))/_y_step\r\n\r\n nx_ix_per = _nx*_ix_per\r\n iym1_nx_ix_per = iym1*nx_ix_per\r\n iy0_nx_ix_per = iy0*nx_ix_per\r\n iy1_nx_ix_per = iy1*nx_ix_per\r\n iy2_nx_ix_per = iy2*nx_ix_per\r\n ixm1_ix_per_p_ix_ofst = ixm1*_ix_per + _ix_ofst\r\n ix0_ix_per_p_ix_ofst = ix0*_ix_per + _ix_ofst\r\n ix1_ix_per_p_ix_ofst = ix1*_ix_per + _ix_ofst\r\n ix2_ix_per_p_ix_ofst = ix2*_ix_per + _ix_ofst\r\n f0m1 = _ar_f[iym1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f1m1 = _ar_f[iym1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n fm10 = _ar_f[iy0_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n a00 = _ar_f[iy0_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f10 = _ar_f[iy0_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f20 = _ar_f[iy0_nx_ix_per + ix2_ix_per_p_ix_ofst]\r\n fm11 = _ar_f[iy1_nx_ix_per + ixm1_ix_per_p_ix_ofst]\r\n f01 = _ar_f[iy1_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f11 = _ar_f[iy1_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n f21 = _ar_f[iy1_nx_ix_per + ix2_ix_per_p_ix_ofst]\r\n f02 = _ar_f[iy2_nx_ix_per + ix0_ix_per_p_ix_ofst]\r\n f12 = _ar_f[iy2_nx_ix_per + ix1_ix_per_p_ix_ofst]\r\n a10 = -0.5*a00 + f10 - f20/6 - fm10/3\r\n a01 = -0.5*a00 + f01 - f02/6 - f0m1/3\r\n a11 = -0.5*(f01 + f10) + (f02 - f12 + f20 - f21)/6 + (f0m1 - f1m1 + fm10 - fm11)/3 + f11\r\n a20 = -a00 + 0.5*(f10 + fm10)\r\n a02 = -a00 + 0.5*(f01 + f0m1)\r\n a21 = a00 - f01 + 0.5*(f11 - f10 - 
fm10 + fm11)\r\n a12 = a00 - f10 + 0.5*(f11 - f01 - f0m1 + f1m1)\r\n a30 = 0.5*(a00 - f10) + (f20 - fm10)/6\r\n a03 = 0.5*(a00 - f01) + (f02 - f0m1)/6\r\n a31 = 0.5*(f01 + f10 - f11 - a00) + (f21 + fm10 - f20 - fm11)/6\r\n a13 = 0.5*(f10 - f11 - a00 + f01) + (f0m1 + f12 - f02 - f1m1)/6\r\n return a00 + tx*(a10 + tx*(a20 + tx*(a30 + ty*a31) + ty*a21) + ty*a11) + ty*(a01 + ty*(a02 + ty*(a03 + tx*a13) + tx*a12))\r\n return 0", "def splineval(x,C,X):\n m = len(X)\n i = findsubintervals(x,X) \n G = zeros(m)\n for j in range(m):\n k = i[j]\n t = X[j] - x[k]\n G[j]=C[k,:]* t**array([[0],[1],[2],[3]])\n return G", "def _computeSpline(self, p0, p1, p2, p3):\n t = 0.0\n while t <= 1:\n point = CatmullRomSpline.computePoint(p0, p1, p2, p3, t)\n self.points.append(point)\n t += 0.1", "def spline_linear(x, f, x_k, x_ki):\n A = (x_ki - x) / (x_ki - x_k)\n B = (x - x_k) / (x_ki - x_k)\n \n return A*f(x_k) + B*f(x_ki)", "def smooth(self):\n \n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n\n #self.zeff = self._spline(self.rho_in, self.zeff_in, self.rho)\n\n self._extrapolate()", "def bspline_curve2d():\n # Create a curve instance\n curve = BSpline.Curve()\n\n # Set curve degree\n curve.degree = 3\n\n # Set control points\n curve.ctrlpts = [[5.0, 5.0], [10.0, 10.0], [20.0, 15.0], [35.0, 15.0], [45.0, 10.0], [50.0, 5.0]]\n\n # Set knot vector\n curve.knotvector = [0.0, 0.0, 0.0, 0.0, 0.33, 0.66, 1.0, 1.0, 1.0, 1.0]\n\n # Set sample size\n curve.sample_size = SAMPLE_SIZE\n\n # Return the instance\n return curve", "def drawLine2P(x,y,xlims):\n \n xrange = np.arange(xlims[0],xlims[1],1)\n A = np.vstack([x, np.ones(len(x))]).T\n k, b = np.linalg.lstsq(A, y, rcond=None)[0]\n return [xrange, k*xrange + b]", "def interpol(self,x,y,x1):\n \n N = len(x)\n i = np.minimum(np.maximum(np.searchsorted(x,x1,side='right'),1),N-1)\n xl = x[i-1]\n xr = x[i]\n yl = y[i-1]\n yr = y[i]\n y1 = yl + (yr-yl)/(xr-xl) * (x1-xl)\n above = x1 > x[-1]\n below = x1 < x[0]\n y1 = np.where(above,y[-1] + (x1 - x[-1]) * (y[-1]-y[-2])/(x[-1]-x[-2]), y1)\n y1 = np.where(below,y[0],y1)\n \n return y1, i", "def smooth(self):\n \n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n self._extrapolate()", "def d1_spline(self, x):\n return (-x + x ** 3 - (5 * x ** 4) / 12) * (self.unit_step(x) - self.unit_step(x - 1)) + \\\n (5 / 8 - (7 * x) / 2 + (15 * x ** 2) / 4 - (3 * x ** 3) / 2 + (5 * x ** 4) / 24) * (\n self.unit_step(x - 1) - self.unit_step(x - 2)) + \\\n (-27 / 8 + (9 * x) / 2 - (9 * x ** 2) / 4 + x ** 3 / 2 - x ** 4 / 24) * (\n self.unit_step(x - 2) - self.unit_step(x - 3))", "def get2DSpline( self ):\n return (self._splines[self._layout.dims_order[-2]],\n self._splines[self._layout.dims_order[-1]])", "def __hinterpolate(self):\n \n # Temp. 
Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint", "def method_2d(knots,y_n,num):\n cv_iter = 
10 # number of iteration for cross-validation \n GSV = np.zeros((cv_iter,cv_iter))\n# tr = np.zeros((cv_iter,cv_iter))\n# fun =np.zeros((cv_iter,cv_iter))\n lam_x = np.linspace(0,0.2,cv_iter)\n lam_y = np.linspace(0,0.2,cv_iter)\n num_knots = len(knots)\n linear_knots = knots[1:num_knots-1]\n num_knots = num_knots-4\n znam = np.zeros((num_knots))\n basis = np.zeros((num,num_knots))\n basis_1 = np.zeros((num,num_knots))\n basis_deriative = np.zeros((num,num_knots))\n basis_deriative_1 = np.zeros((num,num_knots))\n S = np.zeros((num_knots,num_knots,num))\n vs = BsplineVectorSpace(2, knots)\n vs_1 = BsplineVectorSpace(1, linear_knots)\n I_i = np.eye(num_knots)\n for i in xrange(0,num_knots):\n basis[:,i] = vs.basis_der(i,0)(np.linspace(0,1,num))\n basis_deriative[:,i] = vs.basis_der(i,1)(np.linspace(0,1,num))/num\n basis_1[:,i] = vs_1.basis_der(i,0)(np.linspace(0,1,num))\n basis_deriative_1[:,i] = vs_1.basis_der(i,1)(np.linspace(0,1,num))/num\n B = abs(basis_deriative-basis_1)\n S = np.zeros((num_knots,num_knots,num))\n k = np.zeros((num_knots,num_knots,num))\n for i in xrange(num_knots):\n for j in xrange(num_knots):\n S[i,j,:] = B[:,i]*B[:,j]\n k[i,j,:] =basis_deriative_1[:,i] * basis_deriative_1[:,j]\n S_int = np.zeros((num_knots,num_knots))\n k_int = np.zeros((num_knots,num_knots))\n for i in xrange(num_knots):\n for j in xrange(num_knots):\n S_int[i,j] = integrate.trapz(S[i,j,:])\n k_int[i,j] = integrate.trapz(k[i,j,:])\n basis_product = np.kron(basis,basis)\n S_x = np.kron(S_int,I_i)\n S_y = np.kron(I_i,S_int)\n K_x = np.kron(k_int,I_i)\n K_y = np.kron(I_i,k_int)\n for i in xrange(cv_iter):\n for j in xrange(cv_iter):\n influence_matrix = np.dot(np.dot(basis_product,(np.linalg.inv(np.dot(np.transpose(\n basis_product),basis_product)+lam_x[i]*S_x+lam_y[j]*S_y+lam_x[i]* K_x+lam_y[j]*K_y))),np.transpose(basis_product))\n for k in xrange(num_knots):\n znam[k] =(1-influence_matrix[k,k])**2\n tr = np.sum(znam)\n fun = np.sum((y_n-np.dot(influence_matrix,y_n))**2)\n GSV[i,j] =fun/(num*tr)\n print i,j\n a,b = np.unravel_index(GSV.argmin(), GSV.shape)\n# a = np.argmin(np.argmin(GSV,axis = 0))\n# b = np.argmin(np.argmin(GSV,axis = 1))\n lamb_x = lam_x[a]\n lamb_y = lam_y[b]\n print lamb_x,lamb_y\n model_fit = np.dot(np.dot(np.dot(basis_product,(np.linalg.inv(np.dot(np.transpose(\n basis_product),basis_product)+lamb_x*S_x+lamb_y*S_y+lamb_x* K_x+lamb_y*K_y))),np.transpose(basis_product)),y_n)\n return model_fit,GSV", "def spline(*args) -> core.Spline:\n X, Y, kws = util.parseargs(*args)\n return core.Spline(X, Y)", "def compute_interpolator(self, **kwargs):\n from scipy.interpolate import RectBivariateSpline\n\n if 'degree' in kwargs:\n degree = kwargs['degree']\n if hasattr(degree, '__iter__') and len(degree) == 2:\n degx = int(degree[0])\n degy = int(degree[1])\n else:\n degx = int(degree)\n degy = int(degree)\n if degx < 0 or degy < 0:\n raise ValueError(\"Interpolator degree must be a non-negative \"\n \"integer\")\n else:\n degx = 3\n degy = 3\n\n smoothness = kwargs.get('s', 0)\n\n x = np.arange(self._nx, dtype=float)\n y = np.arange(self._ny, dtype=float)\n self.interpolator = RectBivariateSpline(\n x, y, self._data.T, kx=degx, ky=degy, s=smoothness\n )\n\n self._store_interpolator_kwargs(**kwargs)", "def compute_interpolator(self, **kwargs):\n from scipy.interpolate import RectBivariateSpline\n\n if 'degree' in kwargs:\n degree = kwargs['degree']\n if hasattr(degree, '__iter__') and len(degree) == 2:\n degx = int(degree[0])\n degy = int(degree[1])\n else:\n degx = int(degree)\n degy = 
int(degree)\n if degx < 0 or degy < 0:\n raise ValueError(\"Interpolator degree must be a non-negative \"\n \"integer\")\n else:\n degx = 3\n degy = 3\n\n smoothness = kwargs.get('s', 0)\n\n # Interpolator must be set to interpolate on the undersampled\n # pixel grid, going from 0 to len(undersampled_grid)\n x = np.arange(self._nx, dtype=float) / self.oversampling[1]\n y = np.arange(self._ny, dtype=float) / self.oversampling[0]\n self.interpolator = RectBivariateSpline(\n x, y, self._data.T, kx=degx, ky=degy, s=smoothness)\n\n self._store_interpolator_kwargs(**kwargs)", "def spline_derivative(x_vector, y_vector):\n tck = interpolate.splrep(x_vector, y_vector, k=3)\n deriv_y = np.array(interpolate.spalde(x_vector, tck))\n deriv_y = deriv_y[:, 1] # take the first derivative of the spline\n return deriv_y", "def interpolateCatmulRomeSpline(points):\n pointsLen = len(points)\n\n # The curve curve(C) will contain an array of (x,y) points.\n curve = []\n for i in range(pointsLen-3):\n segment = interpolateCatmulRomeSegment(points[i], points[i+1], points[i+2], points[i+3])\n curve.extend(segment)\n\n return curve", "def smooth(self):\n \n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n\n self.zeff = self._spline(self.rho_in, self.zeff_in, self.rho)\n\n self._extrapolate()", "def cr_spline(A, mu):\n mu, mu2 = mu, mu ** 2\n\n return A[0] * mu * mu2 + A[1] * mu2 + A[2] * mu + A[3]", "def interpol(x, X, Y):\n \n for idx, xx in enumerate(X):\n if x <= xx:\n break\n \n x2 = xx \n y2 = Y[idx]\n x1 = X[idx-1] \n y1 = Y[idx-1] \n y = (y2-y1)/(x2-x1)*(x-x1) + y1\n \n return y", "def Interpolate(ax, ay, x, npoints):\r\n\r\n assert(ax[1]>ax[0]) # test for ascending order, at least for first point\r\n \r\n if (verbose): \r\n print 'interpolate/extrapolate to x=',x,', npoints=',npoints\r\n\r\n # Find best data points to use, based on which are closest to \r\n # requested point x. 
Will find <npoints> (or fewer) best data points and \r\n # return as an array.\r\n ibest = FindBest(ax,x,npoints)\r\n npoints = len(ibest) # make sure npoints is updated in case was reduced\r\n if (verbose): \r\n print 'ibest',ibest\r\n\r\n # Build the polynomial y(x), evaluated at the point x.\r\n y = 0.0\r\n for i in range(npoints): # do i=0,npoints-1\r\n li = 1.0\r\n ni = ibest[i] # index to ith best point\r\n # build up li[x] term, evaluated at the point x\r\n for j in range(npoints): # do j=0,npoints-1\r\n if (i != j): # exclude j=i term\r\n nj = ibest[j] # index to jth best point\r\n li = li*(x-ax[nj])/(ax[ni]-ax[nj])\r\n y = y+ay[ni]*li\r\n \r\n return y", "def interpolate(x, x_arr, y_arr, type='interp', order=3, left=None,\n right=None):\n if type == 'interp':\n y = np.interp(x, x_arr, y_arr, left=left, right=right)\n if type == 'spline':\n if left is None:\n y_arr[0] = left\n if right is None:\n y_arr[-1] = right\n\n tk = scint.splrep(x_arr, y_arr, k=order)\n y = scint.splev(x, tk, der=0)\n\n return y", "def spline(self, smoothing=None):\r\n from lsst.analysis import utils\r\n return utils.fitspline(self.points, self.z, smoothing)", "def spline_base(self, x):\n return (11 / 20 - x ** 2 / 2 + x ** 4 / 4 - x ** 5 / 12) * (self.unit_step(x) - self.unit_step(x - 1)) + \\\n (17 / 40 + (5 * x) / 8 - (7 * x ** 2) / 4 + (5 * x ** 3) / 4 - (3 * x ** 4) / 8 + x ** 5 / 24) * (\n self.unit_step(x - 1) - self.unit_step(x - 2)) + \\\n (243 / 120 - (81 * x) / 24 + (9 * x ** 2) / 4 - (3 * x ** 3) / 4 + x ** 4 / 8 - x ** 5 / 120) * (\n self.unit_step(x - 2) - self.unit_step(x - 3))", "def solve_i():\r\n x = np.array([ -2.1, -1.45, -1.3, -0.2, 0.1, 0.15, 0.8, 1.1, 1.5, 2.8, 3.8 ])\r\n y = np.array([0.012155, 0.122151, 0.184520, 0.960789, 0.990050, 0.977751,\r\n 0.527292, 0.298197, 0.105399, 3.936690E-4, 5.355348E-7])\r\n # find and plot both interpolations and the oiginal points\r\n plt.figure(1)\r\n cubic_interpol(x,y)\r\n lin_interpol(x,y)\r\n plt.plot(x, y, 'rx', ms = 10, label = 'Points')\r\n # plot settings\r\n plt.title('Cubic & Linear Interpolation Given Points')\r\n plt.xlabel('x',fontsize = 14)\r\n plt.ylabel('y',fontsize = 14)\r\n plt.legend()", "def graph_smooth_from_pts():\n fig_name = 'lect2_isom'\n\n # given data\n x = np.array([0.0, 0.2, 0.4, 0.6, 0.65])\n ra = np.array([39.0, 53.0, 59.0, 38.0, 25.0])\n design_eq = np.divide(50.0, ra)\n print(\"Isom example design equation points: {}\".format(design_eq))\n\n # cubic spline\n tck = interpolate.splrep(x, design_eq, s=0)\n x_new = np.linspace(0.0, 0.7, 101)\n y_new = interpolate.splev(x_new, tck, der=0)\n # alternately, from interpolation\n cubic_interp = interpolate.interp1d(x, design_eq, kind='quadratic', fill_value=\"extrapolate\")\n make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_new,\n x3_array=x_new, y3_array=cubic_interp(x_new),\n y1_label=\"data\", y2_label=\"quadratic\", y3_label=\"cubic\",\n x_label=r'conversion (X, unitless)', y_label=r'$\\displaystyle\\frac{F_{A0}}{-r_A} \\left(m^3\\right)$',\n x_lima=0.0, x_limb=0.7, y_lima=0.0, y_limb=2.5,\n )", "def different_quadratic_extrpolation_upper(x_interp, x_spline, y_spline):\n\n index_upper_1 = len(x_spline) - 2\n index_upper_2 = len(x_spline) - 1\n x1_upper = x_spline[index_upper_1]\n x2_upper = x_spline[index_upper_2]\n f2_upper = y_spline[index_upper_2]\n\n df1_dx_upper = calc_gradient(x_spline, y_spline, index_upper_1)/(x2_upper - x1_upper)\n df2_dx_upper = calc_gradient(x_spline, y_spline, index_upper_2)/(x2_upper - x1_upper)\n\n # Solve 2ax-b = df_dx 
for the gradient at point 1 and 2\n # Rearrange both equations to find 'a' and 'b' quadratic coefficients\n a_upper = (df2_dx_upper - df1_dx_upper)/(2.*(x2_upper - x1_upper))\n b_upper = df1_dx_upper - 2.*a_upper*x1_upper\n\n # Find c by solving at the fixed points (f = a x**2 + bx + c) at point 1 for the lower, and point 2 for the upper\n c_upper = f2_upper - a_upper*x2_upper**2 - b_upper*x2_upper\n return a_upper*x_interp**2 + b_upper*x_interp + c_upper", "def spline_liniara(X, Y, pointx):\n n = len(X)\n\n for j in range(0, n - 1):\n if X[j] <= pointx <= X[j + 1]:\n a = Y[j]\n b = (Y[j + 1] - Y[j]) / (X[j + 1] - X[j])\n return a + b * (pointx - X[j])", "def testSplineCurveIsC1Smooth(self):\n x1 = jnp.linspace(0., 8., 10000)\n x2 = x1 + 1e-7\n\n fn = self.variant(distribution.partition_spline_curve)\n y1 = fn(x1)\n y2 = fn(x2)\n grad = jax.grad(lambda z: jnp.sum(fn(z)))\n dy1 = grad(x1)\n dy2 = grad(x2)\n\n chex.assert_tree_all_close(y1, y2, atol=1e-5, rtol=1e-5)\n chex.assert_tree_all_close(dy1, dy2, atol=1e-5, rtol=1e-5)", "def myinterp2d(x, y, z, xnew, ynew, method='linear'):\n x = np.ravel(x)\n y = np.ravel(y)\n z = np.ravel(z)\n znew = griddata((x, y), z, (xnew, ynew), method=method, fill_value=0.)\n return znew", "def interpolation(x, y, x_new, model=\"InterpolatedUnivariateSpline\", plot=False, title=\"\"):\n\n num_col = y.shape[1]\n\n if isinstance(x_new, float):\n num_row = 1\n else:\n num_row = len(x_new)\n\n y_new = np.zeros((num_row, num_col))\n y_new_dot = np.zeros((num_row, num_col))\n\n if model == \"InterpolatedUnivariateSpline\":\n\n for idx in range(0, num_col):\n\n # TODO: InterpolatedUnivariateSpline seems to have problems with multidimensional arrays\n interpolate_ = interpolate.InterpolatedUnivariateSpline(x, y[:, idx], k=3)\n y_new[:, idx] = interpolate_(x_new)\n # y_new_dot[:,idx] = spline.derivative()(x_new) #TODO: Does this work too?\n y_new_dot[:, idx] = _determine_time_derivate(interpolate_, x_new, 1)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n elif model == \"interp1d\":\n\n # TODO: Extrapolation has to be handled (e.g. 
with fill_value=(0.0,0.0) argument)\n interpolate_ = interpolate.interp1d(x, y, kind=\"cubic\", axis=0, bounds_error=False)\n y_new = interpolate_(x_new)\n y_new_dot = _determine_time_derivate(interpolate_, x_new, num_col)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n elif model == \"lagrange\":\n\n for idx in range(0, num_col):\n\n # Rescaling of data necessary, because Lagrange interpolation is numerical unstable\n xm = np.mean(x)\n ym = np.mean(y[:, idx])\n xs = np.std(x)\n ys = np.std(y[:, idx])\n xscaled = (x - xm) / xs\n yscaled = (y[:, idx] - ym) / ys\n\n # interpolate_ = interpolate.lagrange(xscaled, yscaled) # worst performance\n # interpolate_ = _lagrange2(xscaled, yscaled) # improved performance\n interpolate_ = _lagrange(xscaled, yscaled) # fastest performance\n y_new[:, idx] = interpolate_((x_new - xm) / xs) * ys + ym\n\n # Determine derivate of 'y_new'\n t_offset = 0.5\n y1 = interpolate_((x_new - t_offset - xm) / xs) * ys + ym\n y2 = interpolate_((x_new + t_offset - xm) / xs) * ys + ym\n\n y_new_dot[:, idx] = (y2 - y1) / (t_offset * 2)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n elif model == \"BarycentricInterpolator\":\n\n for idx in range(0, num_col):\n\n # TODO: Is the rescaling of data necessary here?\n xm = np.mean(x)\n ym = np.mean(y[:, idx])\n xs = np.std(x)\n ys = np.std(y[:, idx])\n xscaled = (x - xm) / xs\n yscaled = (y[:, idx] - ym) / ys\n\n interpolate_ = interpolate.BarycentricInterpolator(xscaled, yscaled)\n y_new[:, idx] = interpolate_((x_new - xm) / xs) * ys + ym\n\n # Determine derivate of 'y_new'\n t_offset = 0.5\n y1 = interpolate_((x_new - t_offset - xm) / xs) * ys + ym\n y2 = interpolate_((x_new + t_offset - xm) / xs) * ys + ym\n\n y_new_dot[:, idx] = (y2 - y1) / (t_offset * 2)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n # elif model == 'polyfit':\n\n # #y_new_dot = np.zeros((len(x_new), num_col))\n # degree = 2\n # poly_dot_coeff = np.zeros((degree, num_col))\n #\n\n # #TODO: Polynomial fit over the complete time period of 3 days are not convenient.\n # # Difference of up to 1000 km between original SP3 orbit data and polynomial\n # # fit.\n\n # x_ref = np.amin(x) #TODO: orb_ref_time should be part of 'orb' Dataset\n # x = (x - x_ref) * 86400\n # poly_coeff = poly.polyfit(x, y, degree)\n # y_new = poly.polyval(x_new,poly_coeff)\n\n # for idx in range(0,num_col):\n # poly_dot_coeff[:,idx] = np.polyder(poly_coeff[:,idx])\n # polynomial_dot = np.poly1d(poly_dot_coeff[:,idx])\n # y_new_dot[:,idx] = polynomial_dot((x_new - x_ref) * 86400)\n\n # if plot == True:\n # _plot_orbit_polynomial(sat, poly_coeff, x, y)\n\n return y_new, y_new_dot", "def interpolate(x0, y0, x1, y1, x):\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n\n return y", "def getSplineCurve(self, waypoints):\n num_waypoints = len(waypoints)\n\n # Store waypoint data for interpolation\n positions = np.zeros((num_waypoints + 1, 3))\n for waypoint_id in range(num_waypoints):\n cur_waypoint = waypoints[waypoint_id]\n cur_position = cur_waypoint.position\n cur_gps_pos = cur_position.gps_position\n positions[waypoint_id, 0] = cur_gps_pos.latitude\n positions[waypoint_id, 1] = cur_gps_pos.longitude\n positions[waypoint_id, 2] = cur_position.altitude_msl\n\n # Get the intra waypoint travel times\n waypoint_travel_times = self.getInterWaypointTravelTimes(waypoints)\n # Get the waypoint times\n pos_times = self.getWaypointTimes(waypoint_travel_times)\n total_travel_time = 
pos_times[len(pos_times)-1]\n\n # Create spline representation\n spline_k = 3 if num_waypoints >= 3 else 2 # Cubic if enough points\n spline_reps = list()\n for iter_dim in range(3):\n tck = splrep(pos_times, positions[:, iter_dim], k=spline_k, per=1)\n spline_reps.append(tck)\n\n return (total_travel_time, spline_reps)", "def spline(nodes: np.ndarray, resolution=360, degree=3) -> np.ndarray:\n data = np.vstack((nodes, nodes[0]))\n tck, u = interpolate.splprep([data[:, 0], data[:, 1]], s=0, per=True, k=degree)[:2]\n return np.array(interpolate.splev(np.linspace(0, 1, resolution), tck)).T", "def get_interpolator(x: np.array, y: np.array):\n return intp.PchipInterpolator(x, y)", "def fit_spline(x, y, **kwargs):\n xf, yf = get_finite(x,y)\n iisort = np.argsort(xf)\n return interpolate.UnivariateSpline(xf[iisort],yf[iisort], **kwargs)", "def polynomialInterpolation2D(self,graph,T):\n x=[graph[i][0] for i in range(len(graph))]\n y=[graph[i][1] for i in range(len(graph))]\n return lambda t:(self.polynomialInterpolation(x)(t),self.polynomialInterpolation(y)(t))", "def DrawSpline(*args, **kwargs):\n return _gdi_.DC_DrawSpline(*args, **kwargs)", "def compute_smoothed_traj(path, V_des, alpha, dt):\n ########## Code starts here ##########\n x_old = []\n y_old = []\n for point in path:\n x_old.append(point[0])\n y_old.append(point[1])\n\n t_old = [0] * len(path)\n for i in range(1, len(path)):\n dist = ((x_old[i] - x_old[i-1])**2 + (y_old[i] - y_old[i-1])**2)**.5\n t_old[i] = t_old[i-1] + dist / V_des\n\n t_smoothed = np.arange(0, t_old[-1], dt)\n\n # Get new splines\n spl_x_old = scipy.interpolate.splrep(t_old, x_old, s=alpha)\n spl_y_old = scipy.interpolate.splrep(t_old, y_old, s=alpha)\n\n # Evaluate splines at t_smoothed times\n spl_x_new = scipy.interpolate.splev(t_smoothed, spl_x_old)\n spl_y_new = scipy.interpolate.splev(t_smoothed, spl_y_old)\n\n # Calculate first derivatives\n spl_xd_new = scipy.interpolate.splev(t_smoothed, spl_x_old, 1)\n spl_yd_new = scipy.interpolate.splev(t_smoothed, spl_y_old, 1)\n\n # Calculate second derivatives\n spl_xdd_new = scipy.interpolate.splev(t_smoothed, spl_x_old, 2)\n spl_ydd_new = scipy.interpolate.splev(t_smoothed, spl_y_old, 2)\n\n traj_smoothed = np.zeros((len(t_smoothed), 7))\n # Create trajectory\n for t in range(len(t_smoothed)):\n if(t == 0):\n THETA = 0\n else:\n THETA = np.arctan2((spl_y_new[t] - spl_y_new[t-1]),\n (spl_x_new[t] - spl_x_new[t-1]))\n if(t == 1):\n traj_smoothed[0, 2] = THETA\n traj_smoothed[t] = np.array([spl_x_new[t], spl_y_new[t], THETA, spl_xd_new[t], spl_yd_new[t], spl_xdd_new[t], spl_ydd_new[t]])\n ########## Code ends here ##########\n\n return traj_smoothed, t_smoothed\n ########## Code ends here ##########\n\n return traj_smoothed, t_smoothed", "def interpolator(mat, distance):\n\n# calculate the place of each meassurement relative to the whole yourney of the\n# ship\n gesdistance = np.zeros(len(distance)+1)\n gesdis = distance[0]\n for i in range(1, len(distance)+1):\n gesdistance[i] = gesdistance[i-1] + distance[i-1]\n if i < len(distance):\n gesdis = gesdis + distance[i]\n\n# calculates the minimum distance for number of points of the interpolation\n mini = distance[0]\n for i in range(len(distance)):\n if distance[i] < mini:\n mini = distance[i]\n\n# interpolates linear over every depth\n newmat = np.zeros((len(mat), int(gesdis/mini)))\n\n wth = 0\n for leng in range(len(newmat)):\n newveloc = interp1d(gesdistance, mat[leng, :], kind=\"linear\")\n for wth in range(int(gesdis/mini)):\n newmat[leng, wth] = 
newveloc(wth*mini)\n for wdth in range(int(gesdis/mini)):\n newvelocdepth = interp1d(np.append(np.arange(0, 458, 20), 458), np.append(newmat[::20, wdth], newmat[457, wdth]), kind=\"linear\")\n for le in range(len(newmat)):\n newmat[le, wdth] = newvelocdepth(le)\n\n return np.flip(newmat), gesdis", "def qspline_params(self):\n b = np.zeros(self.n-1)\n c = np.zeros(self.n-1)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = (self.y[j+1] - self.y[j]) / dx[j]\n \n # Find c forward-recursively\n list = range(self.n-2)\n for i in list:\n c[i+1] = (p[i+1] - p[i] - c[i] * dx[i]) / dx[i+1]\n \n # Find c backward-recursively from 1/2c_n-1\n c[-1] = c[-1] / 2\n for i in list[::-1]:\n c[i] = (p[i+1] - p[i] - c[i+1] * dx[i+1]) / dx[i]\n\n # Find b\n for i in range(self.n-1):\n b[i] = p[i] - c[i] * dx[i]\n return b, c", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.01):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = 0.0\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if 
points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def H(x, X, Y, dY):\n\n def L(i):\n #return p[i] * (x ** i)\n p = [(x - X[i]) / (X[j] - X[i]) for j in range(n) if j != i]\n return reduce(op.mul, p)\n\n def dL(i):\n #return d[i-1] * (x ** (i-1))\n if i < n-1:\n return (Y[i+1] - Y[i]) / (X[i+1] - X[i])\n else:\n return (Y[i] - Y[i-1]) / (X[i] - X[i-1])\n\n def A(i):\n return (1 - 2 * (x - X[i]) * dL(i)) * (L(i) ** 2)\n\n def B(i):\n return (x - X[i]) * (L(i) ** 2)\n\n assert(len(X) != 0 and len(X) == len(Y)), 'Quantidade de valores em X e Y diferentes'\n n = len(X)\n #p = interpolate.lagrange(X, Y)\n #d = polyder(p)\n h1 = sum(A(i) * Y[i] for i in range(n))\n h2 = sum(B(i) * dY[i] for i in range(n))\n return h1 + h2", "def interpolate(self, xs):\n tck = splrep(self._xs, self._ys)\n new_ys = splev(xs, tck, der=0)\n return new_ys", "def spliner(x, y, k=3, sig=5, s=None, fev=100, w=None, clip='both', \\\n verbose=False, plotfit=False, plotall=False, diag=False):\n # 2010-07-05 13:51 IJC: Adapted from polyfitr\n from numpy import polyfit, polyval, isfinite, ones\n from scipy import interpolate\n from pylab import plot, legend, title\n\n xx = array(x, copy=True)\n yy = array(y, copy=True)\n noweights = (w==None)\n if noweights:\n ww = ones(xx.shape, float)\n else:\n ww = array(w, copy=True)\n\n #ww = 1./err**2\n\n ii = 0\n nrej = 1\n\n goodind = isfinite(xx)*isfinite(yy)*isfinite(ww)\n \n #xx = xx[goodind]\n #yy = yy[goodind]\n #ww = ww[goodind]\n\n while (ii<fev and (nrej<>0)):\n spline = interpolate.UnivariateSpline(xx[goodind],yy[goodind],w=ww[goodind],s=s,k=k)\n residual = yy[goodind] - spline(xx[goodind])\n stdResidual = std(residual)\n #if verbose: print stdResidual\n if clip=='both':\n ind = abs(residual) <= (sig*stdResidual) \n elif clip=='above':\n ind = residual < sig*stdResidual\n elif clip=='below':\n ind = residual > -sig*stdResidual\n else:\n ind = ones(residual.shape, bool)\n\n goodind *= ind\n #xx = xx[ind]\n #yy = yy[ind]\n #ww = ww[ind]\n\n ii += 1\n nrej = len(residual) - len(xx)\n if plotall:\n plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Iter. 
#' + str(ii) + ' -- Close all windows to continue....')\n\n if verbose:\n print str(len(x)-len(xx[goodind])) + ' points rejected on iteration #' + str(ii)\n\n if (plotfit or plotall):\n plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Close window to continue....')\n\n if diag:\n chisq = ( (residual)**2 / yy )[goodind].sum()\n spline = (spline, chisq, ii, goodind)\n\n return spline", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.0):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = pts[0][2]\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return 
paint_point", "def _spline(self,rho,data, rho_new):\n dummy = interpolate.InterpolatedUnivariateSpline(rho, data, ext=0)\n data_new = dummy(rho_new)\n return data_new", "def DrawSpline(*args, **kwargs):\n return _gdi_.PseudoDC_DrawSpline(*args, **kwargs)", "def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):\n return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)", "def lin_interpol(x_p, y_p):\r\n f = np.zeros([ x_p.shape[0] - 1 , 4 ]) # Coefficents and interval array\r\n \r\n for i in range( x_p.shape[0] - 1 ): # for every x[i], x[i+1] pair\r\n \r\n x_coeff = (y_p[i+1] - y_p[i]) / (x_p[i+1] - x_p[i])\r\n const = (x_p[i+1]*y_p[i] - x_p[i]*y_p[i+1] ) / (x_p[i+1] - x_p[i])\r\n \r\n # save the x coefficent, constant and the interval for this line\r\n f[i,:] = x_coeff, const, x_p[i], x_p[i+1]\r\n \r\n for a, b, start, end in f: # for every line fitted\r\n line_x = np.linspace( start, end, 3) # points to plot in x_range\r\n line_y = line_x * a + b # find the fitted line value at these points\r\n plt.plot(line_x,line_y,'k--', lw = 1, label = 'Linear' if a==f[0][0] else \"\") # only label one plot\r", "def _interpolation(self, video):\n self.F_int = []\n self.mgrid_0 = []\n self.mgrid_1 = []\n for p in range(video.points.shape[0]):\n _m_0, _m_1 = np.meshgrid(self.extended_points_0[p], self.extended_points_1[p])\n _F_int = interp2d(self.extended_points_0[p], self.extended_points_1[p], video.mraw[0, _m_0, _m_1], kind='cubic')\n self.F_int.append(_F_int)\n\n m_0, m_1 = np.meshgrid(self.extended_points_0[p, self.pad:-self.pad], self.extended_points_1[p, self.pad:-self.pad])\n self.mgrid_0.append(m_0)\n self.mgrid_1.append(m_1)", "def fitfunc_SC(x, *p):\n val = p[0]\n for n in range(0, len(p) - 1, 2):\n ind = n + 1\n mode = (n / 2) + 1\n val = (\n val\n + p[ind] * np.sin(2 * np.pi * mode * (x) / 360.0)\n + p[ind + 1] * np.cos(2 * np.pi * mode * (x) / 360.0)\n )\n return val", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def interpolate_point(xi, x, y):\n num_pts = len(x)\n if num_pts%2==0:\n #Even\n i_h2 = num_pts/2\n i_h1 = num_pts/2 - 1\n if x[i_h2] < xi:\n return interpolate_point(xi, x[i_h2:], y[0:i_h2:])\n elif x[i_h1] > xi:\n return interpolate_point(xi, x[0:i_h1], y[0:i_h1])\n else:\n return ((xi-x[i_h1])*y[i_h2]+(x[i_h2]-xi)*y[i_h1])/(x[i_h2]-x[i_h1])\n else:\n #Odd\n i_half = num_pts/2\n if x[i_half] < xi:\n return interpolate_point(xi, x[i_half:], y[i_half:])\n elif x[i_half] > xi:\n return interpolate_point(xi, x[0:i_half+1], y[0:i_half+1])\n else:\n return y[i_half]", "def different_quadratic_extrpolation_lower(x_interp, x_spline, y_spline):\n index_lower_1 = 0\n index_lower_2 = 1\n x1_lower = x_spline[index_lower_1]\n x2_lower = x_spline[index_lower_2]\n x3_lower = x_spline[index_lower_2 + 1]\n f1_lower = y_spline[index_lower_1]\n\n df1_dx_lower = calc_gradient(x_spline, y_spline, index_lower_1)/(x2_lower - x1_lower)\n df2_dx_lower = calc_gradient(x_spline, y_spline, index_lower_2)/(x3_lower - x2_lower)\n\n # Solve 2ax-b = df_dx for the gradient at point 1 and 2\n # Rearrange both equations to find 'a' and 'b' quadratic coefficients\n a_lower = (df2_dx_lower - df1_dx_lower)/(2.*(x2_lower - x1_lower))\n 
b_lower = df1_dx_lower - 2.*a_lower*x1_lower\n\n # Find c by solving at the fixed points (f = a x**2 + bx + c) at point 1 for the lower, and point 2 for the upper\n c_lower = f1_lower - a_lower*x1_lower**2 - b_lower*x1_lower\n return a_lower*x_interp**2 + b_lower*x_interp + c_lower", "def interpolateCubicPeriodic() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th parameter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolatePeriodicSpline(keytime, y)\n S.append(interpolants)\n return S", "def compute_smoothed_traj(path, V_des, alpha, dt):\n ########## Code starts here ##########\n #convert path to numpy array because otherwise it's a pain!\n path = np.array(path)\n #create the time vector buy finding the distance from each point and dividing by the straight line velocity\n N = len(path)\n t = np.zeros(N)\n for i in range(1, N):\n #get the distance between the points\n distance = np.linalg.norm(path[i, :] - path[i-1, :])\n #calc the time based on distance and velocity\n t[i] = distance/V_des + t[i-1]\n t_smoothed = np.arange(t[0], t[-1], dt);\n print(t_smoothed.size)\n \n #interpolate over the given path \n x_tck = scipy.interpolate.splrep(t, path[:,0], s=alpha)\n y_tck = scipy.interpolate.splrep(t, path[:,1], s=alpha)\n \n #allocate for the trajectory\n traj_smoothed = np.zeros([len(t_smoothed),7])\n \n #generate the states\n traj_smoothed[:,0] = scipy.interpolate.splev(t_smoothed, x_tck)\n traj_smoothed[:,1] = scipy.interpolate.splev(t_smoothed, y_tck)\n traj_smoothed[:,3] = scipy.interpolate.splev(t_smoothed, x_tck, der=1)\n traj_smoothed[:,4] = scipy.interpolate.splev(t_smoothed, y_tck, der=1)\n traj_smoothed[:,2] = np.arctan2(traj_smoothed[:,4], traj_smoothed[:,3])\n traj_smoothed[:,5] = scipy.interpolate.splev(t_smoothed, x_tck, der=2)\n traj_smoothed[:,6] = scipy.interpolate.splev(t_smoothed, y_tck, der=2) \n ########## Code ends here ##########\n\n return traj_smoothed, t_smoothed", "def test_multi_segment_non_axis_aligned_bspline_second_derivative() -> None:\n # a length=1.0 3-segment b-spline that isn't axis aligned. 
Each\n # segment is length=1/3.\n control_points = np.array(\n [\n (0.0, 0.0, 0.0),\n (-1 / 12 * (2 + 2 ** 0.5), 1 / 12 * (2 - 2 ** 0.5), 1 / 6),\n (-1.0 / 3.0, 1.0 / 3.0, 0.0),\n (-1.0 / 6.0, 1.0 / 2.0, 1.0 / (3.0 * (2 ** 0.5))),\n ]\n )\n curve = BSplineCurve(control_points)\n nominal_second_derivatives = np.array(\n # computed using Mathematica's BSplineFunction over\n # np.linspace(0.0, 3.0, num=13) (inputs scaled by 1/3)\n [\n [1.41421, 1.41421, -2.0],\n [1.4041, 1.23744, -1.63215],\n [1.39399, 1.06066, -1.2643],\n [1.38388, 0.883883, -0.896447],\n [1.37377, 0.707107, -0.528595],\n [1.36366, 0.53033, -0.160744],\n [1.35355, 0.353553, 0.207107],\n [1.34344, 0.176777, 0.574958],\n [1.33333, 4.44089 * 10 ** -16, 0.942809],\n [1.32322, -0.176777, 1.31066],\n [1.31311, -0.353553, 1.67851],\n [1.303, -0.53033, 2.04636],\n [1.29289, -0.707107, 2.41421],\n ]\n )\n for index, parameter in np.ndenumerate(np.linspace(0.0, 3.0, num=13)):\n np.testing.assert_allclose(\n curve.second_derivative_at(parameter),\n nominal_second_derivatives[index],\n err_msg=(\n \"Fails to find the second_derivative along a \"\n \"multi-displacement non-axis-aligned b-spline.\"\n ),\n )", "def interpolV(y, x, newX):\r\n \r\n num = len(x)\r\n #if (num != len(y)):\r\n #//System.out.println(\"Toolbox.interpolV(): Old x and y must be same length\"); \r\n \r\n newNum = len(newX)\r\n #//System.out.println(\"interpolV: newNum \" + newNum + \" num \" + num); \r\n #newY = [0.0 for i in range(newNum)]\r\n\r\n#//Renormalize ordinates:\r\n \r\n iMinAndMax = minMax(y)\r\n norm = y[iMinAndMax[1]]\r\n #//System.out.println(\"norm \" + norm);\r\n #yNorm = [0.0 for i in range(num)]\r\n newYNorm = [0.0 for i in range(newNum)] \r\n #for i in range(num):\r\n # yNorm[i] = y[i] / norm \r\n yNorm = [ x / norm for x in y ]\r\n\r\n#// Set any newX elements that are *less than* the first x element to th first \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n start = 0\r\n for i in range(newNum):\r\n if (newX[i] <= x[1]):\r\n newYNorm[i] = yNorm[0]\r\n start += 1\r\n \r\n if (newX[i] > x[1]):\r\n break\r\n \r\n \r\n#//System.out.println(\"start \" + start);\r\n#//System.out.println(\"x[0] \" + x[0] + \" x[1] \" + x[1] + \" newX[start] \" + newX[start]);\r\n#double jWght, jm1Wght, denom;\r\n\r\n\r\n if (start < newNum-1):\r\n\r\n j = 1 #//initialize old abscissae index\r\n #//outer loop over new abscissae\r\n for i in range(start, newNum):\r\n\r\n #//System.out.println(\"i \" + i + \" j \" + j);\r\n\r\n#// break out if current element newX is *greater* that last x element\r\n if ( (newX[i] > x[num-1]) or (j > (num-1)) ):\r\n break \r\n \r\n\r\n while (x[j] < newX[i]): \r\n j += 1\r\n \r\n #//System.out.println(\"i \" + i + \" newX[i] \" + newX[i] + \" j \" + j + \" x[j-1] \" + x[j-1] + \" x[j] \" + x[j]);\r\n #//1st order Lagrange method:\r\n jWght = newX[i] * (1.0 - (x[j-1]/newX[i])) #//(newX[i]-x[j-1])\r\n jm1Wght = x[j] * (1.0 - (newX[i]/x[j])) #//(x[j]-newX[i])\r\n denom = x[j] * (1.0 - (x[j-1]/x[j])) #//(x[j]-x[j-1])\r\n jWght = jWght / denom\r\n jm1Wght = jm1Wght / denom\r\n #//newYNorm[i] = (yNorm[j]*(newX[i]-x[j-1])) + (yNorm[j-1]*(x[j]-newX[i]));\r\n newYNorm[i] = (yNorm[j]*jWght) + (yNorm[j-1]*jm1Wght)\r\n #//System.out.println(\"i \" + i + \" newYNorm[i] \" + newYNorm[i] + \" j \" + j + \" yNorm[j-1] \" + yNorm[j-1] + \" yNorm[j] \" + yNorm[j]);\r\n \r\n\r\n#// Set any newX elements that are *greater than* the first x element to the last \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n for i in range(newNum):\r\n if 
(newX[i] >= x[num-1]):\r\n newYNorm[i] = yNorm[num-1]\r\n \r\n \r\n\r\n #//Restore orinate scale\r\n #for i in range(newNum):\r\n # newY[i] = newYNorm[i] * norm \r\n newY = [ x * norm for x in newYNorm ]\r\n\r\n\r\n return newY", "def bspline_surface():\n # Create a surface instance\n surf = BSpline.Surface()\n\n # Set degrees\n surf.degree_u = 3\n surf.degree_v = 3\n\n # Set control points\n surf.ctrlpts_size_u = 6\n surf.ctrlpts_size_v = 6\n surf.ctrlpts = [[-25.0, -25.0, -10.0], [-25.0, -15.0, -5.0], [-25.0, -5.0, 0.0], [-25.0, 5.0, 0.0],\n [-25.0, 15.0, -5.0], [-25.0, 25.0, -10.0], [-15.0, -25.0, -8.0], [-15.0, -15.0, -4.0],\n [-15.0, -5.0, -4.0], [-15.0, 5.0, -4.0], [-15.0, 15.0, -4.0], [-15.0, 25.0, -8.0],\n [-5.0, -25.0, -5.0], [-5.0, -15.0, -3.0], [-5.0, -5.0, -8.0], [-5.0, 5.0, -8.0],\n [-5.0, 15.0, -3.0], [-5.0, 25.0, -5.0], [5.0, -25.0, -3.0], [5.0, -15.0, -2.0],\n [5.0, -5.0, -8.0], [5.0, 5.0, -8.0], [5.0, 15.0, -2.0], [5.0, 25.0, -3.0],\n [15.0, -25.0, -8.0], [15.0, -15.0, -4.0], [15.0, -5.0, -4.0], [15.0, 5.0, -4.0],\n [15.0, 15.0, -4.0], [15.0, 25.0, -8.0], [25.0, -25.0, -10.0], [25.0, -15.0, -5.0],\n [25.0, -5.0, 2.0], [25.0, 5.0, 2.0], [25.0, 15.0, -5.0], [25.0, 25.0, -10.0]]\n\n # Set knot vectors\n surf.knotvector_u = [0.0, 0.0, 0.0, 0.0, 0.33, 0.66, 1.0, 1.0, 1.0, 1.0]\n surf.knotvector_v = [0.0, 0.0, 0.0, 0.0, 0.33, 0.66, 1.0, 1.0, 1.0, 1.0]\n\n # Set sample size\n surf.sample_size = SAMPLE_SIZE\n\n # Return the instance\n return surf", "def refine(self, spline_data, tolerance=170.0, recursions=0):\r\n\r\n # self.spline_data = [coo, u, t, der1, der2, tck]\r\n xx, yy = spline_data[0]\r\n t = spline_data[2]\r\n tck = spline_data[5]\r\n\r\n logger.debug('\\nPoints before refining: {} \\n'.format(len(xx)))\r\n\r\n xn = copy.deepcopy(xx)\r\n yn = copy.deepcopy(yy)\r\n tn = copy.deepcopy(t)\r\n\r\n j = 0\r\n refinements = 0\r\n first = True\r\n refined = dict()\r\n\r\n for i in range(len(xx) - 2):\r\n refined[i] = False\r\n\r\n # angle between two contour line segments\r\n a = np.array([xx[i], yy[i]])\r\n b = np.array([xx[i + 1], yy[i + 1]])\r\n c = np.array([xx[i + 2], yy[i + 2]])\r\n angle = Utils.angle_between(a - b, c - b, degree=True)\r\n\r\n if angle < tolerance:\r\n\r\n logger.debug('Refining between segments {} {},'\r\n .format(i, i + 1))\r\n logger.debug('Tol={0:5.1f}, Angle={1:05.1f}\\n'\r\n .format(tolerance, angle))\r\n\r\n refined[i] = True\r\n refinements += 1\r\n\r\n # parameters for new points\r\n t1 = (t[i] + t[i + 1]) / 2.\r\n t2 = (t[i + 1] + t[i + 2]) / 2.\r\n\r\n # coordinates of new points\r\n p1 = interpolate.splev(t1, tck, der=0)\r\n p2 = interpolate.splev(t2, tck, der=0)\r\n\r\n # insert points and their parameters into arrays\r\n if i > 0 and not refined[i - 1]:\r\n xn = np.insert(xn, i + 1 + j, p1[0])\r\n yn = np.insert(yn, i + 1 + j, p1[1])\r\n tn = np.insert(tn, i + 1 + j, t1)\r\n j += 1\r\n xn = np.insert(xn, i + 2 + j, p2[0])\r\n yn = np.insert(yn, i + 2 + j, p2[1])\r\n tn = np.insert(tn, i + 2 + j, t2)\r\n j += 1\r\n\r\n if first and recursions > 0:\r\n logger.debug('Recursion level: {} \\n'.format(recursions))\r\n first = False\r\n\r\n logger.debug('Points after refining: {}'.format(len(xn)))\r\n\r\n # update coordinate array, including inserted points\r\n spline_data[0] = (xn, yn)\r\n # update parameter array, including parameters of inserted points\r\n spline_data[2] = tn\r\n\r\n # this is the recursion :)\r\n if refinements > 0:\r\n self.refine(spline_data, tolerance, recursions + 1)\r\n\r\n # stopping from recursion if no refinements done 
in this recursion\r\n else:\r\n # update derivatives, including inserted points\r\n spline_data[3] = interpolate.splev(tn, tck, der=1)\r\n spline_data[4] = interpolate.splev(tn, tck, der=2)\r\n\r\n logger.debug('No more refinements.')\r\n logger.debug('\\nTotal number of recursions: {}'\r\n .format(recursions - 1))\r\n\r\n # due to recursive call to refine, here no object can be returned\r\n # instead use self to transfer data to the outer world :)\r\n self.spline_data = copy.deepcopy(spline_data)\r\n return", "def Spline_Intermediate_Points(Output, Method, Program, properties, Temperature, molecules_in_coord, Pressure,\n Statistical_mechanics, min_RMS_gradient, **keyword_parameters):\n from scipy.interpolate import spline\n print \"Using cubic spline to determine intermediate temperature steps.\"\n # Setting file endings\n if Program == 'Tinker':\n file_ending = '.xyz'\n elif Program == 'Test':\n file_ending = '.npy'\n keyword_parameters['Parameter_file'] = ''\n\n Temperature = np.sort(np.unique(np.append(Temperature, properties[:,0])))\n\n # Setting step points and tangents/gradients at those points\n if (Method == 'GiQ') or (Method == 'GiQg'):\n spline_points = np.zeros(len(Temperature))\n tangent = np.load(Output + '_dV_' + Method + '.npy')\n elif (Method == 'GaQ') or (Method == 'GaQg'):\n spline_points = np.zeros((len(Temperature), 3, 3))\n tangent = np.load(Output + '_dh_' + Method + '.npy')\n\n for i in range(len(Temperature)):\n if any(Temperature[i] == properties[:, 0]) != True:\n for j in range(len(properties[:, 0])):\n if properties[j, 0] < Temperature[i]:\n lower_bound = j\n if properties[j, 0] > Temperature[i]:\n upper_bound = j\n break\n h = properties[upper_bound, 0] - properties[lower_bound, 0]\n theta = (Temperature[i] - properties[lower_bound, 0]) / h\n if (Method == 'GiQ') or (Method == 'GiQg'):\n spline_points[i] = RK_Dense_Output(theta, properties[lower_bound, 6], properties[upper_bound, 6],\n tangent[lower_bound, 2], tangent[upper_bound, 2], h)\n if (Method == 'GaQ') or (Method == 'GaQg'):\n matrix_order = np.matrix([[0, 0], [1, 1], [2, 2], [0, 1], [0, 2], [1, 2]])\n lower_crystal_matrix = Ex.Lattice_parameters_to_Crystal_matrix(properties[lower_bound, 7:13])\n upper_crystal_matrix = Ex.Lattice_parameters_to_Crystal_matrix(properties[upper_bound, 7:13])\n for j in range(6):\n spline_points[i, matrix_order[j, 0], matrix_order[j, 1]] = \\\n RK_Dense_Output(theta, lower_crystal_matrix[matrix_order[j, 0], matrix_order[j, 1]],\n upper_crystal_matrix[matrix_order[j, 0], matrix_order[j, 1]],\n tangent[lower_bound, matrix_order[j, 0], matrix_order[j, 1] + 4],\n tangent[upper_bound, matrix_order[j, 0], matrix_order[j, 1] + 4], h)\n\n for i in range(len(Temperature)):\n if any(Temperature[i] == properties[:, 0]) != True:\n print \" Adding in temperature point: \" + str(Temperature[i]) + \" K\"\n for j in range(len(properties[:, 0])):\n if properties[j, 0] < Temperature[i]:\n lower_bound = j\n if properties[j, 0] > Temperature[i]:\n upper_bound = j\n break\n\n if (Method == 'GiQ') or (Method == 'GiQg'):\n volume_fraction_change = spline_points[i]/properties[i - 1, 6]\n dcrystal_matrix = 0.\n keyword_parameters['Crystal_matrix_Reference'] = 0.\n if Method == 'GiQ':\n keyword_parameters['Gruneisen'] = 0.\n keyword_parameters['Volume_Reference'] = 0.\n keyword_parameters['Wavenumber_Reference'] = 0.\n elif (Method == 'GaQ') or (Method == 'GaQg'):\n volume_fraction_change = 0.\n dcrystal_matrix = spline_points[i] - \\\n Ex.Lattice_parameters_to_Crystal_matrix(properties[lower_bound, 
7:13])\n keyword_parameters['Volume_Reference'] = 0.\n if Method == 'GaQ':\n keyword_parameters['Gruneisen'] = 0.\n keyword_parameters['Wavenumber_Reference'] = 0.\n keyword_parameters['Crystal_matrix_Reference'] = 0.\n\n# Ex.Call_Expansion(Method, 'expand', Program, 'Cords/' + Output + '_' + Method + 'T' +\n# str(properties[lower_bound, 0]) + file_ending, molecules_in_coord, min_RMS_gradient,\n Ex.Call_Expansion(Method, 'expand', Program, 'temp' + file_ending, molecules_in_coord, min_RMS_gradient,\n Parameter_file=keyword_parameters['Parameter_file'],\n volume_fraction_change=volume_fraction_change, dcrystal_matrix=dcrystal_matrix,\n Output=Output + '_' + Method + 'T' + str(Temperature[i]))\n os.system('cp ' + Output + '_' + Method + 'T' + str(Temperature[i]) + file_ending + ' temp' + file_ending)\n wavenumbers = Wvn.Call_Wavenumbers(Method, min_RMS_gradient, Program=Program,\n Coordinate_file=Output + '_' + Method + 'T' + str(Temperature[i])\n + file_ending,\n Parameter_file=keyword_parameters['Parameter_file'],\n Gruneisen=keyword_parameters['Gruneisen'],\n Wavenumber_Reference=keyword_parameters['Wavenumber_Reference'],\n Volume_Reference=keyword_parameters['Volume_Reference'],\n Crystal_matrix_Reference=keyword_parameters['Crystal_matrix_Reference'],\n New_Crystal_matrix=spline_points[i],\n New_Volume=spline_points[i])\n properties = np.insert(properties, upper_bound, Pr.Properties(Output + '_' + Method + 'T' +\n str(Temperature[i]) + file_ending,\n wavenumbers, Temperature[i], Pressure,\n Program, Statistical_mechanics,\n molecules_in_coord, Parameter_file=\n keyword_parameters['Parameter_file']),\n axis=0)\n os.system('mv ' + Output + '_' + Method + 'T' + str(Temperature[i]) + file_ending + ' Cords/')\n else:\n os.system('cp Cords/' + Output + '_' + Method + 'T' + str(Temperature[i]) + file_ending + ' ./temp' + file_ending)\n os.system('rm temp' + file_ending)\n return properties", "def model_spline(train_x, train_y, test_x, deg):\n train_x = train_x.rename('x', axis=1)\n train_y = train_y.rename('y', axis=1)\n train_df = pd.concat([train_x, train_y], axis=1)\n model_fit = sm.formula.ols(\"y ~ cr(x, df=\" + str(deg) + \", constraints='center')\", data=train_df).fit()\n model_info = {'model': 'spline', 'const': model_fit.params.xs('Intercept')}\n test_x = test_x.rename('x')\n predictions = model_fit.predict(test_x)\n return predictions, model_info", "def __draw_curve(self, points):\n x_pts = []\n y_pts = []\n curvex = []\n curvey = []\n self.debug += 1\n for point in points:\n x_pts.append(point[0])\n y_pts.append(point[1])\n curve = scipy.interpolate.interp1d(x_pts, y_pts, 'cubic')\n if self.debug == 1 or self.debug == 2:\n for i in np.arange(x_pts[0], x_pts[len(x_pts) - 1] + 1, 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n else:\n for i in np.arange(x_pts[len(x_pts) - 1] + 1, x_pts[0], 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n return curvex, curvey", "def HermiteC1(x0,y0,y0p,x1,y1,y1p,color):\n x = np.linspace(x0,x1,100)\n t = (x-x0)/(x1-x0)\n y = y0 * H0(t) + y0p * (x1-x0) * H1(t) + y1p * (x1-x0) * H2(t) + y1 * H3(t)\n plt.plot(x,y,color,label='cubic interpolant')", "def interpolateCubicNatural() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th paramter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolateSpline(keytime, y)\n S.append(interpolants)\n return S", "def interpolateCubic( t):\n curframe = []\n frame = np.searchsorted( keytime, t, side='right') - 1\n\n for i in range(11):\n 
poly = S[i]\n res = poly[frame](t)\n curframe.append(res)\n\n return curframe", "def _interp_2d(grid1,grid2,value,xi1,xi2,j1,j2):\r\n\r\n # a. left/right\r\n nom_1_left = grid1[j1+1]-xi1\r\n nom_1_right = xi1-grid1[j1]\r\n\r\n nom_2_left = grid2[j2+1]-xi2\r\n nom_2_right = xi2-grid2[j2]\r\n\r\n # b. interpolation\r\n denom = (grid1[j1+1]-grid1[j1])*(grid2[j2+1]-grid2[j2])\r\n nom = 0\r\n for k1 in range(2):\r\n nom_1 = nom_1_left if k1 == 0 else nom_1_right\r\n for k2 in range(2):\r\n nom_2 = nom_2_left if k2 == 0 else nom_2_right \r\n nom += nom_1*nom_2*value[j1+k1,j2+k2]\r\n\r\n return nom/denom", "def interpolate(m):\n \n x1 = m[0]\n x2 = m[1]\n x3 = m[2]\n y1 = m[3]\n y2 = m[4]\n y3 = m[5]\n denom = (x1 - x2)*(x1 - x3)*(x2 - x3)\n A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom\n B = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / denom\n C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom\n xext = -B/(2*A)\n yext = A*xext**2 + B*xext + C\n \n return(np.array([xext,yext]))", "def spltopp(xk, cvals, k):\n return ppform.fromspline(xk, cvals, k)", "def linear_spline_interpolation(q_, t_, m = 100):\n n = q_.shape[0]\n dof = q_.shape[1]\n\n t_ = t_ / m\n timesteps = np.linspace(0, 1, num = m)\n\n a = 100\n time_segments = np.zeros((n, dof))\n blend_times = np.zeros((n, dof))\n velocities = np.zeros((n, dof))\n accelerations = np.zeros((n, dof))\n\n # Initial time segment\n accelerations[0] = np.sign(q_[1] - q_[0]) * a\n blend_times[0] = t_[0] - np.sqrt(\n t_[0] * t_[0] - 2 * (q_[1] - q_[0]) / accelerations[0])\n velocities[0] = (q_[1] - q_[0]) / (t_[0] - 0.5 * blend_times[0])\n\n # Final time segment\n accelerations[n - 1] = np.sign(q_[n - 2] - q_[n - 1]) * a\n blend_times[n - 1] = t_[n - 2] - np.sqrt(\n t_[n - 2] * t_[n - 2] + 2 * (q_[n - 1] - q_[n - 2]) / accelerations[n - 1])\n velocities[n - 2] = (q_[n - 1] - q_[n - 2]) / (t_[n - 2] - 0.5 * blend_times[n - 1])\n velocities[n - 1] = 0\n\n # Loop for velocities\n for i in range(1, n - 2):\n velocities[i] = (q_[i + 1] - q_[i]) / t_[i]\n\n # Loop for accelerations and blend times\n for i in range(1, n - 1):\n accelerations[i] = np.sign(velocities[i] - velocities[i - 1]) * a\n blend_times[i] = (velocities[i] - velocities[i - 1]) / accelerations[i]\n\n # Calculate time segments\n time_segments[0] = t_[0] - blend_times[0] - 0.5 * blend_times[1]\n time_segments[n - 2] = t_[n - 2] - blend_times[n - 1] - 0.5 * blend_times[n - 2]\n time_segments[n - 1] = 0\n for i in range(1, n - 2):\n time_segments[i] = t_[i] - 0.5 * blend_times[i + 1] - 0.5 * blend_times[i]\n\n \n # Calculate Trajectories\n q = np.zeros((dof, m))\n qd = np.zeros((dof, m))\n qdd = np.zeros((dof, m))\n\n # Loop for each degree of freedom\n for d in range(dof):\n # j for using above parameters\n # previous_i for saving i of start of a parabola segment\n # previous_ii for saving i of start of a linear segment\n j = 0\n previous_i = 0\n previous_ii = 0\n\n # Loop over the timesteps\n for i in range(len(timesteps)):\n t = timesteps[i] - timesteps[previous_i]\n\n # If t is in the parabola range\n if t <= blend_times[j][d]:\n a = accelerations[j][d]\n\n qdd[d, i] = a\n qd[d, i] = qd[d, previous_i] + a * t\n\n if i == 0:\n q[d, i] = q_[0][d] + 0.5 * a * t * t\n else:\n q[d, i] = q[d, previous_i] + qd[d, previous_i] * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # If t is in the linear range\n elif t <= blend_times[j][d] + time_segments[j][d]:\n t = timesteps[i] - timesteps[previous_ii]\n v = velocities[j][d]\n\n qdd[d, 
i] = 0\n qd[d, i] = v\n q[d, i] = q[d, previous_ii] + v * t\n\n # If t has crossed the parabola plus the linear range\n else:\n previous_i = i - 1\n j += 1\n\n t = timesteps[i] - timesteps[previous_i]\n\n # Break loop if parameter exceeded\n if j == len(accelerations):\n break\n\n a = accelerations[j][d]\n v = qd[d, previous_i]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # Loop over remaining timesteps\n while i < len(timesteps):\n a = accelerations[j - 1][d]\n v = velocities[j - 1][d]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n i += 1\n\n return q, qd, qdd", "def __call__(self, x, y):\n #- TODO: compare speed to solution at\n #- http://stackoverflow.com/questions/12729228/simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python\n \n #- Find where we are in grid\n #- clip to 1 because we will use i and i-1\n #- clip to len(x)-1 to allow extrapolation beyond grid boundary\n ix = np.searchsorted(self.x, x).clip(1, len(self.x)-1)\n iy = np.searchsorted(self.y, y).clip(1, len(self.y)-1)\n \n #- Interpolation distances from points\n dx = (x - self.x[ix-1]) / (self.x[ix] - self.x[ix-1])\n dy = (y - self.y[iy-1]) / (self.y[iy] - self.y[iy-1])\n\n #- Interpolate, allowing x and/or y to be multi-dimensional\n #- NOTE: these are the slow steps, about equal time each\n \n #- Original code with what appears to be vestigial transposes\n # data1 = (self.data[ix-1,iy-1].T*(1-dx) + self.data[ix,iy-1].T*dx).T\n # data2 = (self.data[ix-1,iy].T*(1-dx) + self.data[ix,iy].T*dx).T\n # dataxy = (data1.T*(1-dy) + data2.T*dy).T\n\n #- Updated without transposes\n data1 = (self.data[ix-1,iy-1]*(1-dx) + self.data[ix,iy-1]*dx)\n data2 = (self.data[ix-1,iy]*(1-dx) + self.data[ix,iy]*dx)\n dataxy = (data1*(1-dy) + data2*dy)\n\n return dataxy", "def _derY(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.y_list[x_pos] - self.x_list[x_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n dfdy = (\n (\n (1 - alpha)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + (1 - alpha)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n + alpha * gamma * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n - (\n (1 - alpha)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + alpha\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha * gamma * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n )\n ) / (self.y_list[y_pos] - self.y_list[y_pos - 1])\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdy = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), 
k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n dfdy[c] = (\n (\n (1 - alpha)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + (1 - alpha)\n * gamma\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1](w[c])\n + alpha * gamma * self.wInterpolators[i][j][k](w[c])\n )\n - (\n (1 - alpha)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + alpha\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * gamma\n * self.wInterpolators[i][j - 1][k](w[c])\n )\n ) / (self.y_list[j] - self.y_list[j - 1])\n return dfdy", "def d2y(self, k, x, y):\n return 1 / self.spHy ** 2 * self.spline_base(abs(x / self.spHx - self.kx[k-1])) * self.d2_spline(\n abs(y / self.spHy - self.ky[k-1]))", "def cspline(self, z, deriv_flag=1, int_flag=1, func_flag=1):\n z = np.asarray(z)\n s = np.zeros(z.shape)\n si = np.zeros(z.shape)\n sd = np.zeros(z.shape)\n b = self.cb\n c = self.cc\n d = self.cd\n for j in range(z.size):\n p, i = self.binarysearch(z[j])\n if func_flag is not None:\n s[j] = (self.y[i] + b[i] * (z[j] - self.x[i]) + c[i] * (z[j] - self.x[i]) ** 2 \n + d[i] * (z[j] - self.x[i]) ** 3) \n if int_flag is not None:\n si[j] = self.qspline_integ(z[j])\n if deriv_flag is not None:\n sd[j] = b[i] + 2 * c[i] * (z[j] - self.x[i]) + 3 * d[i] * (z[j] - self.x[i]) ** 2\n return s, si, sd" ]
[ "0.75392544", "0.73000425", "0.7247718", "0.70745677", "0.70485204", "0.6896351", "0.67513216", "0.65251607", "0.6505519", "0.64778066", "0.645612", "0.6421897", "0.62957704", "0.62941617", "0.62836426", "0.62663037", "0.62661314", "0.62505496", "0.6230855", "0.6228699", "0.61706233", "0.61495084", "0.61162674", "0.6110365", "0.6093666", "0.6067921", "0.6062165", "0.60565275", "0.6051966", "0.6045261", "0.6035001", "0.6027335", "0.60269964", "0.60240155", "0.6020917", "0.60148185", "0.6005798", "0.5990277", "0.59878266", "0.59776", "0.59661734", "0.59585625", "0.59563965", "0.595504", "0.5952088", "0.5943533", "0.5932515", "0.59129566", "0.59128535", "0.5911012", "0.5908362", "0.58903486", "0.58863723", "0.5881357", "0.58794874", "0.5877716", "0.58723414", "0.5850825", "0.5844064", "0.5830071", "0.58293366", "0.5827427", "0.5822875", "0.5805504", "0.5800935", "0.57959044", "0.57946557", "0.5786361", "0.5776064", "0.5767307", "0.5765249", "0.57608926", "0.57461", "0.57415664", "0.57303786", "0.57290727", "0.5728646", "0.57212985", "0.57207614", "0.5712132", "0.57037777", "0.57009053", "0.56770235", "0.5676265", "0.5665121", "0.5664243", "0.56610084", "0.5661003", "0.56549907", "0.56548446", "0.56532186", "0.56452394", "0.5645228", "0.564414", "0.5638526", "0.56238425", "0.5622016", "0.5620578", "0.56197", "0.5617141" ]
0.68838024
6
The set of arguments for constructing a Invocation resource.
def __init__(__self__, *, function_name: pulumi.Input[str], input: pulumi.Input[str], qualifier: Optional[pulumi.Input[str]] = None, triggers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): pulumi.set(__self__, "function_name", function_name) pulumi.set(__self__, "input", input) if qualifier is not None: pulumi.set(__self__, "qualifier", qualifier) if triggers is not None: pulumi.set(__self__, "triggers", triggers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, namespace, listOfArgumentNames):\n self.namespace = namespace\n self.listOfArgumentNames = listOfArgumentNames", "def CreateArgs(run_task_request, args):\n if getattr(args, \"ARGS\", None):\n args_ref = dataplex_api.FetchExecutionSpecArgs(args.ARGS)\n if len(args_ref) > 0:\n return run_task_request.ArgsValue(\n additionalProperties=[\n run_task_request.ArgsValue.AdditionalProperty(\n key=key, value=value\n )\n for key, value in sorted(args_ref.items())\n ]\n )\n return None", "def _set_arguments(self):\n cert_location = f\"dependencies{sep}certificates{sep}localuser.crt\"\n key_location = f\"dependencies{sep}certificates{sep}localuser.key\"\n assert Path(cert_location).exists(), (\n f\"The certificate isn't \"\n f\"present at location {Path(cert_location).absolute()}\"\n )\n assert Path(key_location).exists(), (\n f\"The certificate key isn't \"\n f\"present at location {Path(key_location).absolute()}\"\n )\n self._arguments = [\n (\n \"test-certificate-verify\",\n [\"-k\", key_location, \"-c\", cert_location],\n ),\n (\n \"test-sig-algs\",\n [],\n ),\n (\n \"test-clienthello-md5\",\n [],\n ),\n (\n \"test-tls13-pkcs-signature\",\n [],\n ),\n ]", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs", "def __init__(self, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs", "def create_argument_list(self):\n raise NotImplementedError", "def __init__(\n self,\n name: Optional[str] = None,\n aliases: Iterable[str] = (),\n args: Iterable[Argument] = (),\n ) -> None:\n self.args = Lexicon()\n self.positional_args: List[Argument] = []\n self.flags = Lexicon()\n self.inverse_flags: Dict[str, str] = {} # No need for Lexicon here\n self.name = name\n self.aliases = aliases\n for arg in args:\n self.add_arg(arg)", "def generate_init_args(self):\n return self.generator(self.constructor_spec)", "def __init__(self, resource, *args):\n self.args = list(args)\n self.flags = OrderedDict()\n self.additional_flags = []\n self._AddCommonFlags(resource)", "async def set_args(self, **kwargs):\n self.original_arguments = kwargs\n Args = namedtuple('Args', [k for k, v in self.arguments.items()])\n Args.__new__.__defaults__ = (None,) * len(self.arguments.items())\n\n valid = {}\n for k, arg in self.arguments.items():\n val = kwargs.get(k, None)\n if val is None and arg.required:\n raise Exception('{0} is required'.format(k))\n\n if arg.options and val not in arg.options:\n raise Exception('{0} provided for {1}. 
Expected {2}'.format(\n val,\n k,\n arg.options\n ))\n\n if callable(arg.validator):\n val = arg.validator(val, k)\n\n valid[k] = val\n\n self._args = Args(**valid)", "def base_arguments(self):\n raise NotImplementedError()", "def _get_reproducing_arguments(self):\n reproducing_arguments = {\n 'include': self.include,\n 'exclude': self.exclude,\n 'copy': self.copy,\n }\n args_names = {name: getattr(self, name) for name in self.args_names}\n reproducing_arguments.update(args_names)\n return reproducing_arguments", "def __init__(self, args, kwargs):\n self._args_dec = list(args)\n self._kwargs_dec = dict(kwargs)", "def getArguments(self):\n ApiCli.getArguments(self)\n\n if self.args.alarm_name is not None:\n self.alarm_name = self.args.alarm_name\n\n if self.args.metric_name is not None:\n self.metric_name = self.args.metric_name\n\n if self.args.aggregate is not None:\n self.aggregate = self.args.aggregate\n\n if self.args.operation is not None:\n self.operation = self.args.operation\n\n if self.args.threshold is not None:\n self.threshold = self.args.threshold\n\n if self.args.interval is not None:\n self.interval = self.args.interval\n\n if self.args.host_group_id is not None:\n self.host_group_id = self.args.host_group_id\n\n if self.args.actions is not None:\n self.actions = self.args.actions\n\n if self.args.note is not None:\n self.note = self.args.note\n\n if self.args.per_host_notify is not None:\n self.per_host_notify = self.args.per_host_notify\n\n if self.args.is_disabled is not None:\n self.is_disabled = self.args.is_disabled\n\n payload = {}\n\n # Create trigger predicate dictionary\n predicate = {}\n\n if self.aggregate is not None:\n predicate['agg'] = self.aggregate\n\n if self.operation is not None:\n predicate['op'] = self.operation\n\n if self.threshold is not None:\n predicate['val'] = self.threshold\n\n if 'agg' in predicate or 'op' in predicate or 'val' in predicate:\n payload['triggerPredicate'] = predicate\n\n # Create payload dictionary\n if self.alarm_name:\n payload['name'] = self.alarm_name\n\n if self.host_group_id is not None:\n payload['hostgroupId'] = self.host_group_id\n\n if self.interval is not None:\n payload['interval'] = self.intervals[self.interval]\n\n if self.metric_name is not None:\n payload['metricName'] = self.metric_name\n\n if self.note is not None:\n payload['note'] = self.note\n\n if self.actions is not None:\n payload['actions'] = self.actions\n\n if self.per_host_notify is not None:\n payload['perHostNotify'] = True if self.per_host_notify == 'yes' else False\n\n if self.is_disabled is not None:\n payload['isDisabled'] = True if self.is_disabled == 'yes' else False\n\n self.data = json.dumps(payload, sort_keys=True)\n self.headers = {'Content-Type': 'application/json'}", "def construct_params(self):\n\n return {\"expand\": self.get_expand()}", "def get_cli_arguments(self):\n pass", "def _get_init_args(self):\n signature = inspect.signature(self.__init__)\n parameters = signature.parameters\n args = [arg for arg, p in parameters.items()\n if p.kind is p.POSITIONAL_OR_KEYWORD]\n\n return {arg: getattr(self, arg) for arg in args if arg != 'self'}", "def _create_arguments(self, args):\n assert isinstance(args, (list, tuple))\n\n arguments = []\n index = 0\n for arg in args:\n assert isinstance(arg, (list, tuple))\n assert len(arg) == 2 or len(arg) == 3\n\n identifier = arg[0]\n if isinstance(arg[1], str):\n idl_type = self._create_type(\n arg[1], is_optional=(len(arg) == 3))\n else:\n idl_type = arg[1]\n\n default_value = None\n if len(arg) == 
3:\n default_value = self._create_literal_constant(arg[2])\n\n arguments.append(\n Argument.IR(\n identifier,\n index=index,\n idl_type=idl_type,\n default_value=default_value))\n\n index += 1\n\n return arguments", "def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params", "def templateargs(self, target_jar, confs=None):\r\n raise NotImplementedError()", "def arguments(self, args=[]):\n if args is None:\n args = []\n\n if not isinstance(args, (list, tuple)):\n args = (args,)\n\n serialize_me = []\n\n for arg in args:\n if isinstance(arg, str):\n serialize_me.append({'str': arg})\n else:\n serialize_me.append(dict(((unicode(arg._meta), arg.pk),)))\n\n self._arguments = json.dumps(serialize_me)", "def define_args(self, env, *args):\n if (len(self.params) != len(args) and not self.rest_name) or len(self.params) > len(args):\n raise SnekEvaluationError(\"wrong number of arguments (expected {}{}, got {})\".format(len(self.params), \"+\" if self.rest_name else \"\", len(args)))\n for (i, param) in enumerate(self.params):\n env.define(param, args[i])\n if self.rest_name:\n env.define(self.rest_name, list_snek(*args[len(self.params):]))", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def __init__(self, **kwargs):\n Interaction.__init__(self, **kwargs)\n self._produces = [] # the resource(s) produced by this interaction\n self._consumes = [] # the resource(s) consumed by this interaction", "def lease_create_args(name=None, start='now', length=None, end=None,\n nodes=1, resource_properties=''):\n if name is None:\n name = 'lease-{}'.format(random_base32(6))\n\n if start == 'now':\n start = datetime.datetime.now(tz=tz.tzutc()) + datetime.timedelta(seconds=70)\n\n if length is None and end is None:\n length = DEFAULT_LEASE_LENGTH\n elif length is not None and end is not None:\n raise ValueError(\"provide either 'length' or 'end', not both\")\n\n if end is None:\n if isinstance(length, numbers.Number):\n length = datetime.timedelta(seconds=length)\n end = start + length\n\n if resource_properties:\n resource_properties = json.dumps(resource_properties)\n\n reservations = [{\n 'resource_type': 'physical:host',\n 'resource_properties': resource_properties,\n 'hypervisor_properties': '',\n 'min': str(nodes), 'max': str(nodes),\n }]\n\n query = {\n 'name': name,\n 'start': start.strftime(BLAZAR_TIME_FORMAT),\n 'end': end.strftime(BLAZAR_TIME_FORMAT),\n 'reservations': reservations,\n 'events': [],\n }\n return query", "def __init__ (self, *args, **kw):\n self.__args = args\n self.__kw = kw", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def args(self, args):\n self._instructions_setter('ARG', args)", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, 
\"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def setup_args(self):\n self.parser = argparse.ArgumentParser()\n self.group = self.parser.add_mutually_exclusive_group()\n\n self.group.add_argument('-a', '--add', help='Adds a new task to the task list', action='store_true')\n self.group.add_argument('-r', '--remove', help='Removes a task from the task list', action='store_true')\n self.group.add_argument('-f', '--finish', help='Sets a task to be finished', action='store_true')\n self.group.add_argument('-u', '--unfinish', help='Sets a task to be not finished', action='store_true')\n self.group.add_argument('-c', '--change', help='Updates an existing task', action='store_true')\n self.group.add_argument('-v', '--view', help='View your current task list', action='store_true')\n\n return self.parser", "def args(self):\n return self._args.copy()", "def build_args(self, parser):\n raise NotImplementedError('build_args() must be implemented')", "def __init__(__self__,\n resource_name: str,\n args: AccessConfigurationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, args: argparse.Namespace):\n self._args = args", "def sync_set_args(self, **kwargs):\n self.original_arguments = kwargs\n Args = namedtuple('Args', [k for k, v in self.arguments.items()])\n Args.__new__.__defaults__ = (None,) * len(self.arguments.items())\n\n valid = {}\n for k, arg in self.arguments.items():\n val = kwargs.get(k, None)\n if val is None and arg.required:\n raise Exception('{0} is required'.format(k))\n\n if arg.options and val not in arg.options:\n raise Exception('{0} provided for {1}. Expected {2}'.format(\n val,\n k,\n arg.options\n ))\n\n if callable(arg.validator):\n val = arg.validator(val, k)\n\n valid[k] = val\n\n self._args = Args(**valid)", "def definearguments(self, customparser):\n if not customparser:\n return\n\n add_login_arguments_group(customparser)\n\n customparser.add_argument(\n '--serviceaccount',\n dest='serviceacc',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to created account \"\\\n \"to be a service account.\",\n default=False\n )\n customparser.add_argument(\n '--addprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want added to the iLO account. This overrides the default of \"\\\n \"duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --addprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--removeprivs',\n dest='optprivs',\n nargs='*',\n action=_AccountParse,\n type=str,\n help=\"Optionally include this flag if you wish to specify \"\\\n \"which privileges you want removed from the iLO account. This overrides the default of\"\\\n \" duplicating privileges of the currently logged in account on the new account. Pick \"\\\n \"privileges from the privilege list in the above help text. EX: --removeprivs=1,2,4\",\n default=None\n )\n customparser.add_argument(\n '--role',\n dest='role',\n choices=['Administrator', 'ReadOnly', 'Operator'],\n help=\"Optionally include this flag if you would like to specify Privileges by role. 
\"\\\n \"Valid choices are: Administrator, ReadOnly, Operator\",\n default=None\n )\n customparser.add_argument(\n '-j',\n '--json',\n dest='json',\n action=\"store_true\",\n help=\"Optionally include this flag if you wish to change the\"\\\n \" displayed output to JSON format. Preserving the JSON data\"\\\n \" structure makes the information easier to parse.\",\n default=False\n )", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def get_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "def add_args(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n args: UserArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: UserArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def __init__(self, *args):\n\n self.args = args", "def __init__(self, *args):\n\n self.args = args", "def args(self) -> List[str]:\n return self.__args", "def args(cls):\n try:\n args = getfullargspec(cls.__init__)\n except TypeError:\n return []\n return args[0]", "def extra_target_arguments(self):\n return {}", "def _input_args(self, args: List[str]):\n assert self._call is None, f\"You need to specify all inputs before calling `{self._call}`\"\n assert isinstance(args, list), f\"{args} is a {type(args)}, expected a list of strings!\"\n assert len(args) > 0, f\"Expected a non-empty argument list!\"\n assert all(isinstance(a, str) for a in args), f\"Expected a list of strings, not {[type(a) for a in args]}!\"\n # all arguments could potentially be filenames that we write to, so let's just add them\n self._write_files |= set(args)\n # add dummy argument zero\n args = [\"\"] + args\n # allocate args in memory\n arg_strings = [self._str(a, \"arg\") for a in args]\n # allocate a pointer array for argv\n self.data += [f\"argv: .word \" + \" \".join(\"0\" for _ in range(len(args)))]\n # load argc and argv\n self._args += [\"\", \"# argument count in a0\", f\"li a0, {len(args)}\"]\n self._args += [\"\", \"# load pointers to argument strings into argv\", f\"la a1, argv\"]\n for ii, aa in enumerate(arg_strings):\n self._args += [f\"la t1, {aa}\", f\"sw t1, {ii * 4}(a1)\"]", "def get_arguments_string(self):\n result = self.__get_client_server_arg_string('')\n result = self.__get_x_args_string(result)\n result = self.__get_xx_args_string(result)\n result = self.__get_system_property_args_string(result)\n result = self.__get_unsorted_args_string(result)\n return result", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n action: Optional[pulumi.Input[str]] = None,\n layer_name: Optional[pulumi.Input[str]] = None,\n organization_id: Optional[pulumi.Input[str]] = None,\n principal: Optional[pulumi.Input[str]] = None,\n statement_id: Optional[pulumi.Input[str]] = None,\n version_number: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def args(self) -> tuple[Basic, ...]:\n return self._args", "def get_args():\n # create the parser\n 
parser = argparse.ArgumentParser()\n # Add the arguments to be parsed\n parser.add_argument(\"--num_rollouts\", type=int, default=1, help=\"Number of times to rollout agent in env\")\n parser.add_argument(\"--render\", choices=('True','False'), help=\"Render the rollout\")\n parser.add_argument(\"--seed\", type=int, default=4)\n parser.add_argument(\"--x_thresh\", type=float, default=1.5)\n args = parser.parse_args()\n args.render = True if args.render == 'True' else False\n\n return args", "def get_args():\n # Strip anything other than characters listed\n starting_view = pattern.sub(\"\", request.form.get(\"starting_view\"))\n envelope_id = \"envelope_id\" in session and session[\"envelope_id\"]\n args = {\n \"envelope_id\": envelope_id,\n \"starting_view\": starting_view,\n \"account_id\": session[\"ds_account_id\"],\n \"base_path\": session[\"ds_base_path\"],\n \"access_token\": session[\"ds_access_token\"],\n \"ds_return_url\": url_for(\"ds.ds_return\", _external=True),\n }\n\n return args", "def args(self):\n return self._parse_args", "def __init__(self, args):\n self.args = args", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n admin_role_values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n allowed_organizations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n editor_role_values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n email_assertion: Optional[pulumi.Input[str]] = None,\n groups_assertion: Optional[pulumi.Input[str]] = None,\n idp_metadata_url: Optional[pulumi.Input[str]] = None,\n idp_metadata_xml: Optional[pulumi.Input[str]] = None,\n login_assertion: Optional[pulumi.Input[str]] = None,\n login_validity_duration: Optional[pulumi.Input[int]] = None,\n name_assertion: Optional[pulumi.Input[str]] = None,\n org_assertion: Optional[pulumi.Input[str]] = None,\n role_assertion: Optional[pulumi.Input[str]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. 
\"\n # \"Zip format\",\n # required=False)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def _set_arguments(self):\n self._arguments = []", "def ReviewServiceArgs(cls, filename = '', registrationname = '', readonly = True, user = '', password = ''):\n return filename, registrationname, readonly, user, password", "def __init__(__self__,\n resource_name: str,\n args: ProductArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n accept_language: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner: Optional[pulumi.Input[str]] = None,\n provisioning_artifact_parameters: Optional[pulumi.Input[pulumi.InputType['ProductProvisioningArtifactParametersArgs']]] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(self, *args, **kwargs):\n Cli.__init__(self, *args, **kwargs)\n # Set common arguments\n self.add_argument(\n '--build', env_var=\"BUILD\", default='latest',\n help='Override build id, defaults to latest')\n self.add_argument(\n '--buildroot', env_var=\"BUILD_ROOT\", default='builds',\n help='Build directory')\n self.add_argument(\n '--schema', env_var=\"META_SCHEMA\",\n default='/usr/lib/coreos-assembler/v1.json',\n help='Schema to use. 
Set to NONE to skip all validation')", "def __init__(self, name=\"alpha\", attr=None):\n Arg.__init__(self, name, attr)", "def __getinitargs__(self):\n\n return (self.name, self.value)", "def __init__(__self__,\n resource_name: str,\n args: EndpointArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self):\n self._parser = argparse.ArgumentParser(description='Arguments for talking to vCenter')\n self._standard_args_group = self._parser.add_argument_group('standard arguments')\n self._specific_args_group = self._parser.add_argument_group('sample-specific arguments')\n\n # because -h is reserved for 'help' we use -s for service\n self._standard_args_group.add_argument('-s', '--host',\n required=True,\n action='store',\n help='vSphere service address to connect to')\n\n # because we want -p for password, we use -o for port\n self._standard_args_group.add_argument('-o', '--port',\n type=int,\n default=443,\n action='store',\n help='Port to connect on')\n\n self._standard_args_group.add_argument('-u', '--user',\n required=True,\n action='store',\n help='User name to use when connecting to host')\n\n self._standard_args_group.add_argument('-p', '--password',\n required=False,\n action='store',\n help='Password to use when connecting to host')\n\n self._standard_args_group.add_argument('-nossl', '--disable-ssl-verification',\n required=False,\n action='store_true',\n help='Disable ssl host certificate verification')", "def args(self):\n return (\n self.species_names,\n self.rxn_names,\n self.react_stoic,\n self.prod_stoic,\n self.init_state,\n self.k_det,\n self.chem_flag,\n self.volume,\n )", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def arguments(self):\n return parse_arguments(self['data'])", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__args = args\n self.__kwargs = kwargs", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n consumer_id: Optional[pulumi.Input[str]] = None,\n hash_secret: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(self, args=False):\n self.args = args", "def __init__(__self__, *,\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n source: pulumi.Input['AuthorizationPolicySpecSourceArgs'],\n target: pulumi.Input['AuthorizationPolicySpecTargetArgs']):\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"target\", target)", "def __init__(__self__,\n resource_name: str,\n args: LayerVersionPermissionArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def 
command_line_arguments():\n _parser.add_argument('-l', '--list', nargs='+',\n help='<Required> Set flag', required=True)\n _parser.add_argument(\"-A\", \"--access\", required=True,\n help=\"access to host => grant/revoke\")", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def cmd_args(self):\n return [\"0\", self.name]", "def args(self):\n return self.cmd_args", "def init_args():\n parser = argparse.ArgumentParser(description='Create xls for Tom')\n parser.add_argument('start', metavar='N', type=int, help='starting '\n 'number')\n parser.add_argument('total_x', metavar='N', type=int,\n help='total number of x rows')\n parser.add_argument('total_y', metavar='N', type=int,\n help='total number of y columns')\n parser.add_argument('filename', metavar='NAME', default='test.csv',\n type=str, help='file name to write to, should end in '\n 'csv')\n return parser.parse_args()", "def __init__(__self__,\n resource_name: str,\n args: OAuthArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, *,\n agent_pool: Optional[pulumi.Input[str]] = None,\n apms: Optional[pulumi.Input[Sequence[pulumi.Input['ApmReferenceArgs']]]] = None,\n builder: Optional[pulumi.Input[str]] = None,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input['CertificateReferenceArgs']]]] = None,\n env: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n relative_path: Optional[pulumi.Input[str]] = None,\n resource_requests: Optional[pulumi.Input['BuildResourceRequestsArgs']] = None):\n if agent_pool is not None:\n pulumi.set(__self__, \"agent_pool\", agent_pool)\n if apms is not None:\n pulumi.set(__self__, \"apms\", apms)\n if builder is not None:\n pulumi.set(__self__, \"builder\", builder)\n if certificates is not None:\n pulumi.set(__self__, \"certificates\", certificates)\n if env is not None:\n pulumi.set(__self__, \"env\", env)\n if relative_path is not None:\n pulumi.set(__self__, \"relative_path\", relative_path)\n if resource_requests is not None:\n pulumi.set(__self__, \"resource_requests\", resource_requests)", "def __init__(__self__,\n resource_name: str,\n args: WorkflowArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ScriptArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, *,\n owner: pulumi.Input[str],\n provisioning_artifact_parameters: pulumi.Input['ProductProvisioningArtifactParametersArgs'],\n type: pulumi.Input[str],\n accept_language: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"owner\", owner)\n pulumi.set(__self__, \"provisioning_artifact_parameters\", provisioning_artifact_parameters)\n pulumi.set(__self__, \"type\", type)\n if accept_language is not None:\n pulumi.set(__self__, \"accept_language\", accept_language)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if 
distributor is not None:\n pulumi.set(__self__, \"distributor\", distributor)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if support_description is not None:\n pulumi.set(__self__, \"support_description\", support_description)\n if support_email is not None:\n pulumi.set(__self__, \"support_email\", support_email)\n if support_url is not None:\n pulumi.set(__self__, \"support_url\", support_url)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __add_arguments__(cls, parser):", "def get_args(cls) -> List[argparse.ArgumentParser]:\n raise NotImplementedError()", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n \"subscribed\": self.subnode_subscribed.makeClone(),\n \"subscript\": self.subnode_subscript.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")" ]
[ "0.6244963", "0.6064142", "0.6025504", "0.5993122", "0.59805226", "0.59673625", "0.59494746", "0.5927861", "0.59101176", "0.5889685", "0.58754504", "0.5861338", "0.5853028", "0.5831971", "0.5760043", "0.5757983", "0.5754587", "0.5748414", "0.5736711", "0.5730506", "0.5728922", "0.5702739", "0.5694897", "0.56813794", "0.56813353", "0.56714153", "0.5665719", "0.56517786", "0.5649568", "0.5641863", "0.5636722", "0.5632483", "0.5630324", "0.56175536", "0.5612331", "0.56032336", "0.5601809", "0.55954105", "0.55954105", "0.55954105", "0.5595406", "0.5583956", "0.5568773", "0.5568773", "0.5563448", "0.55483484", "0.55483484", "0.55419505", "0.5539517", "0.5538701", "0.5538542", "0.5535705", "0.5532005", "0.55315673", "0.55237305", "0.5519552", "0.5518667", "0.5516022", "0.55121344", "0.5510536", "0.5507364", "0.55071306", "0.5496793", "0.5494959", "0.5494141", "0.54935986", "0.5493256", "0.5488328", "0.54820573", "0.5477506", "0.5473247", "0.54727185", "0.547237", "0.5470563", "0.5468446", "0.5454001", "0.5453586", "0.5451553", "0.5449597", "0.5444567", "0.54445297", "0.5442025", "0.54418826", "0.54405266", "0.5438899", "0.5435064", "0.5433609", "0.5431173", "0.5427693", "0.5426132", "0.5421175", "0.5417455", "0.54167354", "0.5413231", "0.5411425", "0.5409535", "0.5409535", "0.5409535", "0.5409535", "0.5409535", "0.5409535" ]
0.0
-1
Name of the lambda function.
def function_name(self) -> pulumi.Input[str]: return pulumi.get(self, "function_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n\t\treturn self._func_name", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def get_function_name(self):\n return self.__function", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def function_name(cls):\n function_name = String(cls.__name__).snakecase().lower()\n return function_name", "def getName(self):\n return _libsbml.FunctionDefinition_getName(self)", "def islambda(func):\n return getattr(func, 'func_name', False) == '<lambda>'", "def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)", "def function_name(parameters):", "def get_function_name_at(self, address):\n pass", "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def _callable_name(f):\n try:\n return f.__name__\n except AttributeError:\n if isinstance(f, partial):\n return f.func.__name__\n return f.__class__.__name__", "def function(self) -> str:\n return pulumi.get(self, \"function\")", "def display_name(self) -> str:\n return f\"{self.func.__module__}.{self.func.__qualname__}\"", "def funcName():\r\n import sys\r\n return sys._getframe(1).f_code.co_name", "def method_name(self) -> str:\n if isinstance(self.view_func, str):\n return self.view_func\n return self.view_func.__name__", "def get_function_name(wrapped, instance, args, kwargs):\n return wrapped.__name__", "def get_callable_name(func):\n if isinstance(func, functools.partial):\n return get_callable_name(func.func)\n else:\n return func.__name__", "def function_name(func):\n return log(level=\"info\", message=_function_name(func))", "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def _set_name_scope(self):\n if self.name is None:\n self._name_scope = self.__class__.__name__\n elif self.name == '<lambda>':\n self._name_scope = 'lambda'\n else:\n # E.g. 
'_my_loss' => 'my_loss'\n self._name_scope = self.name.strip('_')", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def _state_name(self):\n return '{}_{}'.format(self.function_name, self.STATE_CONFIG_SUFFIX)", "def getName(self):\n return _libsbml.Event_getName(self)", "def event_name(self):\n return self._event_name", "def this_func_input_name():\n\treturn input_name_from_func_name(inspect.stack()[1][3])", "def get_function_raw_name_at(self, address):\n pass", "def get_function_name():\n\n # inspect.stack()[0][2] returns name of this function\n function_name = inspect.stack()[1][3]\n\n return function_name", "def lambda_func_doc(self, label):\n latex = (\n r'0 = \\lambda - '\n r'\\frac{\\dot{m}_\\mathrm{air}}{\\dot{m}_\\mathrm{air,min}}'\n )\n return generate_latex_eq(self, latex, label)", "def _name(self):\n return self.arguments[0].split('(')[0]", "def _name(self):\n return self._arguments[0].split('(')[0]", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def name(self) -> str:\n return f\"{self.class_object.__name__}.{self.method_str}\"", "def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)", "def name(self):\n return signal_base_get_name(self.obj)", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def is_lambda(fun):\n return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__", "def name(self) -> str:\n return self._formal_name", "def GetCurrentFuncName():\n return sys._getframe(1).f_code.co_name", "def getElementName(self):\n return _libsbml.FunctionDefinition_getElementName(self)", "def node_name(self) -> str:\n op_name = f\"{self.name.name}_{self.name.overload_name}\".lower()\n return \"\".join(word.capitalize() or \"\" for word in op_name.split(\"_\"))", "def cal_name(self):\n return self.event_name", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def trigger_name(self) -> \"str\":\n return self._attrs.get(\"triggerName\")", "def getName(self):\n return signal_base_get_name(self.obj)", "def get_lambdas(self):\n return self.graph.build_lambdas.output.lambda_pairs[-1][:, 0]", "def name(self):\n # type: () -> str\n return self._name", "def name(self):\n return self.__name__", "def destination_function(self):\n return self._event['destination_function_name']", "def get_fun_name(line):\n match = re.match(r'(function|macro)\\s*\\((\\w+)', line)\n if not match:\n return\n return match.groups()[1]", "def name(self):\n # type: () -> string_types\n return self._name", "def name(self):\n # type: () -> string_types\n return self._name", "def event_name(self):\n return dict.get(self, 'event_name', None)", "def get_function(self,attr):\n func_name = self._user_funcs[attr] \n if hasattr(func_name,'__call__'):\n func_name = func_name(self)\n return func_name", "def fname(func: Callable) -> str:\n return \"{}.{}\".format(func.__module__, func.__name__)", "def getElementName(self):\n return _libsbml.ListOfFunctionDefinitions_getElementName(self)", "def getElementName(self):\n return _libsbml.Trigger_getElementName(self)", "def as_function_name(self, string):\n return idaapi.COLSTR(string, 
idaapi.SCOLOR_CNAME)", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def get_function_name(ifunc, *, scoped=False, mangle=False):\n\n name = _translate_function_name(interrogate_function_name(ifunc), mangle)\n\n if scoped:\n parent = interrogate_function_class(ifunc)\n if parent:\n name = get_type_name(parent, scoped=True, mangle=mangle) + '.' + name\n\n return name", "def method_name(self):\n pass", "def get_name():\n return __name__", "def idp_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_lambda_arn\")", "def getElementName(self):\n return _libsbml.FunctionTerm_getElementName(self)", "def fn(self):\n return self._fn", "def test_name_of_func_should_be_passed_name(self):\n scope = self.Test.scope('foo', where='foo')\n self.assertEqual(scope.__name__, 'foo')", "def getName(self):\n return _libsbml.FluxObjective_getName(self)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))", "def name() -> str:\n pass", "def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)", "def name ( self ) :\n return self.__name if self.__name else ''", "def get_class_functional_name(name):\n name = _strip_class_name(name)\n return name", "def key(func):\n return func.__func__ if hasattr(func, \"__func__\") else func", "def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def get_name(listener_id):\n return \"{}{}\".format(f5_const.PREFIX_LISTENER, listener_id)", "def signature(function: model.Function) -> str:\n return str(function.signature)", "def get_filter_name(self):\n pass", "def function(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"function\")", "def getName(self):\r\n return self.__name__" ]
[ "0.75944763", "0.7466118", "0.7466118", "0.7391393", "0.73342067", "0.7193053", "0.71084285", "0.7066119", "0.6840698", "0.6837291", "0.6763094", "0.65932614", "0.6569362", "0.6566482", "0.6476233", "0.64046985", "0.6297647", "0.6272834", "0.62716764", "0.626815", "0.6246849", "0.6245427", "0.62365836", "0.6223749", "0.6185127", "0.6185127", "0.6185127", "0.617316", "0.6163987", "0.61544526", "0.61316943", "0.6112015", "0.60907984", "0.6072864", "0.60334545", "0.60253215", "0.59708244", "0.5956935", "0.59521055", "0.5947784", "0.59415597", "0.5922021", "0.59046304", "0.59039205", "0.58983177", "0.5894947", "0.58889276", "0.5833937", "0.5826419", "0.5819011", "0.5818686", "0.58164835", "0.57611364", "0.5759176", "0.57555735", "0.57311624", "0.57311624", "0.5730143", "0.57287866", "0.5727541", "0.5725021", "0.5719363", "0.5716926", "0.5710818", "0.5710818", "0.5706853", "0.56929535", "0.5671996", "0.5666783", "0.566621", "0.5661876", "0.56607056", "0.5656694", "0.5648848", "0.5648848", "0.5648848", "0.5648848", "0.5648178", "0.5643689", "0.5641882", "0.5637602", "0.563328", "0.5630853", "0.563047", "0.56130755", "0.56130755", "0.56130755", "0.56130755", "0.56130755", "0.56130755", "0.56130755", "0.56130755", "0.56130755", "0.56130755", "0.5611054", "0.5606915", "0.56000537", "0.5596378", "0.55859476" ]
0.77050376
1
JSON payload to the lambda function.
def input(self) -> pulumi.Input[str]: return pulumi.get(self, "input")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lambda_payload(self, date: str, symbol: str) -> bytes:\n self._lambda_event['s3_bucket'] = self._config['s3_bucket']\n self._lambda_event['s3_key_quotes'] = (\n self._config['s3_key_input_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_quotes_suffix'])\n self._lambda_event['s3_key_trades'] = (\n self._config['s3_key_input_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_trades_suffix'])\n self._lambda_event['s3_key_output'] = (\n self._config['s3_key_output_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_output_suffix'])\n return json.dumps(self._lambda_event).encode()", "def lambda_handler(event, context):\n\n if not event[\"body\"]:\n return create_output(400, \"Invalid body: body is empty.\")\n\n try:\n item = json.loads(event[\"body\"])\n except json.JSONDecodeError:\n return create_output(400, \"Invalid body: can't decode body.\")\n\n for key in iris_keys:\n if key not in item:\n return create_output(400, f\"Invalid body: missing key {key} in body.\")\n try:\n float(item[key])\n except ValueError:\n return create_output(400, f\"Invalid body: can't parse {key} to float.\")\n\n item[\"id\"] = create_hash(item)\n\n try:\n table.put_item(Item=item)\n except Exception as e:\n logger.error(f\"Error, can't insert item: {e}\")\n return create_output(500, \"Internal error: can't insert item in table.\")\n\n return create_output(200, \"Item created.\")", "def lambda_handler(event, context):\n logger.debug(event)\n\n product_list = PRODUCT_LIST\n\n return {\n \"statusCode\": 200,\n \"headers\": HEADERS,\n \"body\": json.dumps({\"products\": product_list}),\n }", "def lambda_handler(event, context):\n # EOL char append function\n encode_data = lambda x: \"{data}{eol}\".format(data=json.dumps(x), eol=chr(10)).encode(\"UTF-8\")\n \n # Punk API call\n try:\n logger.debug(\"Requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n request = r.get(os.environ[\"API_URL\"])\n except Exception as e:\n logger.error(\"An error occured while requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n raise e\n \n # Send records to kinesis stream\n logger.debug(\"Sending data to stream: {stream}\".format(stream=os.environ[\"STREAM_NAME\"]))\n for data in request.json():\n client.put_record(\n StreamName=os.environ[\"STREAM_NAME\"],\n Data=encode_data(data),\n PartitionKey=\"key\"\n )\n\n return {\n 'statusCode': request.status_code,\n 'body': data\n }", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def lambda_handler(event, context):\n\n operations = {\n 'POST': main,\n }\n\n if event.get('httpMethod', False):\n operation = event['httpMethod']\n else:\n operation = \"not available\"\n\n payload = base64.b64decode(event['body'])\n try:\n payload = json.loads(payload)\n except TypeError:\n pass\n\n if operation in operations:\n return respond(None, operations[operation](payload))\n else:\n return respond(ValueError(f'Unsupported method {operation}'))", "def lambda_handler(event, context):\n\n # return {\n # \"statusCode\": 200,\n # \"body\": json.dumps(\n # {\n # \"message\": \"hello world\",\n # }\n # ),\n # }\n src_bytes = base64.b64decode(event[\"body\"])\n src = cv2.imdecode(np.frombuffer(src_bytes, dtype=np.uint8), cv2.IMREAD_COLOR)\n gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n _, gray_bytes = cv2.imencode(\".jpg\", gray)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(base64.b64encode(gray_bytes).decode(\"UTF-8\")),\n }", "def payload(self):", "def lambda_handler(event, context):\n\n return {\n 
\"statusCode\": 200,\n \"body\": json.dumps({\n 'message': 'API deployed',\n })\n }", "def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)", "def _send_json(self, payload: dict):\n data = json.dumps(payload)\n return self.send(data)", "def lambda_handler(event, context):\n for item in json.loads(event[\"Records\"][0][\"body\"]):\n item[\"id\"] = uuid.uuid1().bytes\n for key, value in item.items():\n if key == \"id\":\n item[key] = {\"B\": bytes(value)}\n elif key == \"fiscal_year\":\n item[key] = {\"N\": str(value)}\n elif key == \"emissions_mtco2e\":\n item[key] = {\"N\": str(value)}\n elif key == \"consumption\":\n item[key] = {\"N\": str(value)}\n else:\n item[key] = {\"S\": str(value)}\n\n time.sleep(0.001)\n\n dynamo.put_item(TableName=\"Greenhouse_gas_emissions\", Item=dict(item))", "def set_payload(self, payload):\n self.payload = json.dumps(payload)", "def lambda_handler(event, context):\n\n # S3 resource invocation\n s3_resource = boto3.resource('s3')\n # S3 bucket selection\n data_bucket_name = \"put_here_data_bucket_name\"\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n request_body_dict = json.loads(event['body'])\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending both ticker and start date if given\n if request_body_dict['start_date'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_future_request(request_body=request_body_dict,\n s3_resource=s3_resource,\n s3_bucket=data_bucket_name, prefix='valid'))\n # or only ticker name if no start date has been provided\n elif request_body_dict['ticker_name'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_request(ticker_name=request_body_dict['ticker_name'],\n s3_resource=s3_resource, s3_bucket=data_bucket_name,\n prefix='train'))\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n # print data for debug purposes\n print(result)\n\n return {\n 'statusCode': 200,\n 'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},\n 'body': str(result)\n }", "def lambda_handler(event, context):\n\n event_body = json.loads(event['body'])\n print(\"EVENT:\")\n print(event_body)\n\n\n # try:\n # ip = 
requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n recs = flow(event_body, textract, cache = True)\n rval = {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\" : \"hello world\",\n \"textract\" : recs\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n\n return rval", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n\n # Log the values received in the event argument\n logger.info(f'Request event: {event}')\n\n # Define default hard-coded return values\n response = {\n 'uid': 'Example function ID',\n 'return_val01': 'Return value #1',\n 'return_val02': 'Return Value #2',\n }\n\n # Retrieve type of invocation (GET, PUT, etc.)\n if 'http_verb' in event:\n operation = event['http_verb'].upper()\n if operation == 'PUT':\n # Return the values passed to the function\n response = {\n 'uid': event['functionID'],\n 'return_val01': event['parameters']['parm01'],\n 'return_val02': event['parameters']['parm02'],\n }\n\n logger.info(f'Response={response}')\n return response", "def test4():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000147203645/tic000147203645_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n return {\n 'statusCode': 200,\n 'body': say_hello()\n }", "def lambda_handler(event, context):\n\n print(\"EVENT:\")\n print(event)\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n\n recs = flow(event, s3)\n print(recs)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def to_lambda_output(self):\n resp = {\n 'statusCode': self.status_code,\n 'body': self.body,\n 'headers': self.headers\n }\n\n return resp", "def invoke_lambda(lambda_name, lambda_payload):\n try:\n LOGGER.debug(f\"Sending request to '{lambda_name}' method: {lambda_payload}\")\n client = boto3.client('lambda')\n invoke_response = client.invoke(FunctionName=lambda_name,\n InvocationType=\"RequestResponse\",\n Payload=json.dumps(lambda_payload))\n response = json.loads(invoke_response['Payload'].read())\n except Exception as ex:\n LOGGER.debug(f\"Error encountered while invoking lambda method '{lambda_name}': {repr(ex)}\")\n\n return response", "def lambda_handler(event, context):\n # define initial status code and headers\n statusCode = 400\n try:\n # get the body params\n if type(event) == dict:\n event_body = event.get('body', event)\n else:\n event_body = json.loads(event).get('body', {})\n # generate and store the reservation response result from reservation handler function\n reservation_handler = ReservationHandler(EventBodyData=event_body)\n result = reservation_handler.sabre_reservation_handler()\n # define status code, headers and response\n if type(result) == dict:\n statusCode = result.get(\"statusCode\", statusCode)\n response = result.get(\"body\", \"\")\n else:\n response = result\n except Exception as E:\n response = str(E)\n\n # return the 
response\n return {\n 'statusCode': statusCode,\n 'body': response\n }", "def test3():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000129646247_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n blogs = [{\n 'title': 'BETTER UTILITY THAN A TRUCK WITH MORE PERFORMANCE THAN A SPORTS CAR',\n 'description': 'Cybertruck is built with an exterior shell made for ultimate durability and passenger protection. Starting with a nearly impenetrable exoskeleton, every component is designed for superior strength and endurance, from Ultra-Hard 30X Cold-Rolled stainless-steel structural skin to Tesla armor glass.'\n }, {\n 'title': 'ULTRA-HARD 30X COLD-ROLLED STAINLESS STEEL',\n 'description': 'If there was something better, we’d use it. Help eliminate dents, damage and long-term corrosion with a smooth monochrome exoskeleton that puts the shell on the outside of the car and provides you and your passengers maximum protection.'\n }, {\n 'title': 'TESLA ARMOR GLASS',\n 'description': 'Ultra-strong glass and polymer-layered composite can absorb and redirect impact force for improved performance and damage tolerance.'\n }\n ]\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(blogs),\n }", "def lambda_handler(event, context):\n # Boto is always available in AWS lambda, but may not be available in standalone mode\n# import boto3\n# from base64 import b64decode\n\n # To generate the encrypted values, go to AWS IAM Keys and Generate a key\n # Then grant decryption using the key to the IAM Role used for your lambda function.\n #\n # Use the command `aws kms encrypt --key-id alias/<key-alias> --plaintext <value-to-encrypt>\n # Put the encrypted value in the configuration dictionary below\n# encrypted_config = {\n# 'pagerduty_api_key': '<ENCRYPTED VALUE>',\n# 'schedule_ids': '<ENCRYPTED VALUE>'\n# }\n\n# kms = boto3.client('kms')\n# config = {x: kms.decrypt(CiphertextBlob=b64decode(y))['Plaintext'] for x, y in encrypted_config.iteritems()}\n on_call = OnCall(API_KEY, SCHEDULE_IDS)\n output = on_call.run()\n\n return { \"response_type\": \"in_channel\", \"text\": '\\n'.join(output) }", "def lambda_handler(event, context):\n\n try:\n created_item = create_new_table_item(event)\n return {\"statusCode\": 201, \"body\": json.dumps(f\"{created_item}\")}\n\n except BlankRequestBody as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(MISSING_PARAMETERS_MESSAGE)}\n\n except ValidationError as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(INCORRECT_PARAMETERS_MESSAGE)}\n\n except Exception as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 500, \"body\": json.dumps(\"Internal server error\")}", "def lambda_handler(event, context):\n try:\n # Extract the Job ID\n job_id = event['CodePipeline.job']['id']\n\n # Extract the Job Data\n job_data = event['CodePipeline.job']['data']\n\n # Extract the params\n params = get_user_params(job_data)\n\n # Get the lists of artifacts coming in and out of this function\n input_artifacts = job_data['inputArtifacts']\n output_artifacts = job_data['outputArtifacts']\n\n # Perform a build on the source (from source_artifact)\n # and write results to the build_artifact\n s3 = 
setup_s3_client(job_data)\n source_artifact = find_artifact(input_artifacts, params['source_artifact'])\n src_dir = get_zipped_artifact(s3, source_artifact)\n dest_dir = tempfile.mkdtemp()\n perform_build(os.path.join(src_dir, 'src'), dest_dir)\n build_artifact = find_artifact(output_artifacts, params['build_artifact'])\n put_zipped_artifact(s3, dest_dir, build_artifact)\n\n # Pick the template out of the source code and write it to the\n # template_artifact\n template_artifact = find_artifact(output_artifacts, params['template_artifact'])\n put_zipped_artifact(s3, os.path.join(src_dir, params['template_subdir_path']), template_artifact)\n\n shutil.rmtree(src_dir)\n shutil.rmtree(dest_dir)\n put_job_success(job_id, \"Built code: \" + \", template:\")\n\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail the job and log the exception message.\n print('Function failed due to exception.')\n print(e)\n traceback.print_exc()\n put_job_failure(job_id, 'Function exception: ' + str(e))\n\n print('Function complete.')\n return \"Complete.\"", "def lambda_handler(event, context):\n return dispatch(event)", "def handle_json(self, source, data):\n method, args = json.loads(data)\n try:\n result = self.call(source, method, *args)\n except Exception as exc:\n result = str(exc)\n\n return json.dumps(result)", "def get_payload(self):\n if self.payload == '':\n return {}\n\n return json.loads(self.payload)", "def lambda_inject_context(payload, scope):\n try:\n invoke_payload = payload.get('Payload', {})\n\n if not isinstance(invoke_payload, dict):\n invoke_payload = json.loads(invoke_payload)\n\n tracer.inject(scope.span.context, ot.Format.HTTP_HEADERS, invoke_payload)\n payload['Payload'] = json.dumps(invoke_payload)\n except Exception:\n logger.debug(\"non-fatal lambda_inject_context: \", exc_info=True)", "def lambda_handler(event, context):\n feature = session.query(m.Feature).first()\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": feature.title,\n }),\n }", "def callback(ch, method, properties, body):\n record = json.loads(body.decode()) # decode binary string to dict\n pprint(record)", "def payload(self, payload: \"dict\"):\n self._attrs[\"payload\"] = payload", "def lambda_handler(event, context):\n\n # Check that we were passed the required arguments\n validate_event(event)\n\n try:\n numpy_method_name = event.get('method')\n numpy_argument_array = event.get('arguments')\n\n logger.info(\"Handing call to the NumPy {} method with arguments: {}\".format(numpy_method_name, numpy_argument_array))\n result = getattr(numpy, numpy_method_name)(*numpy_argument_array)\n logger.info(\"Result from NumPy is {}\".format(result))\n return {'result': result}\n except:\n error_message = \"Unexpected error: {}\".format(str(sys.exc_info()))\n logger.error(error_message)\n raise Exception(error_message)", "def get_json_payload(self):\n self._payload_to_obj()\n return self._obj_payload", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n try:\n response = s3.get_object(Bucket=BUCKET, Key=KEY)\n print('CONTENT TYPE:', response['ContentType'])\n print('response:')\n pprint.pprint(response)\n print('event')\n pprint.pprint(event)\n print('payload')\n pprint.pprint(event.get('payload'))\n # return json.loads(json.dumps(response, default=str))\n # defined by 
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return {\n 'statusCode': 200,\n 'isBase64Encoded': False,\n 'body': json.dumps(response, default=str)\n }\n # return response['ContentType']\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(KEY, BUCKET))\n raise e", "def pewma():\n try:\n content = request.get_json()\n try:\n data = content[\"data\"]\n except:\n data = content\n result = pewma_model.lambda_handler(data)\n return jsonify(result)\n except Exception as e:\n return jsonify({\"error\": str(e)})", "def on_push(self, payload):\n pass", "def lambda_handler(event, context):\n qpmBucket = event['queryStringParameters']['bucket']\n fn_bucket = list_s3_objects(qpmBucket)\n print(fn_bucket)\n \n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps(str(fn_bucket)),\n }", "def lambda_handler(event, context):\n\n # resolve backend api key from the secrets manager\n sm_client = boto3.client('secretsmanager')\n sm_resp = sm_client.get_secret_value(os.getenv('BACKEND_SERVICE_API_KEY_SECRET_ARN'))\n backend_api_key = json.dumps(sm_resp.get('SecretString')).get('key')\n\n # TODO implement further business logic\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }", "def invokeLambdaFunction(lambdaArn:str, parameters:Dict=None) -> Dict:\n payloadDict = {\n 'http_verb': 'POST',\n 'functionID': lambdaArn,\n }\n if parameters:\n payloadDict['parameters'] = parameters\n return json.loads(_invokeLambdaFunction(lambdaArn, payloadDict)['Payload'].read().decode('utf-8'))", "def lambda_handler(event, context):\n \n filename = None\n fobj = None\n\n try:\n \n filename = 'dlq' + '-' + datetime.datetime.now().strftime(\"%s\")\n fobj = open('/tmp/'+filename, 'w')\n logger.debug('S3 client set up.')\n\n for record in event['Records']:\n fobj.write(json.dumps(record['body']))\n fobj.write(\"\\n\")\n \n except Exception as ex:\n logger.error('Exception in executing ingestion to S3: {}'.format(ex))\n send_sns_alert(str(ex))\n raise\n\n else:\n \n #Saves file to S3\n fobj.close()\n load_data_s3(filename)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Success!')\n }\n\n finally:\n\n # S3 - close temp object\n fobj.close()", "def hello_world(\n event: Dict[str, Any],\n context,\n):\n body_str = event.get(\"body\", \"{}\")\n body_str = body_str if body_str else \"{}\"\n body_obj = json.loads(body_str)\n wiki_search_term = body_obj.get(\"searchTerm\", \"\")\n if not body_obj or not wiki_search_term:\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/handle-errors-in-lambda-integration.html\n response = {\n \"statusCode\": 400,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps({\"message\": \"Wikipedia search term was not provided\"}),\n }\n else:\n summary = wikipedia.summary(wiki_search_term)\n response = {\n \"statusCode\": 200,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps(summary),\n }\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return response", "def invoke_process(fuction_name, account_id, region):\n invoke_payload = (\n json.JSONEncoder().encode(\n {\n \"account\": account_id,\n \"region\": region\n }\n )\n )\n lambda_client.invoke(\n FunctionName=fuction_name,\n 
InvocationType='Event',\n Payload=invoke_payload,\n )", "def test_generic(key,bucket):\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": bucket\n },\n \"object\": {\n \"key\": key\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n print(out)\n assert out[\"statusCode\"] == 200", "def get_json(payload):\n try:\n the_json = json.dumps(payload, indent=4, sort_keys=True)\n return the_json\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def lambda_handler(event: APIGatewayProxyEvent, context: LambdaContext) -> Dict[str, Any]:\n\n return app.resolve(event, context)", "def generate_payload(req):\n logging.info(f'Do something with {req}')\n return json.dumps({\n \"msg1\": \"Hello world 1!\",\n \"msg2\": \"Hello world 2!\"\n })", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n curr_time = str(int(datetime.now().strftime(\"%s\")) * 1000)\n client.put_item(TableName='demo-table', \n Item={\n 'doi': {'S': curr_time}, \n 'crossref_flag': {'BOOL': False},\n 'europepmc_flag': {'BOOL': False},\n 's3_flag': {'BOOL': False},\n 'indexing_flag': {'BOOL': False}\n })\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def lambda_handler(event, context=None):\n response = {}\n try:\n response = middleware.IdentityAuthMiddleWare.process_request(event, response)\n except Exception as e:\n response[\"message\"] = e.message\n response[\"errors\"] = e.errors\n # removing request_dump data\n if \"request_dump\" in response[\"errors\"]:\n del response[\"errors\"][\"request_dump\"]\n for _k, _v in response[\"errors\"].items():\n response[\"errors\"][_k] = str(_v)\n return response", "def lambda_handler(event, context):\r\n body = json.loads(event[\"body\"].replace(\"'\", '\"'))\r\n # This allows the function to run locally by sending requests to a local DynamoDB. 
Option one is for when it's\r\n # being run by SAM, option two for when the tests are being run, and three for production\r\n if os.environ.get('AWS_SAM_LOCAL'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://dynamo:8000')\r\n table = dynamodb.Table(\"pollsStorageDB\")\r\n elif 'local' == os.environ.get('APP_STAGE'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\r\n table = dynamodb.Table(\"pollsStorageDB\")\r\n else:\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table(os.environ[\"DDB_TABLE_NAME\"])\r\n # Create dict to contain the number of votes for each possible response\r\n responses = {}\r\n for answer in body[\"answersList\"]:\r\n responses[answer] = 0\r\n \r\n # Sort out the expiry date\r\n if body.get(\"expiresIn\"):\r\n try:\r\n expiresIn = (datetime.now() + timedelta(days=int(body[\"expiresIn\"]))).timestamp()\r\n except BaseException as e:\r\n print(e)\r\n else:\r\n expiresIn = (datetime.now() + timedelta(days=30)).timestamp()\r\n\r\n # Create unique ID for the poll\r\n randomString = ''.join([random.choice(string.ascii_letters \r\n + string.digits) for n in range(32)]) \r\n poll = {\r\n \"id\": randomString,\r\n 'question': body[\"question\"],\r\n 'answersList': body[\"answersList\"],\r\n 'responses': responses,\r\n 'created': datetime.now().timestamp(),\r\n \"expires\": Decimal(expiresIn)\r\n }\r\n response = table.put_item(\r\n Item=poll\r\n )\r\n \r\n return {\r\n 'headers': {\r\n 'Access-Control-Allow-Headers': 'Content-Type',\r\n 'Access-Control-Allow-Origin': '*',\r\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'\r\n },\r\n \"statusCode\": 200,\r\n \"body\": json.dumps({\r\n \"success\": True,\r\n \"polls\": [poll]\r\n }),\r\n }", "def set_json_payload(self, payload):\n self._str_payload = None\n self._obj_payload = payload", "def lambda_handler(event, context):\n print(event)\n print(context)\n storage_gateway_status()", "def lambda_handler(event, context): # pylint: disable=too-many-locals,too-many-branches,too-many-statements\r\n try: # pylint: disable=too-many-nested-blocks\r\n print(\"Execution started!\")\r\n #print(\"Event: \",event)\r\n # Bucket name and Full path for file - where file will be uploded\r\n source_bucket_name = event[\"detail\"][\"requestParameters\"][\"bucketName\"]\r\n source_key = urllib.parse.unquote_plus(\r\n event[\"detail\"][\"requestParameters\"][\"key\"], encoding='utf-8')\r\n \r\n print(\"file_path: \",source_key)\r\n #Loading master config\r\n print(\"Loading master_config\")\r\n audit_config = {}\r\n config_path = \"./config/\" + \\\r\n os.environ['CCM_ENV'] + \"/master_config.json\"\r\n config_content = open(config_path).read()\r\n config_json = json.loads(config_content)\r\n audit_config = config_json[\"audit_config\"]\r\n snow_params = config_json[\"ERROR_NOTIFICATION_SNOW_PARAMS\"]\r\n athena_query_param = config_json[\"ATHENA_QUERY_PARAMS\"]\r\n athena_table_params = config_json[\"ATHENA_TABLE_PARAMS\"]\r\n\r\n # Audit Parameters Based on the Invoking lambda and its operation involved\r\n audit_config[\"component_type_code\"] = \"ETL\"\r\n audit_config[\"component_name\"] = \"PCP Appflow\"\r\n audit_config[\"source_name\"] = \"Patient Connections Platform\"\r\n audit_config[\"target_name\"] = \"Consumer Consent Management\"\r\n audit_config[\"full_file_path\"] = \"s3://\" + \\\r\n source_bucket_name + \"/\" + source_key\r\n audit_config[\"file_version_id\"] = \"\"\r\n\r\n # Creates Job Entry in ABC Framework\r\n print(\"audit config::\", audit_config)\r\n 
process_execution_id = audit_helper.\\\r\n invoke_edb_abc_log_process_status_event_job_entry(audit_config)\r\n audit_config[\"process_execution_id\"] = process_execution_id\r\n print(\"process_execution_id ::\", process_execution_id)\r\n #print(\"source_key: \",source_key)\r\n s3_write = boto3.client('s3')\r\n record_dict = {}\r\n file_name = \"\"\r\n final_json = \"\"\r\n # prefix = \"\"\r\n # file_list = []\r\n # client = boto3.client(\"s3\")\r\n # result = client.list_objects(Bucket=source_bucket_name, Prefix=source_key, Delimiter='/')\r\n # #print(result)\r\n # for obj in result.get('CommonPrefixes'):\r\n # prefix = obj.get('Prefix')\r\n # #print(prefix)\r\n # file_list = list_files(client,source_bucket_name,prefix)\r\n # for file in file_list:\r\n # #print(file)\r\n json_read = read_s3_file(source_bucket_name, source_key)\r\n data = json.loads(json_read)\r\n #print(data)\r\n if data != '':\r\n record_dict = {k.lower(): v for k, v in data.items()}\r\n print(\"Record_Dict::\",record_dict)\r\n event_type_param = {}\r\n event_type_list = athena_table_params.keys()\r\n print(\"event_type_list\",event_type_list)\r\n for key in event_type_list:\r\n print(\"key\",key)\r\n if key in source_key:\r\n print(\"key\",key)\r\n event_type_param = athena_table_params[key]\r\n print(event_type_param)\r\n if \"changeeventheader\" in record_dict:\r\n if record_dict[\"changeeventheader\"][\"changeType\"] == \"CREATE\":\r\n #and record_dict[\"dtpc_affiliate__c\"] == 'US':\r\n recordid_create = record_dict[\"changeeventheader\"][\"recordIds\"][0]\r\n print(recordid_create)\r\n if recordid_create != '':\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace(\":\",\".\")\r\n create_json = json.dumps(record_dict)\r\n final_json = create_json\r\n file_name = recordid_create + \"-create-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, Key=final_source_key)\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n elif record_dict[\"changeeventheader\"][\"changeType\"] == \"UPDATE\":\r\n record_ids_list = record_dict[\"changeeventheader\"][\"recordIds\"]\r\n if len(record_ids_list) != 0:\r\n for ele in record_ids_list:\r\n print(ele)\r\n element = \"'\" + ele + \"'\"\r\n payload_condition = event_type_param[\"recordid_condition\"]\r\n query = 'SELECT * FROM '+event_type_param[\"athena_create_table\"]+\\\r\n ' WHERE lastmodifieddate IN(SELECT max(lastmodifieddate) from '\\\r\n +event_type_param[\"athena_create_table\"]+\\\r\n ', UNNEST(\"'+payload_condition[0]+'\".\"'+payload_condition[1]+\\\r\n '\") AS ln(jsondata) WHERE jsondata IN ('+element+'));'\r\n print(query)\r\n athena_query_param['athena_query'] = query\r\n query_result_record_id = athena_helper.perform_athena_search\\\r\n (athena_query_param)\r\n print(\"Athena Query Result for Create Path:::\", query_result_record_id)\r\n update_json = create_complete_payload(data,query_result_record_id)\r\n print(\"update_json: \",update_json)\r\n if len(update_json) != 0:\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace\\\r\n (\":\",\".\")\r\n final_json = json.dumps(update_json)\r\n file_name = ele + \"-update-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path 
+ '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, \\\r\n Key=final_source_key)\r\n else:\r\n print(ele,\" does not have a create payload\")\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n else:\r\n raise Exception(\"ChangeEventHeader is missing: \", record_dict)\r\n else:\r\n raise Exception(\"Invalid Payload: \", record_dict)\r\n\r\n except (Exception) as err: # pylint: disable=line-too-long,broad-except\r\n print(\"Error occured: {0}\".format(str(err)))\r\n audit_type = \"error\"\r\n error_msg = sys.exc_info()\r\n exc_type = error_msg\r\n exc_obj = error_msg\r\n snow_params[\"flag\"] = \"FAIL\"\r\n snow_params[\"error_message\"] = str(exc_obj)\r\n snow_params[\"error_type\"] = str(exc_type)\r\n audit_config[\"exception_message\"] = str(exc_obj)\r\n if audit_config != {}:\r\n logging.exception(sys.exc_info())\r\n audit_helper.invoke_edb_abc_log_process_status_event(\r\n audit_type, audit_config) # pylint: disable=line-too-long\r\n audit_helper.raise_snow_incident(snow_params)", "def build_payload():\r\n payload = json.dumps({\"method\": \"ListActivePairedVolumes\",\r\n \"params\": {}, \"id\": 1})\r\n return payload", "def lambda_handler(*_):\n\n # Boto is always available in AWS lambda, but may not be available in\n # standalone mode\n import boto3\n\n # To generate the encrypted values, go to AWS IAM Keys and Generate a key\n # Then grant decryption using the key to the IAM Role used for your lambda\n # function.\n #\n # Use the command `aws kms encrypt --key-id alias/<key-alias> --plaintext <value-to-encrypt>\n # Put the encrypted value in the configuration dictionary below\n encrypted_config = {\n 'slack_token': '<ENCRYPTED VALUE>',\n 'pager_duty_token': '<ENCRYPTED VALUE>'\n 'pager_duty_domain_prefix': '<ENCRYPTED VALUE>'\n }\n\n kms = boto3.client('kms')\n config = {x: kms.decrypt(CiphertextBlob=b64decode(y))['Plaintext'] for x, y in encrypted_config.iteritems()}\n return SlackOnCall(**config).run()", "def body(self):\n return json.dumps(self.data, cls=ServerlessJsonEncoder)", "def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': 
revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n },\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }", "def lambda_handler(event, context):\n logging.info(\"Received event: \" + json.dumps(event, indent=2))\n request_type = event['RequestType']\n if request_type == 'Create':\n attach_policy(event, context)\n elif request_type == 'Delete':\n detach_policy(event, context)\n elif request_type == 'Update':\n update_policy(event, context)", "def __init__(self, payload):\n self.payload = payload", "def payload(self) -> dict:\n return {\n 'event_name': '',\n\n }", "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "def lambda_handler(event, context):\n for record in event[\"Records\"]:\n arn = record[\"Sns\"][\"TopicArn\"].split(\":\")\n message = json.loads(record[\"Sns\"][\"Message\"])\n message_handler(arn[3], message)\n return ''", "def get_payload(self):\n return {'message': 'bar'}", "def payload(self, payload):\n\n self._payload = payload", "def process(self, payload, status_code=0):", "def event(event, context):\n# Sample event:\n #\n # _event = { \"Records\":[\n # {\n # \"eventVersion\":\"2.1\",\n # \"eventSource\":\"aws:s3\",\n # \"awsRegion\":\"us-east-1\",\n # \"eventTime\":\"2021-10-14T07:40:55.113Z\",\n # \"eventName\":\"ObjectCreated:Put\",\n # \"userIdentity\":{\n # \"principalId\":\"AWS:AROA6L2YJX2JCJYHEJ4UI:serverless-image-processing-test-create\"\n # },\n # \"requestParameters\":{\n # \"sourceIPAddress\":\"94.140.8.209\"\n # },\n # \"responseElements\":{\n # \"x-amz-request-id\":\"7CJHSGZ9MZF9995F\",\n # \"x-amz-id-2\":\"X5OtpRb+P9CuYKDHvjT8z9prnqqsH1yatZchN2uw8/158mcRUVhQNSW/z5ffXLqkLhu+4Kc163vZiRgVk3XaGd8H1NhZCu8N\"\n # },\n # \"s3\":{\n # \"s3SchemaVersion\":\"1.0\",\n # \"configurationId\":\"9b8f4135-35d4-4e07-b8a5-7d68cc95870b\",\n # \"bucket\":{\n # 
\"name\":\"serverless-image-processing-test-serverless-image-processing\",\n # \"ownerIdentity\":{\n # \"principalId\":\"A5IHQSLNTJKZN\"\n # },\n # \"arn\":\"arn:aws:s3:::serverless-image-processing-test-serverless-image-processing\"\n # },\n # \"object\":{\n # \"key\":\"test/6e7ef3f0-dcb6-4db6-9518-3bc6ec0ba492\",\n # \"size\":116716,\n # \"eTag\":\"f04e70e100f653a0e67f32f6098dea1c\",\n # \"sequencer\":\"006167DF06C888A626\"\n # }\n # }\n # }\n # ]\n # }\n\n logger.debug('event: {}'.format(event))\n for record in event['Records']:\n processRecord(record)\n\n return {'statusCode': httplib.ACCEPTED}", "def inner(fn_inner):\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler", "def _get_payload(self, method, **params):\n try:\n payload = params['data']['payload']\n if self.prettyprint:\n payload = \"\\n\" + json.dumps(json.loads(payload),\n indent=self.indent)\n except KeyError:\n payload = \"N/A\" if method == \"Event Channel Message\" else \"None\"\n return payload", "def payload(self) -> dict:\n return {\n # TBD\n }", "def lambda_handler(event, context):\n # Environmental Variables\n CATALOG_BRANCHES_TABLE = anejocommon.set_env_var('CATALOG_BRANCHES_TABLE')\n PRODUCT_INFO_TABLE = anejocommon.set_env_var('PRODUCT_INFO_TABLE')\n S3_BUCKET = anejocommon.set_env_var('S3_BUCKET')\n\n # Loop through event records\n try:\n event_records = event['Records']\n except KeyError:\n event_records = [{'body': event}]\n\n for record in event_records:\n try:\n catalog_sync_info = json.loads(record['body'])\n except TypeError:\n catalog_sync_info = record['body']\n\n # Event Variables\n catalog_url = catalog_sync_info['catalog_url']\n\n apple_bucket_catalog_path = anejocommon.get_path_from_url(\n catalog_url,\n 'html',\n append_to_path='.apple'\n )\n \n catalog = anejocommon.retrieve_url(catalog_url)\n try:\n catalog_plist = plistlib.readPlistFromBytes(catalog.data)\n except plistlib.InvalidFileException:\n print(\"ERROR: Cannot read catalog plist\")\n return\n\n # Write our local (filtered) catalogs\n anejocommon.write_local_catalogs(\n apple_bucket_catalog_path,\n catalog_plist,\n S3_BUCKET,\n 
CATALOG_BRANCHES_TABLE,\n PRODUCT_INFO_TABLE\n )", "def build_payload(self, **kwargs):\n\n return None", "def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)", "def lambda_handler(event, context):\n logging.info(event)\n current_time = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n is_conversation_result = 'Details' in event\n if is_conversation_result:\n combine_bot_state_to_s3(event, current_time)\n else:\n save_bot_state_to_s3(event, current_time)\n\n # Generate response back to bot\n response = dict()\n if not is_conversation_result:\n response = {\n 'dialogAction': {\n 'type': 'Delegate',\n 'slots': event['currentIntent']['slots']\n }\n }\n logging.info(response)\n return response", "def aws_lambda(f):\n def wrapper(event, context):\n logger = logging.getLogger(__name__)\n try:\n # if no return value is given by wrapped func,\n # return default status code 200 response.\n r = f(event, context)\n if r is None:\n r = {\n 'statusCode': 200,\n 'body': json.dumps({'input': event})\n }\n return r\n except Exception as e:\n # if exception is thrown, log exception,\n # return exception text,\n # and return status code associated with passed\n # exception type\n logger.info(\n 'Call to {} resulted in exception'.format(f.__name__), e)\n exc_type = type(e)\n # get exception type for code lookup and msg\n if exc_type is type:\n exc_type = e\n msg = e.__name__\n else:\n msg = str(e)\n # get default exception code for raised Exception.\n # default to code 500 if exception is not in codes dict.\n code = codes.get(exc_type, DEFAULT_EXCEPTION_CODE)\n return {\n 'statusCode': code,\n 'body': json.dumps({'input': event, 'message': msg})\n }\n\n wrapper.__name__ = f.__name__ + '_wrapper'\n return wrapper", "def lambda_handler(event, context):\n print 'Received event: ' + json.dumps(event, indent=2)\n print \"Context log stream: \"+ context.log_stream_name\n\n try:\n filename = get_latest_agent_filename()\n download_agent_if_missing(filename)\n prepare_agent_input_data(event, context)\n run_agent(filename)\n\n except URLError as ex:\n print 'Error: ', ex", "def lambda_handler(event, context):\n\n client = boto3.client('events')\n event_to_put = {\n \"source\": \"aws-lambda-function\"\n }\n event_to_put.update(**event)\n try:\n response = client.put_events(\n Entries=[\n {\n 'Source': 'learn.eventbridge',\n 'Detail': json.dumps(event_to_put),\n 'DetailType': 'Learning Eventbridge',\n 'EventBusName': 'default'\n },\n ]\n )\n return {\n 'statusCode': 200,\n 'body': json.dumps('Event has been put on event bus successfully.')\n }\n except Exception as ex:\n return {\n 'statusCode': 500,\n 'body': json.dumps(str(ex))\n }", "def lambda_handler(event, context):\n input = event[\"queryStringParameters\"][\"input\"]\n completed_interpretation = subprocess.run(\n [\"./esco\", \"--quiet\", \"--type\", \"ws\", \"baudelaire.ws\"],\n text=True,\n encoding=\"ascii\",\n input=input,\n stdout=subprocess.PIPE)\n\n # Discard the first two lines of the output (they contain the message\n # \"Enter a word and press Enter:\" and then an empty line).\n trimmed_output = completed_interpretation.stdout.split(\"\\n\", 2)[2]\n\n return {\n \"statusCode\": 200,\n \"headers\": {\"Access-Control-Allow-Origin\": \"*\"},\n \"body\": trimmed_output,\n }", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to 
Lambda Logs\n # print(e)\n\n # raise e\n print(event)\n method=event['httpMethod']\n print(f\"method={method}\")\n print(f\"table_name={table_name}\")\n myTriggerType='instrument_price'\n\n \n if method == \"DELETE\":\n #path=event['path']\n trigger_id=event['pathParameters']['trigger_id']\n print(f\"triggerId={trigger_id}\")\n\n try:\n #see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.delete_item\n response = table.delete_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ConditionExpression=And(Attr('PK').eq(Attr('SK')),Attr('triggerType').eq(myTriggerType)),\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(f\"response={response}\")\n return {\n \"statusCode\": 200,\n \"body\":\"\",\n }\n \n elif method == \"POST\":\n body=json.loads(event['body'])\n trigger_id=body['trigger_identity']\n print(f\"triggerId={trigger_id}\")\n\n response = table.get_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ProjectionExpression=\"triggerEvents, triggerType\",\n )\n print(f\"response={response}\")\n\n if \"Item\" not in response:\n #brand new \n print(f\"inserting {trigger_id}\")\n if 'triggerFields' not in body:\n return iftttError(400, \"triggerFields missing from request\")\n triggerFields=body['triggerFields']\n #todo validate trigger fields\n try:\n response = table.put_item(\n Item={\n 'PK':f\"TR#{myTriggerType}#{trigger_id}\", \n \"SK\":f\"TR#{myTriggerType}#{trigger_id}\",\n 'triggerId': trigger_id,\n #hacky string way to avoid having multiple columns\n 'triggerFields': json.dumps(triggerFields),\n 'triggerType': myTriggerType,\n },\n ConditionExpression=Or(Attr('triggerType').eq(myTriggerType),Attr('triggerType').not_exists())\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n #somehow got created with someone elses triggerType\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(\"response \",response)\n triggered=[]\n elif response['Item'].get(\"triggerType\",myTriggerType) != myTriggerType:\n #it exists but it is someone elses\n return iftttError(404,\"item not found\")\n else:\n item=response['Item']\n print(f\"found {item} \")\n #hacky string way to avoid having multiple columns\n #TODO: change this to use a Map? 
(will allow to add without overwrite)\n events = json.loads(item.get(\"triggerEvents\",\"[]\"))\n triggered= []\n for event in events:\n #TODO: implement limit (not needed now becasue I expect only up to one events)\n triggered.append(event['data'])\n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"data\": triggered,\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n else :\n return iftttError(400, f\"unexpected httpMethod {method}\")", "def payload(self) -> \"dict\":\n return self._attrs.get(\"payload\")", "def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log", "def _dispatch_json(\n self,\n action: str, # get, post, put, delete\n url: str,\n payload: Optional[dict] = None,\n file_list: Optional[list] = None,\n ) -> dict:\n result = self._dispatch(action, url, payload, file_list)\n return json.loads(result)", "def execute(self, **payload):\n pass", "def to_json(self) -> Dict[str, Any]:\n raise Exception(\"Attempted to convert an anonymous Action to JSON\")", "def lambda_handler(event, context):\n\n markdown_files = sys.argv[1:]\n logging.info(f'Markdown files to clean = {markdown_files}')\n for file_to_clean in markdown_files:\n clean_file(file_to_clean)\n print(json.dumps(event))\n body = event['body']\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world from aws.\",\n \"commit\": body\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def lambda_handler(event, context):\n #print(\"event.session.application.applicationId=\" + event['session']['application']['applicationId'])\n\n #if event['session']['new']:\n # on_session_started({'requestId': event['request']['requestId']},event['session'])\n \n intent = None\n try:\n intent = Intent(**event)\n return handle_intent(intent)\n except Exception as ex:\n err = traceback.format_exc()\n print(err)\n return error_handler_func(intent,msg=str(err))", "def _wrap_handler(self, handler, body):\n try:\n decoded_body = json.loads(body)\n result = yield handler(decoded_body)\n return result\n except Exception as e:\n return {\"error\": str(e)}", "def on_create(self, payload):\n pass", "def lambda_handler(event=None, context=None):\n logger.info('Lambda function invoked index()')\n\n # Get configuration from environment\n file_name_default = os.environ.get('FILE_NAME') or \"bamboo_employees\"\n api_key = os.environ.get('BAMBOO_TOKEN')\n url = os.environ.get('BAMBOO_API')\n\n # Parameters, take a file name if exists and remove it from the dict\n file_name = 
request.args.get('file_name') or file_name_default\n\n try:\n # Request data from Bamboo API\n headers = {'Accept': 'application/json'}\n auth = HTTPBasicAuth(api_key, 'x')\n response = requests.get(url=url, headers=headers, auth=auth)\n\n if response.status_code != requests.codes.ok:\n raise Exception('Error making the request to Bamboo\\r\\n')\n\n # Check the result\n result = json.loads(response.text)\n if 'employees' in result:\n # Generate the GeoJSON from API response\n employees = []\n for employee in result['employees']:\n # Bamboo does not provide explicit locations\n point = Point(None, None)\n employees.append(Feature(geometry=point, properties=employee))\n\n # Produce a GeoJSON Feature collection\n body = json.dumps(FeatureCollection(employees))\n attachment = 'attachment; filename={0}.json'.format(file_name)\n\n # Create a response with the proper headers\n # CARTO will use the file name property as the table name\n response = make_response(body)\n response.headers['Content-Type'] = 'application/json'\n response.headers['Content-Disposition'] = attachment\n else:\n raise Exception('No photos on your request')\n\n return response\n\n except Exception as e:\n response = make_response(e.message + \"\\r\\n\")\n response.headers['Content-Type'] = 'text/plain'\n response.status_code = 500\n return response", "def append_json(self, obj: Any, headers: Optional[MultiMapping[str]] = ...) -> Payload:\n ...", "def jwt_encode_handler(payload):\n\n return jwt.encode(\n payload,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_ALGORITHM\n ).decode('utf-8')", "def _build_payload(self, body: Dict) -> Dict[str, Any]:\n return {'jsonrpc': '2.0',\n 'id': self._id_count,\n **body}", "def lambda_handler(event, context):\n params = parse_qs(event['body'])\n token = params['token'][0] if 'token' in params else ''\n\n if not verify_slack_token(token):\n logger.error(\"Request token (%s) does not match expected token\", token)\n return lambda_response(Exception('Invalid request token'))\n\n return gauges_app(params)", "def dumps_json(function):\n def f(*args, **kwargs):\n return json.dumps(function(*args, **kwargs))\n return f", "def serialize(self, content):\r\n content = super(JSONPTemplateEmitter, self).serialize(content)\r\n callback = self.request.GET.get('callback', 'callback')\r\n return '%s(%s)' % (callback, content)", "def _send(self, payload):\n return payload", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n #if (event['session']['application']['applicationId'] != \"<APPLICATION_ID>\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])" ]
[ "0.6537401", "0.6435479", "0.6433545", "0.6389464", "0.63662946", "0.63558084", "0.6291786", "0.6266016", "0.6230665", "0.6211438", "0.62085325", "0.62069964", "0.6166684", "0.6158678", "0.61519873", "0.61504626", "0.61504626", "0.61433136", "0.61325645", "0.6110487", "0.6093063", "0.60631365", "0.60375714", "0.6004712", "0.5986585", "0.5983808", "0.5979236", "0.59614736", "0.5931828", "0.5916565", "0.58867884", "0.58647084", "0.5851088", "0.5839614", "0.5822403", "0.5801316", "0.5752273", "0.5738515", "0.57339835", "0.57125425", "0.57054687", "0.57049215", "0.5697259", "0.5682163", "0.5672458", "0.5666686", "0.56562185", "0.56526953", "0.5641862", "0.56357884", "0.56342924", "0.5581171", "0.5573289", "0.5535858", "0.5526098", "0.5525876", "0.5506696", "0.550109", "0.54967475", "0.5492604", "0.54899275", "0.5489767", "0.5481654", "0.54715586", "0.5468634", "0.5463718", "0.54413235", "0.54097813", "0.54064816", "0.540588", "0.5404118", "0.5400371", "0.5395709", "0.53944707", "0.5394124", "0.53941196", "0.53859323", "0.53762656", "0.5369477", "0.5366468", "0.5362872", "0.53525406", "0.5346742", "0.5328357", "0.5324827", "0.5324255", "0.5322641", "0.5289184", "0.5280226", "0.52790916", "0.5274162", "0.5273418", "0.52733576", "0.52696496", "0.5264568", "0.52623194", "0.52608144", "0.5243123", "0.5240586", "0.5237544", "0.5229909" ]
0.0
-1
Qualifier (i.e., version) of the lambda function. Defaults to `$LATEST`.
def qualifier(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "qualifier")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_version(self) -> str:\n return pulumi.get(self, \"function_version\")", "def get_lambda_latest_version_num(fn_arn: str, region: str) -> int:\n\n client = boto3.client('lambda', region_name=region)\n response = client.list_versions_by_function(FunctionName=fn_arn)\n\n for v in response['Versions']:\n if v['Version'] == '$LATEST':\n latest_hash = v['CodeSha256']\n break\n\n for v in response['Versions']:\n if v['Version'] != '$LATEST' and v['CodeSha256'] == latest_hash:\n return v['Version']", "def get_boto3_version() -> str:\n return boto3_version", "def get_latest_tf_version(include_prerelease: bool = False) -> str:\n return get_available_tf_versions(include_prerelease)[0]", "def get_bucket_versioning(Bucket=None):\n pass", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def get_botocore_version() -> str:\n return botocore_version", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)", "def schema_version(self) -> str:\n return self._pipeline_definition.get(\"version\")", "def version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"version\")", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema_transformation_version\")", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def get_trigger_version(uuid: UUID) 
-> Optional[float]:\n scenario = store.get_scenario(uuid)\n if scenario:\n return scenario.sections['Triggers'].trigger_version\n return None", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def version():\n\n pass", "def resource_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_version\")", "def function_region(self) -> str:\n return pulumi.get(self, \"function_region\")", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def runtime_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"runtime_version\")", "def version(self):\n pass", "def version(self):\n pass", "def version(self):\n pass", "def signature(function: model.Function) -> str:\n return str(function.signature)", "def signature_version(self) -> str:\n return self[\"Sns\"][\"SignatureVersion\"]", "def get_version_tag(self, version: str) -> str:\n return version", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def get_version() -> str:\n return __version__", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_trigger(event):\n\n if \"Records\" in event and event[\"Records\"][0][\"eventSource\"] == \"aws:s3\":\n return \"S3\"\n elif \"queryStringParameters\" in event:\n return \"APIGateway\"\n else:\n return \"eval\"", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)", "def pyzmq_version():\n if __revision__:\n return '@'.join([__version__,__revision__[:6]])\n else:\n return __version__", "def pyzmq_version():\n if __revision__:\n return '@'.join([__version__,__revision__[:6]])\n else:\n return __version__", "def version(self) -> str:\n return '0.1'", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def _provided_or_most_recent(self) -> str:\n if self._operator == \">=\" and parse(self._version) <= parse(\n self._reserved_latest_version\n ):\n return f\"{self._reserved_name}=={self._reserved_latest_version}\"\n elif (\n self._operator == \"\"\n and self._version == \"\"\n and self._reserved_latest_version != \"\"\n ):\n return f\"{self._reserved_name}=={self._reserved_latest_version}\"\n return self._raw", "def version(self) -> Optional[pulumi.Input['FhirStoreVersion']]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def qualifier(self) -> pulumi.Output[Optional[str]]:\n 
return pulumi.get(self, \"qualifier\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def get_version():\n return 1", "def get_version(self):\n pass", "def probe_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"probe_version\")", "def probe_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"probe_version\")", "def version():\n return __VERSION__", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def version_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_code\")", "def python_branch():\n\n return _sys_version()[2]", "def version(self):\n return self.get_current_version()", "def get_version():\n return \"4.{}\".format(__version__)", "def key_version(self) -> Optional[str]:\n return pulumi.get(self, \"key_version\")", "def get_current_component_version():\n from resource_management.core.exceptions import Fail\n from resource_management.libraries.functions.default import default\n from resource_management.libraries.functions.stack_select import get_role_component_current_stack_version\n from resource_management.libraries.functions.repository_util import CommandRepository\n\n version = default(\"/commandParams/version\", None)\n if not version:\n repository = CommandRepository(default(\"/repositoryFile\", {}))\n if not repository.resolved:\n try:\n version = get_role_component_current_stack_version()\n except (Fail, TypeError):\n pass\n else:\n version = repository.version_string\n\n return version", "def get_version(self):\n return self.cur_config['version']['name']", "def getVersion(self, *args):\n return _libsbml.QualExtension_getVersion(self, *args)", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def version(self):", "def QualExtension_getDefaultVersion():\n return _libsbml.QualExtension_getDefaultVersion()", "def azure_function_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionAzureFunctionEndpointArgs']]:\n return pulumi.get(self, \"azure_function_endpoint\")", "def azure_function_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionAzureFunctionEndpointArgs']]:\n return pulumi.get(self, \"azure_function_endpoint\")", "def version(self):\n raise NotImplementedError('version')", "def type_version_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_version_arn\")", "def _get_function_path_from_list_functions_endpoint(self, function):\n if 'function_scope' in function and function['function_scope']['bucket'] != '*':\n return f\"{function['function_scope']['bucket']}/{function['function_scope']['scope']}/{function['appname']}\"\n return function['appname']", "def python_revision():\n return _sys_version()[3]", "def version(self, newVersion=None):\n pass", "def set_version(self, version=None, version_fun: Callable[[], str] = None):\n def version_compute():\n fun = version_fun\n if fun is None:\n fun = default_version_hash\n\n if version is None:\n return fun()\n else:\n return version\n\n self.version = version_compute\n return self", "def runtime_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")", "def runtime_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")", "def runtime_version(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")" ]
[ "0.66411906", "0.63990325", "0.5234373", "0.51821023", "0.5175053", "0.51695466", "0.51431835", "0.50511765", "0.50181675", "0.5002993", "0.49775565", "0.49529707", "0.4950628", "0.4907003", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.490198", "0.49019074", "0.48994243", "0.48897853", "0.4885085", "0.48744717", "0.48518273", "0.4850299", "0.4848702", "0.4848702", "0.4848702", "0.4840926", "0.48228833", "0.48216584", "0.48155078", "0.48155078", "0.4812624", "0.48088837", "0.48088837", "0.48065376", "0.48033282", "0.48014867", "0.48014867", "0.48014867", "0.48014867", "0.47963515", "0.47954845", "0.47954845", "0.47875348", "0.47772455", "0.4769738", "0.4755864", "0.47504756", "0.47504756", "0.47504756", "0.47504756", "0.47504756", "0.47443125", "0.4740842", "0.4740842", "0.4740842", "0.47393692", "0.47321597", "0.47302303", "0.47302303", "0.47274512", "0.47209063", "0.47150984", "0.4708157", "0.46978924", "0.46866015", "0.4679006", "0.46780503", "0.46728104", "0.46680856", "0.4666742", "0.4666742", "0.4666742", "0.46667063", "0.4663384", "0.46610788", "0.46610788", "0.46502882", "0.46484146", "0.46483487", "0.46447754", "0.46392563", "0.4639124", "0.46365383", "0.46365383", "0.46365383" ]
0.48600695
34
Map of arbitrary keys and values that, when changed, will trigger a reinvocation.
def triggers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: return pulumi.get(self, "triggers")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_dict(new,old):", "def test_dictionary_inplace_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value[4] = 5\r\n assert vm.changed", "def test_dict(self):\n event_cache = []\n\n class A(HasTraits):\n x = EventfulDict({c: c for c in 'abc'})\n a = A()\n a.x.on_events(lambda k, v: event_cache.append('add'), \\\n lambda k, v: event_cache.append('set'), \\\n lambda k: event_cache.append('del'))\n\n del a.x['c']\n # ab\n a.x['z'] = 1\n # abz\n a.x['z'] = 'z'\n # abz\n a.x.pop('a')\n # bz \n\n # Were the correct events captured?\n self.assertEqual(event_cache, ['del', 'add', 'set', 'del'])\n\n # Is the output correct?\n self.assertEqual(a.x, {c: c for c in 'bz'})", "def test_dictionary_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value = {4:5}\r\n assert vm.changed", "def _set_toChange(x):\n for key in list(x.keys()):\n self.toChange[key] = True", "def keys(self, value: Dict[str, ValidKVs]) -> None:\n warnings.warn('This is private, call .clear_keys() and update().', DeprecationWarning, stacklevel=2)\n self.clear_keys()\n self.update(value)", "def __setitem__(self, key, value):\n dict.__setitem__(self, key, value)\n\n self.changed()", "def handle_dict(self, object, name, old, new):\n if old is not Uninitialized:\n unregister = self.next.unregister\n for obj in old.values():\n unregister(obj)\n\n register = self.next.register\n for obj in new.values():\n register(obj)", "def renamed_dict(event):\n\n new_dict = thaw(event.data())\n\n for old, new in list(rename_map.items()):\n new_dict[new] = new_dict.pop(old)\n\n return new_dict", "def on_change(key):\n pass", "def dict_change(binary_dict:dict):\r\n dict_change= {}\r\n for key, value in binary_dict.items():\r\n dict_change[value] = key\r\n return dict_change", "def changed_keys(self):\n return self._changed_keys", "def __setitem__(self, key, value):\n super(ReadOnlyDict, self).__setitem__(key, value)", "def handle_dict_items(self, object, name, old, new):\n self.handle_dict(object, name, new.removed, new.added)\n\n if len(new.changed) > 0:\n # If 'name' refers to the '_items' trait, then remove the '_items'\n # suffix to get the actual dictionary trait.\n #\n # fixme: Is there ever a case where 'name' *won't* refer to the\n # '_items' trait?\n if name.endswith(\"_items\"):\n name = name[: -len(\"_items\")]\n\n dict = getattr(object, name)\n unregister = self.next.unregister\n register = self.next.register\n for key, obj in new.changed.items():\n unregister(obj)\n register(dict[key])", "def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover", "def update(self, key, value):\n if key in self.map:\n self.map[key] = value", "def __setitem__(self,key,value):\n if key in self.deleted: self.deleted.remove(key)\n if key not in self.changed: self.changed.append(key)\n self.data[key] = value", "def _default_observation_map(self) -> Dict[str, ObservationMapValue]:\n pass", "def handle_dict_items(self, object, name, old, new):\n raise NotImplementedError", "def _metrics_update(orig, new):\n revsd = orig\n for k, v in orig.items():\n if not v:\n revsd[k] = new[k]\n elif new[k]:\n if new[k] != v:\n # LOG ME, do something\n print(orig)\n print(new)\n elif not new[k] or v:\n 
pass\n else:\n raise Exception(\"_metrics_update error\")\n return revsd", "def __getstate__(self):\n return dict(self.items())", "def __getstate__(self):\n return dict(self.items())", "def __setitem__(key, value):", "def data_dict_update(self, change):\n self.data_dict = change['value']", "def rename_dictkey(self, kwargs, old, new):\n x = kwargs.copy()\n x[new] = x.pop(old)\n return x", "def rekey(self, map_or_fn=None, inplace=False, **kw):\n if not inplace:\n return self.copy().rekey(map_or_fn, inplace=True, **kw)\n if map_or_fn is None:\n map_or_fn = kw\n if isinstance(map_or_fn, dict):\n if map_or_fn is not kw:\n map_or_fn.update(kw)\n func = lambda k: map_or_fn.get(k, k)\n else:\n func = map_or_fn\n if not callable(func):\n raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'\n % (str(type(map_or_fn)), str(map_or_fn)))\n keys = self.peys()\n new_keys = keys.apply(func).puniq()\n if len(keys) != len(new_keys):\n raise ValueError('rekey map must return the same number of unique keys as the original pdict. '\n 'Only found %d of %d expected keys.' % (len(new_keys), len(keys)))\n vals = self.palues().uproot()\n self.clear()\n self[new_keys] = vals\n return self", "def rekey(self, map_or_fn=None, inplace=False, **kw):\n if not inplace:\n return self.copy().rekey(map_or_fn, inplace=True, **kw)\n if map_or_fn is None:\n map_or_fn = kw\n if isinstance(map_or_fn, dict):\n if map_or_fn is not kw:\n map_or_fn.update(kw)\n func = lambda k: map_or_fn.get(k, k)\n else:\n func = map_or_fn\n if not callable(func):\n raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'\n % (str(type(map_or_fn)), str(map_or_fn)))\n keys = self.peys()\n new_keys = keys.apply(func).puniq()\n if len(keys) != len(new_keys):\n raise ValueError('rekey map must return the same number of unique keys as the original pdict. '\n 'Only found %d of %d expected keys.' 
% (len(new_keys), len(keys)))\n vals = self.palues().uproot()\n self.clear()\n self[new_keys] = vals\n return self", "def substitute_keys_in_functions(functions, new_keys):\n for _, func in functions.items():\n func['ret_type'] = new_keys[func['ret_type']]\n substitute_params_keys(func['params'], new_keys)", "def __setitem__(self, key, val):\n for k,v in list(self.__dict__.items()):\n if k == key:\n self.__dict__[key] = val\n return \n print((\"Item %s could not be updated...\" %key))", "def _observe_simple(self):\n return {}", "def __setitem__(self, key, value):\n self._maps[0][key] = value", "def setChanged(self,key):\n if key not in self.data:\n raise ArgumentError(\"No settings data for \"+key)\n if key not in self.changed:\n self.changed.append(key)", "def __setitem__(self, key, val):\n dict.__setitem__(self, key, val)", "def __setitem__(self, key, value):", "def mutate_dict_in_place(func, mapping):\n for key, value in mapping.items():\n if isinstance(value, dict):\n mutate_dict_in_place(func, value)\n else:\n mapping[key] = func(value)", "def dict(self, keys) -> dict:\n return {k: self(k) for k in keys}", "def remap_keys(ds, new_keys):\n logger.info(\"Remapping keys of every element using config:\\n %s\", _dict_to_logstring(new_keys))\n\n def remap_keys(x):\n return {new_keys.get(k, k): v for k, v in x.items() if new_keys.get(k, k) is not None}\n return ds.map(remap_keys, num_parallel_calls=TF_AUTOTUNE)", "def __getstate__(self):\n return {k: v for k, v in self.__dict__.iteritems() if k not in ['x', 'y', '_x', '_y']}", "def update(self, new_values):\n values_copy = new_values.copy()\n for key in self.SET_KEYS:\n if key in values_copy:\n values_copy[key] = set(values_copy[key])\n super(ConfigDict, self).update(values_copy)", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def handle_dict(self, object, name, old, new):\n raise NotImplementedError", "def f(map, key):\n def decorator(function):\n map[key] = function\n return function\n return decorator", "def _modkeys(self, dict, mod):\n newdict = {}\n for (k, v) in dict.items():\n newk = 
k + mod\n newdict[newk] = v\n return newdict", "def __setitem__(self, key, value):\n if key in self.define:\n warnings.warn('Key {} is being overwritten to {}. It had a value of {}. Hope you know what you are doing.'.format(key, value, self.define[key]))\n self.define[key] = value", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __methodDict(cls, _dict):\n baseList = list(cls.__bases__)\n baseList.reverse()\n for _super in baseList:\n __methodDict(_super, _dict)\n for key, value in cls.__dict__.items():\n if type(value) == types.FunctionType:\n _dict[key] = value", "def _reload_values(self):\r\n raise NotImplementedError", "def __setitem__(self, key, val):\n self()[key] = val", "def __setitem__(self, key, value):\n if key not in self.ordered_keys:\n self.ordered_keys.append(key)\n super().__setitem__(key, value)", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def __setitem__(self, key, value):\n self.__dict__[key] = value", "def update_config(original, new):\n for k, v in new.items():\n if isinstance(v, abc.Mapping):\n original[k] = update_config(original.get(k, {}), v)\n else:\n original[k] = v\n return original", "def __setitem__(self,key,value):\n self._register[key] = value\n self._register.sync()", "def handle_sc_event(store, changed_keys, info):\n\n for key in changed_keys:\n SC_HANDLERS[key](key=key, info=info)", "def _reference(self):\r\n return {1:2, \"key1\":\"value1\", \"key2\":(1,2,3)}", "def test_load_updates_dict(self):\n new_dict = {\n 'test_new_key': 'test_new_value',\n 'test_key1': 'new_value',\n }\n self.extension.registration.settings = new_dict\n self.settings.load()\n\n # Should have added test_new_key, and modified test_key1\n self.assertEqual(new_dict['test_new_key'],\n self.settings['test_new_key'])\n self.assertEqual(new_dict['test_key1'], self.settings['test_key1'])\n\n # Should have left test_key2 alone\n self.assertEqual(self.test_dict['test_key2'],\n self.settings['test_key2'])", "def update_keymap(self, new_keymap):\n self.keymap.update(new_keymap)", "def append_event_to_params_dict(self, new_name_and_parameters):\n\n params_dict.update(new_name_and_parameters)", "def testMapUpdate(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap({'step': data_types.BuildStats()})\n with self.assertRaises(AssertionError):\n m.update({1: 2})\n with self.assertRaises(AssertionError):\n m.update(step2=1)\n m.update(step=data_types.BuildStats())\n self.assertEqual(m, {'step': data_types.BuildStats()})", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def set(self, key, value):", "def set(self, key, value):", "def ENFORCEMENT_FUNCTIONS_AS_MODIFIERS(EnforcerDict):\n\n class 
Other:\n def __init__(self):\n self.keystring = ''\n self.valuesum = 0\n def add_function(self, items):\n for key, value in items:\n if value % 2 == 0 and key.islower():\n self.valuesum += value\n self.keystring += key\n yield key, value\n # noinspection PyUnreachableCode\n def remove_function(self, items):\n return\n yield\n\n global other\n other = Other()\n\n enforcer_dict = EnforcerDict(\n dict(A=1, b=2, c=3, D=4),\n add_function=other.add_function,\n remove_function=other.remove_function\n )\n\n assert other.keystring == 'b'\n assert other.valuesum == 2\n\n del enforcer_dict['b']\n\n assert other.keystring == 'b'\n assert other.valuesum == 2\n\n return enforcer_dict", "def __setitem__(self, key, value):\n self.other[key] = value", "def updated_with(orig_dict, *new_values):\n newdict = dict(orig_dict)\n for vals in new_values:\n if vals:\n newdict.update(vals)\n return newdict", "def update_key(self):\n self.__prev_key = self.__new_key", "def _map___iter__(self):\n return self.iterkeys()", "def change_config(self, changesDict):\n for key in sorted(changesDict.keys()):\n self.configDict.update({key: changesDict.get(key)})", "def update(self,dict):\n for key in list(dict.keys()):\n print(\"Hey, I'm updating something\")\n self.d[key]=dict[key]", "def changed(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, key, value):\r\n self.setdefault(key, []).append(value)", "def _reset_changes(self):\r\n self._original = {}\r\n if self.last_updated is not None:\r\n self._original['last_updated'] = self.last_updated", "def tsc_change(self) -> Dict[str, str]:\n return {\n \"type\": self._action.value,\n \"name\": \", \".join(self._names),\n \"link\": self._link,\n }", "def __update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass", "def pre_update(self, **values):\r\n pass", "def modify_on(class_reference, from_dict, to_dict, all=False, custom_condition='', custom_function=False):\n _entries = select_from(class_reference, all, custom_condition, **from_dict)\n _modify = 0\n if custom_function:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key](_entry.__dict__['_'+_key])\n _entry.put()\n _modify += 1\n else:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key]\n _entry.put()\n _modify += 1\n return _modify", "def _update(self, *keys_and_val):\n if len(xxx) < 2:\n raise NotEnoughInfo\n value, *location = xxx[::-1]\n location.reverse()\n final_key = location.pop()\n ptr__target_dct = get_target_dct(location)\n ptr__target_dct[final_key] = value\n return", "def __setstate__(self, dict):\n self.__dict__.update(dict)\n self.start_callback = None\n self.finalize_callback = None", "def __setitem__(self, key, value) -> None:\n # Allows value modification only in __init__.\n caller_method = inspect.getouterframes(inspect.currentframe(), 2)[1][3]\n if caller_method != \"__init__\":\n raise AttributeError\n\n self.__stash[key] = value", "def inverse_update(self, data):\n if not isinstance(data, dict) or not isinstance(self, transforms.MapTransform):\n return data\n d = dict(data)\n for k in self.key_iterator(data):\n transform_key = transforms.TraceableTransform.trace_key(k)\n if 
transform_key not in data or not data[transform_key]:\n continue\n d = transforms.sync_meta_info(k, data, t=False)\n return d", "def restart_function_map():\n rfunc_map = {}\n if run_in_apache():\n rfunc_map['apache2'] = restart_pid_check\n return rfunc_map", "def rename_state_dict_keys(source, key_transformation, target=None):\n if target is None:\n target = source\n\n state_dict = torch.load(source)\n # state_dict = state_dict.state_dict() \n new_state_dict = OrderedDict()\n\n for key, value in state_dict.items():\n new_key = key_transformation(key)\n new_state_dict[new_key] = value\n\n torch.save(new_state_dict, target)", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def observation(self):\n return {k: observer(self._state)\n for k, observer in self.observers.items()}", "def replace_cfs(old_key, new_key):\n altered_methods = []\n for name in methods:\n changed = False\n data = Method(name).load()\n for line in data:\n if line[0] == old_key:\n line[0], changed = new_key, True\n if changed:\n Method(name).write(data)\n altered_methods.append(name)\n return altered_methods", "def _modified(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in self.__modified__:\n yield (key, getattr(self, key))", "def update(self, key, new_value):\n raise NotImplementedError", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def update(self, changes, pipe=None):\n if not changes:\n return\n\n if self.key_name in changes:\n raise InvalidOperation('cannot update the redis key')\n\n deletes = {k for k, v in changes.items() if IS(v, None)}\n updates = {k: v for k, v in changes.items() if k not in deletes}\n\n with self._pipe(pipe) as pipe:\n\n core = self.core(pipe=pipe)\n\n def build(k, v):\n core.hset(self.key, k, v)\n\n def cb():\n self._data[k] = v\n\n pipe.on_execute(cb)\n\n for k, v in updates.items():\n build(k, v)\n\n self.remove(deletes, pipe=pipe)", "def updateSimState(self):\n self.sim_state = {k: v for k,v in self.state.iteritems()}", "def edit_work(self, new_values):\n self.eisenhower_priority()\n self.work_refresh()\n\n for attr, new_val in new_values.items():\n self.__dict__[attr] = new_val\n return self.__dict__", "def change(self, key, old_value, new_value):\n try:\n parts = self.list(key)\n try: parts[parts.index(old_value)] = new_value\n except ValueError:\n self[key] = new_value\n else:\n self[key] = \"\\n\".join(parts)\n except KeyError: self[key] = new_value", "def fill_cache(cache, values_dict):\n cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)", "def _update_key(cls, spec):\n if cls.KEY is not None:\n cls._set_key(spec, spec[\"keys\"].popleft())\n elif cls.REF is not None:\n spec[\"ref\"] = cls.REF", "def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(MapMarker, self)._update_proxy(change)", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')" ]
[ "0.645274", "0.6258521", "0.616605", "0.6099212", "0.60495317", "0.59831303", "0.5854459", "0.585022", "0.5841429", "0.57649016", "0.57519615", "0.5711017", "0.55702174", "0.5550343", "0.5543891", "0.5535223", "0.54757035", "0.54222584", "0.54219085", "0.54190826", "0.5399805", "0.5397984", "0.5397984", "0.5392318", "0.5380758", "0.53718084", "0.53602445", "0.53602445", "0.5358211", "0.53489554", "0.53425384", "0.533187", "0.53269076", "0.53049445", "0.527872", "0.5267525", "0.5252011", "0.52495915", "0.523931", "0.52362126", "0.5227129", "0.5220679", "0.51730657", "0.5155813", "0.5153452", "0.5149735", "0.51445395", "0.51445395", "0.51445395", "0.51345426", "0.5114005", "0.5113844", "0.5104606", "0.5102856", "0.51013637", "0.50867224", "0.50776255", "0.50570095", "0.50466377", "0.5039575", "0.50324285", "0.5024085", "0.5022563", "0.5019064", "0.5018644", "0.5018644", "0.5016088", "0.50151145", "0.50148565", "0.5013379", "0.50117147", "0.50059354", "0.5005628", "0.50049275", "0.5001771", "0.49976677", "0.49959445", "0.49852628", "0.49832192", "0.49821043", "0.49765837", "0.4972342", "0.49681988", "0.49680173", "0.49677268", "0.49545053", "0.49525982", "0.49516788", "0.49489188", "0.49432385", "0.49414957", "0.49369472", "0.49369472", "0.49361652", "0.49333268", "0.49332115", "0.4930842", "0.4927699", "0.49262625", "0.49261916", "0.4924727" ]
0.0
-1
Input properties used for looking up and filtering Invocation resources.
def __init__(__self__, *, function_name: Optional[pulumi.Input[str]] = None, input: Optional[pulumi.Input[str]] = None, qualifier: Optional[pulumi.Input[str]] = None, result: Optional[pulumi.Input[str]] = None, triggers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): if function_name is not None: pulumi.set(__self__, "function_name", function_name) if input is not None: pulumi.set(__self__, "input", input) if qualifier is not None: pulumi.set(__self__, "qualifier", qualifier) if result is not None: pulumi.set(__self__, "result", result) if triggers is not None: pulumi.set(__self__, "triggers", triggers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, operation_inputs):\n\n full_operation_name = ctx.operation.name\n self.operation_name = full_operation_name.split('.').pop()\n\n # These should not make their way into the Operation inputs.\n os.environ['_PAGINATION_OFFSET'] = \\\n text_type(operation_inputs.pop('pagination_offset', 0))\n os.environ['_PAGINATION_SIZE'] = \\\n text_type(operation_inputs.pop('pagination_size', 1000))\n\n # cloudify client\n self.client_config = get_desired_value(\n 'client', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n if self.client_config:\n self.client = CloudifyClient(**self.client_config)\n else:\n self.client = manager.get_rest_client()\n\n # plugins\n self.plugins = get_desired_value(\n 'plugins', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # secrets\n self.secrets = get_desired_value(\n 'secrets', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # resource_config\n self.config = get_desired_value(\n 'resource_config', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties)\n\n # Blueprint-related properties\n self.blueprint = self.config.get('blueprint', {})\n self.blueprint_id = self.blueprint.get('id') or ctx.instance.id\n self.blueprint_file_name = self.blueprint.get('main_file_name')\n self.blueprint_archive = self.blueprint.get('blueprint_archive')\n\n # Deployment-related properties\n self.deployment = self.config.get('deployment', {})\n self.deployment_id = self.deployment.get('id') or ctx.instance.id\n self.deployment_inputs = self.deployment.get('inputs', {})\n self.deployment_outputs = self.deployment.get('outputs')\n self.deployment_all_outputs = self.deployment.get('all_outputs', True)\n self.deployment_logs = self.deployment.get('logs', {})\n\n # Node-instance-related properties\n self.node_instance_proxy = self.config.get('node_instance')\n\n # Execution-related properties\n self.workflow_id = \\\n operation_inputs.get('workflow_id',\n 'create_deployment_environment')\n self.workflow_state = \\\n operation_inputs.get(\n 'workflow_state',\n 'terminated')\n self.reexecute = \\\n self.config.get('reexecute') \\\n or ctx.instance.runtime_properties.get('reexecute') \\\n or False\n\n # Polling-related properties\n self.interval = operation_inputs.get('interval', POLLING_INTERVAL)\n self.state = operation_inputs.get('state', 'terminated')\n self.timeout = operation_inputs.get('timeout', EXECUTIONS_TIMEOUT)\n\n # This ``execution_id`` will be set once execute workflow done\n # successfully\n self.execution_id = None", "def __init__(self, **kwargs):\n Interaction.__init__(self, **kwargs)\n self._produces = [] # the resource(s) produced by this interaction\n self._consumes = [] # the resource(s) consumed by this interaction", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n display_name: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n hub_name: Optional[pulumi.Input[str]] = None,\n link_name: Optional[pulumi.Input[str]] = None,\n mappings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TypePropertiesMappingArgs']]]]] = None,\n operation_type: Optional[pulumi.Input['InstanceOperationType']] = None,\n participant_property_references: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ParticipantPropertyReferenceArgs']]]]] = None,\n reference_only: Optional[pulumi.Input[bool]] = 
None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n source_entity_type: Optional[pulumi.Input['EntityType']] = None,\n source_entity_type_name: Optional[pulumi.Input[str]] = None,\n target_entity_type: Optional[pulumi.Input['EntityType']] = None,\n target_entity_type_name: Optional[pulumi.Input[str]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['description'] = description\n __props__['display_name'] = display_name\n if hub_name is None and not opts.urn:\n raise TypeError(\"Missing required property 'hub_name'\")\n __props__['hub_name'] = hub_name\n __props__['link_name'] = link_name\n __props__['mappings'] = mappings\n __props__['operation_type'] = operation_type\n if participant_property_references is None and not opts.urn:\n raise TypeError(\"Missing required property 'participant_property_references'\")\n __props__['participant_property_references'] = participant_property_references\n __props__['reference_only'] = reference_only\n if resource_group_name is None and not opts.urn:\n raise TypeError(\"Missing required property 'resource_group_name'\")\n __props__['resource_group_name'] = resource_group_name\n if source_entity_type is None and not opts.urn:\n raise TypeError(\"Missing required property 'source_entity_type'\")\n __props__['source_entity_type'] = source_entity_type\n if source_entity_type_name is None and not opts.urn:\n raise TypeError(\"Missing required property 'source_entity_type_name'\")\n __props__['source_entity_type_name'] = source_entity_type_name\n if target_entity_type is None and not opts.urn:\n raise TypeError(\"Missing required property 'target_entity_type'\")\n __props__['target_entity_type'] = target_entity_type\n if target_entity_type_name is None and not opts.urn:\n raise TypeError(\"Missing required property 'target_entity_type_name'\")\n __props__['target_entity_type_name'] = target_entity_type_name\n __props__['name'] = None\n __props__['provisioning_state'] = None\n __props__['tenant_id'] = None\n __props__['type'] = None\n alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_=\"azure-nextgen:customerinsights:Link\"), pulumi.Alias(type_=\"azure-native:customerinsights/latest:Link\"), pulumi.Alias(type_=\"azure-nextgen:customerinsights/latest:Link\"), pulumi.Alias(type_=\"azure-native:customerinsights/v20170101:Link\"), pulumi.Alias(type_=\"azure-nextgen:customerinsights/v20170101:Link\"), pulumi.Alias(type_=\"azure-native:customerinsights/v20170426:Link\"), pulumi.Alias(type_=\"azure-nextgen:customerinsights/v20170426:Link\")])\n opts = pulumi.ResourceOptions.merge(opts, alias_opts)\n super(Link, __self__).__init__(\n 'azure-native:customerinsights:Link',\n resource_name,\n __props__,\n opts)", "def __init__(__self__, *,\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n source: 
pulumi.Input['AuthorizationPolicySpecSourceArgs'],\n target: pulumi.Input['AuthorizationPolicySpecTargetArgs']):\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"target\", target)", "def __init__(self, *properties):\n self._properties = properties", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n billing_account: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disabled: Optional[pulumi.Input[bool]] = None,\n filter: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def properties(self):", "def properties(self):", "def properties(self):", "def get_properties():", "def __init__(__self__, *,\n location: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n access_policies: Optional[pulumi.Input[Sequence[pulumi.Input['KeyVaultSpecAccessPoliciesArgs']]]] = None,\n enable_soft_delete: Optional[pulumi.Input[bool]] = None,\n network_policies: Optional[pulumi.Input['KeyVaultSpecNetworkPoliciesArgs']] = None,\n sku: Optional[pulumi.Input['KeyVaultSpecSkuArgs']] = None):\n pulumi.set(__self__, \"location\", location)\n pulumi.set(__self__, \"resource_group\", resource_group)\n if access_policies is not None:\n pulumi.set(__self__, \"access_policies\", access_policies)\n if enable_soft_delete is not None:\n pulumi.set(__self__, \"enable_soft_delete\", enable_soft_delete)\n if network_policies is not None:\n pulumi.set(__self__, \"network_policies\", network_policies)\n if sku is not None:\n pulumi.set(__self__, \"sku\", sku)", "def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def get_resource_params():\n return Parameter.list()", "def __init__(__self__, *,\n properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if properties is not None:\n pulumi.set(__self__, \"properties\", properties)\n if secrets is not None:\n pulumi.set(__self__, \"secrets\", secrets)", "def __init__(__self__, *,\n properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if properties is not None:\n pulumi.set(__self__, \"properties\", properties)\n if secrets is not None:\n pulumi.set(__self__, \"secrets\", secrets)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n event_bus_name: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def properties(self):\n return {\n \"customAnnotatorClass.{}\".format(self.name): \"edu.stanford.nlp.pipeline.GenericWebServiceAnnotator\",\n \"generic.endpoint\": \"http://{}:{}\".format(self.host, self.port),\n \"generic.requires\": \",\".join(self.requires),\n \"generic.provides\": \",\".join(self.provides),\n }", "def getArguments(self):\n ApiCli.getArguments(self)\n\n if self.args.alarm_name is not None:\n self.alarm_name = self.args.alarm_name\n\n if self.args.metric_name is not None:\n self.metric_name = self.args.metric_name\n\n if self.args.aggregate is not None:\n self.aggregate = self.args.aggregate\n\n if 
self.args.operation is not None:\n self.operation = self.args.operation\n\n if self.args.threshold is not None:\n self.threshold = self.args.threshold\n\n if self.args.interval is not None:\n self.interval = self.args.interval\n\n if self.args.host_group_id is not None:\n self.host_group_id = self.args.host_group_id\n\n if self.args.actions is not None:\n self.actions = self.args.actions\n\n if self.args.note is not None:\n self.note = self.args.note\n\n if self.args.per_host_notify is not None:\n self.per_host_notify = self.args.per_host_notify\n\n if self.args.is_disabled is not None:\n self.is_disabled = self.args.is_disabled\n\n payload = {}\n\n # Create trigger predicate dictionary\n predicate = {}\n\n if self.aggregate is not None:\n predicate['agg'] = self.aggregate\n\n if self.operation is not None:\n predicate['op'] = self.operation\n\n if self.threshold is not None:\n predicate['val'] = self.threshold\n\n if 'agg' in predicate or 'op' in predicate or 'val' in predicate:\n payload['triggerPredicate'] = predicate\n\n # Create payload dictionary\n if self.alarm_name:\n payload['name'] = self.alarm_name\n\n if self.host_group_id is not None:\n payload['hostgroupId'] = self.host_group_id\n\n if self.interval is not None:\n payload['interval'] = self.intervals[self.interval]\n\n if self.metric_name is not None:\n payload['metricName'] = self.metric_name\n\n if self.note is not None:\n payload['note'] = self.note\n\n if self.actions is not None:\n payload['actions'] = self.actions\n\n if self.per_host_notify is not None:\n payload['perHostNotify'] = True if self.per_host_notify == 'yes' else False\n\n if self.is_disabled is not None:\n payload['isDisabled'] = True if self.is_disabled == 'yes' else False\n\n self.data = json.dumps(payload, sort_keys=True)\n self.headers = {'Content-Type': 'application/json'}", "def task_parameters(self):\n yield self.properties", "def properties(self):\n pass", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n entry: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def getProperties(targets):", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n license_count: Optional[pulumi.Input[int]] = None,\n license_count_hard_limit: Optional[pulumi.Input[bool]] = None,\n license_counting_type: Optional[pulumi.Input[str]] = None,\n license_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def 
get_resource_query(self):\n pass", "def get_resource_query(self):\n pass", "def properties(self) -> pulumi.Input['RedisCacheFirewallRuleSpecPropertiesArgs']:\n return pulumi.get(self, \"properties\")", "def readProperties(self):\r\n print('not yet implemented')", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[pulumi.InputType['ConfigArgs']]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n definition: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n role_arn: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: AccessConfigurationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self,\n *,\n resources: List['InvitedUser'] = None) -> None:\n self.resources = resources", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n accelerator_id: Optional[pulumi.Input[str]] = None,\n basic_endpoint_group_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_address: Optional[pulumi.Input[str]] = None,\n endpoint_group_region: Optional[pulumi.Input[str]] = None,\n endpoint_sub_address: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n agent_id: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n actions: pulumi.Input[Sequence[pulumi.Input[str]]],\n principal: pulumi.Input[str]):\n pulumi.set(__self__, \"actions\", actions)\n pulumi.set(__self__, \"principal\", principal)", "def __init__(__self__, *,\n actions: pulumi.Input[Sequence[pulumi.Input[str]]],\n principal: pulumi.Input[str]):\n pulumi.set(__self__, \"actions\", actions)\n pulumi.set(__self__, \"principal\", principal)", "def __init__(__self__, *,\n match_resources: Optional['outputs.MatchResourcesPatch'] = None,\n param_ref: Optional['outputs.ParamRefPatch'] = None,\n policy_name: Optional[str] = None,\n validation_actions: Optional[Sequence[str]] = None):\n if match_resources is not None:\n pulumi.set(__self__, \"match_resources\", match_resources)\n if param_ref is not None:\n pulumi.set(__self__, \"param_ref\", param_ref)\n if policy_name is not None:\n 
pulumi.set(__self__, \"policy_name\", policy_name)\n if validation_actions is not None:\n pulumi.set(__self__, \"validation_actions\", validation_actions)", "def process_property(self, resources, resource, model, prop, context):\n pass", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[pulumi.InputType['SyntheticsPrivateLocationMetadataArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n api_management_id: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n accept_language: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n created_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n has_default_path: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner: Optional[pulumi.Input[str]] = None,\n provisioning_artifact_parameters: Optional[pulumi.Input['ProductProvisioningArtifactParametersArgs']] = None,\n status: Optional[pulumi.Input[str]] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[str]] = None):\n if accept_language is not None:\n pulumi.set(__self__, \"accept_language\", accept_language)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if created_time is not None:\n pulumi.set(__self__, \"created_time\", created_time)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if distributor is not None:\n pulumi.set(__self__, \"distributor\", distributor)\n if has_default_path is not None:\n pulumi.set(__self__, \"has_default_path\", has_default_path)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if owner is not None:\n pulumi.set(__self__, \"owner\", owner)\n if provisioning_artifact_parameters is not None:\n pulumi.set(__self__, \"provisioning_artifact_parameters\", provisioning_artifact_parameters)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if support_description is not None:\n pulumi.set(__self__, \"support_description\", support_description)\n if support_email is not None:\n pulumi.set(__self__, \"support_email\", support_email)\n if support_url is not None:\n pulumi.set(__self__, \"support_url\", support_url)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: Optional[pulumi.Input[bool]] = None,\n permission_policies: 
Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def _get_resource_list(self, blueprint):\n additional_inputs = {}\n\n inputs = blueprint.get('inputs')\n if inputs:\n for section in ['agent_package_urls', 'plugin_resources',\n 'dsl_resources']:\n additional_inputs[section] = inputs[section]['default']\n\n additional_inputs.update(self._get_modules_and_components(inputs))\n\n return additional_inputs", "def _get_search_args(self, args, env, context):\n fields_dict = {'employee_id': context.get('employee_id', False),\n 'holiday_status_id': context.get('holiday_status_id', False),\n 'department_id': context.get('department_id', False),\n 'category_id': context.get('category_id', False),\n 'mode_company_id': context.get('mode_company_id', False),\n 'holiday_type': context.get('holiday_type', False)\n }\n\n _allocation_ids=self._get_allocation_ids(env, fields_dict)\n args.append(['id','in',_allocation_ids])\n return args", "def getProperties():", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n accept_language: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner: Optional[pulumi.Input[str]] = None,\n provisioning_artifact_parameters: Optional[pulumi.Input[pulumi.InputType['ProductProvisioningArtifactParametersArgs']]] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n app_resource_id: Optional[pulumi.Input[str]] = None,\n filters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n open_api: Optional[pulumi.Input['GatewayRouteConfigOpenApiPropertiesArgs']] = None,\n predicates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n protocol: Optional[pulumi.Input[Union[str, 'GatewayRouteConfigProtocol']]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]] = None,\n sso_enabled: Optional[pulumi.Input[bool]] = None):\n if app_resource_id is not None:\n pulumi.set(__self__, \"app_resource_id\", app_resource_id)\n if filters is not None:\n pulumi.set(__self__, \"filters\", filters)\n if open_api is not None:\n pulumi.set(__self__, \"open_api\", open_api)\n if predicates is not None:\n pulumi.set(__self__, \"predicates\", predicates)\n if protocol is None:\n protocol = 'HTTP'\n if protocol is not None:\n pulumi.set(__self__, \"protocol\", protocol)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if sso_enabled is not None:\n pulumi.set(__self__, \"sso_enabled\", sso_enabled)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n network_connection: Optional[pulumi.Input[str]] = None,\n network_excludes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n network_includes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n password_change: Optional[pulumi.Input[str]] = None,\n password_reset: Optional[pulumi.Input[str]] = None,\n password_unlock: 
Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n users_excludeds: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n actions: pulumi.Input[Sequence[pulumi.Input[str]]],\n description: pulumi.Input[str],\n display_name: pulumi.Input[str],\n role_name: pulumi.Input[str],\n service_class: pulumi.Input[str]):\n pulumi.set(__self__, \"actions\", actions)\n pulumi.set(__self__, \"description\", description)\n pulumi.set(__self__, \"display_name\", display_name)\n pulumi.set(__self__, \"role_name\", role_name)\n pulumi.set(__self__, \"service_class\", service_class)", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterPropList, self).__init__(*args, **kwargs)\n\n # Construct the regular expression tag evaluators.\n nameregextag = self.thistag.find('PropNameRegex')\n if nameregextag != None:\n self.nameregex = RegexTag(nameregextag)\n else:\n self.nameregex = None\n\n valueregextag = self.thistag.find('PropValueRegex')\n if valueregextag != None:\n self.valueregex = RegexTag(valueregextag)\n else:\n self.valueregex = None\n\n # Make sure that at least one regular expression is specified.\n if self.nameregex == None and self.valueregex == None:\n raise ValueError('Required tag missing: '\\\n 'PropNameRegex or PropValueRegex')\n\n # Get the \"look for the first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchfirst = {0}'.format(self.matchfirst))\n\n # Get the path name.\n self.path = self.context.tokens['Path']\n logger.debug('path = {0}'.format(self.path))", "def __init__(__self__, *,\n match_resources: Optional['outputs.MatchResources'] = None,\n param_ref: Optional['outputs.ParamRef'] = None,\n policy_name: Optional[str] = None,\n validation_actions: Optional[Sequence[str]] = None):\n if match_resources is not None:\n pulumi.set(__self__, \"match_resources\", match_resources)\n if param_ref is not None:\n pulumi.set(__self__, \"param_ref\", param_ref)\n if policy_name is not None:\n pulumi.set(__self__, \"policy_name\", policy_name)\n if validation_actions is not None:\n pulumi.set(__self__, \"validation_actions\", validation_actions)", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n policy: Optional[pulumi.Input[str]] = None,\n resource_arn: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n 
masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n __props__=None):\n ...", "def properties(self):\n raise NotImplementedError", "def get_resource_terms(self):\n return # osid.resource.ResourceQueryInspector", "def get_resource_terms(self):\n return # osid.resource.ResourceQueryInspector", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n channel: Optional[pulumi.Input[str]] = None,\n destination: Optional[pulumi.Input[pulumi.InputType['DestinationArgs']]] = None,\n event_data_content_type: Optional[pulumi.Input[str]] = None,\n event_filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EventFilterArgs']]]]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n service_account: Optional[pulumi.Input[str]] = None,\n transport: Optional[pulumi.Input[pulumi.InputType['TransportArgs']]] = None,\n trigger_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n active: Optional[pulumi.Input[bool]] = None,\n annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n builtin: Optional[pulumi.Input[bool]] = None,\n checksum: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n external_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n ui_url: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None,\n whitelist_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def property_setup(self, properties):\n return properties", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def properties_owns(cls, *args):\n return cls.graph_traversal(\n None, None, Bytecode()).properties_owns(*args)", "def input_parameters(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"input_parameters\")", "def input_parameters(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"input_parameters\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n additional_properties: Optional[pulumi.Input[str]] = None):\n if additional_properties is not None:\n pulumi.set(__self__, \"additional_properties\", additional_properties)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: 
Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n approved_subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n sleep: Optional[pulumi.Input[int]] = None,\n timeout: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n interception_port: Optional[pulumi.Input[int]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n mesh_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_alias: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n app_id: Optional[pulumi.Input[str]] = None,\n index: Optional[pulumi.Input[str]] = None,\n master: Optional[pulumi.Input[str]] = None,\n pattern: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[str]] = None,\n required: Optional[pulumi.Input[bool]] = None,\n title: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_object_id: Optional[pulumi.Input[str]] = None,\n audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n issuer: Optional[pulumi.Input[str]] = None,\n subject: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n type: pulumi.Input[str],\n properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"type\", type)\n if properties is not None:\n pulumi.set(__self__, \"properties\", properties)\n if secrets is not None:\n pulumi.set(__self__, \"secrets\", secrets)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n config_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = 
None,\n name: Optional[pulumi.Input[str]] = None,\n organization_id: Optional[pulumi.Input[str]] = None,\n pubsub_topic: Optional[pulumi.Input[str]] = None,\n streaming_config: Optional[pulumi.Input[pulumi.InputType['StreamingConfigArgs']]] = None,\n __props__=None):\n ...", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def __init__(__self__,\n resource_name: str,\n args: EndpointArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, resource, *args):\n self.args = list(args)\n self.flags = OrderedDict()\n self.additional_flags = []\n self._AddCommonFlags(resource)", "def get_hyperparams(self):", "def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)", "def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)", "def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", 
resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)", "def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auth_method: Optional[pulumi.Input[str]] = None,\n bind_name: Optional[pulumi.Input[str]] = None,\n bind_type: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n partition: Optional[pulumi.Input[str]] = None,\n selector: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n config_bundle: Optional[pulumi.Input[str]] = None,\n detect_md5hash: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n org_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n policy_type: Optional[pulumi.Input[str]] = None,\n tag_policy: Optional[pulumi.Input[pulumi.InputType['MonitorConfigPolicyTagPolicyArgs']]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n metadata_endpoint: Optional[pulumi.Input[str]] = None,\n opid: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n use_in_api_documentation: Optional[pulumi.Input[bool]] = None,\n use_in_test_console: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def test_properties_get(self):\n pass", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n message_type: Optional[pulumi.Input[str]] = None,\n queue_regex: Optional[pulumi.Input[str]] = None,\n recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n reminder_interval: Optional[pulumi.Input[int]] = None,\n time_threshold: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = 
None,\n value_calculation: Optional[pulumi.Input[str]] = None,\n value_threshold: Optional[pulumi.Input[int]] = None,\n vhost_regex: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ..." ]
[ "0.5469962", "0.54293776", "0.5392672", "0.53824884", "0.5275353", "0.5259073", "0.5255024", "0.5255024", "0.5255024", "0.5230854", "0.51913863", "0.51881474", "0.51731974", "0.51597875", "0.5158078", "0.5158078", "0.5121914", "0.5116958", "0.51032776", "0.510278", "0.5102727", "0.5077048", "0.5073238", "0.50684047", "0.505061", "0.5036405", "0.50356144", "0.50356144", "0.5029351", "0.5023177", "0.501862", "0.5013682", "0.5013682", "0.5004513", "0.49871135", "0.49871135", "0.49871135", "0.49871135", "0.49871135", "0.4985017", "0.49746856", "0.49668822", "0.49666178", "0.4953709", "0.4953212", "0.4953212", "0.4948784", "0.4944145", "0.49349305", "0.49339542", "0.49327254", "0.49314335", "0.49283457", "0.49264124", "0.49224642", "0.49209216", "0.49187794", "0.4905891", "0.49056396", "0.4903951", "0.48945013", "0.48931724", "0.48923615", "0.48787627", "0.48681659", "0.48660332", "0.4861245", "0.4861245", "0.48583046", "0.48574626", "0.48431262", "0.48429096", "0.48347485", "0.48313063", "0.48313063", "0.4827061", "0.482257", "0.48215857", "0.4821149", "0.48129597", "0.48113543", "0.4808392", "0.4805722", "0.48050654", "0.47997546", "0.47977015", "0.479064", "0.47852606", "0.47805655", "0.4779002", "0.47772196", "0.47693065", "0.47693065", "0.47693065", "0.47693065", "0.47692183", "0.47690967", "0.47683296", "0.47659445", "0.47598296", "0.47487494" ]
0.0
-1
Name of the lambda function.
def function_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "function_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def name(self):\n\t\treturn self._func_name", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def get_function_name(self):\n return self.__function", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def function_name(cls):\n function_name = String(cls.__name__).snakecase().lower()\n return function_name", "def getName(self):\n return _libsbml.FunctionDefinition_getName(self)", "def islambda(func):\n return getattr(func, 'func_name', False) == '<lambda>'", "def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)", "def function_name(parameters):", "def get_function_name_at(self, address):\n pass", "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def _callable_name(f):\n try:\n return f.__name__\n except AttributeError:\n if isinstance(f, partial):\n return f.func.__name__\n return f.__class__.__name__", "def function(self) -> str:\n return pulumi.get(self, \"function\")", "def display_name(self) -> str:\n return f\"{self.func.__module__}.{self.func.__qualname__}\"", "def funcName():\r\n import sys\r\n return sys._getframe(1).f_code.co_name", "def method_name(self) -> str:\n if isinstance(self.view_func, str):\n return self.view_func\n return self.view_func.__name__", "def get_function_name(wrapped, instance, args, kwargs):\n return wrapped.__name__", "def get_callable_name(func):\n if isinstance(func, functools.partial):\n return get_callable_name(func.func)\n else:\n return func.__name__", "def function_name(func):\n return log(level=\"info\", message=_function_name(func))", "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def _set_name_scope(self):\n if self.name is None:\n self._name_scope = self.__class__.__name__\n elif self.name == '<lambda>':\n self._name_scope = 'lambda'\n else:\n # E.g. 
'_my_loss' => 'my_loss'\n self._name_scope = self.name.strip('_')", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def _state_name(self):\n return '{}_{}'.format(self.function_name, self.STATE_CONFIG_SUFFIX)", "def getName(self):\n return _libsbml.Event_getName(self)", "def event_name(self):\n return self._event_name", "def this_func_input_name():\n\treturn input_name_from_func_name(inspect.stack()[1][3])", "def get_function_raw_name_at(self, address):\n pass", "def get_function_name():\n\n # inspect.stack()[0][2] returns name of this function\n function_name = inspect.stack()[1][3]\n\n return function_name", "def lambda_func_doc(self, label):\n latex = (\n r'0 = \\lambda - '\n r'\\frac{\\dot{m}_\\mathrm{air}}{\\dot{m}_\\mathrm{air,min}}'\n )\n return generate_latex_eq(self, latex, label)", "def _name(self):\n return self.arguments[0].split('(')[0]", "def _name(self):\n return self._arguments[0].split('(')[0]", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def name(self) -> str:\n return f\"{self.class_object.__name__}.{self.method_str}\"", "def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)", "def name(self):\n return signal_base_get_name(self.obj)", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def is_lambda(fun):\n return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__", "def name(self) -> str:\n return self._formal_name", "def GetCurrentFuncName():\n return sys._getframe(1).f_code.co_name", "def getElementName(self):\n return _libsbml.FunctionDefinition_getElementName(self)", "def node_name(self) -> str:\n op_name = f\"{self.name.name}_{self.name.overload_name}\".lower()\n return \"\".join(word.capitalize() or \"\" for word in op_name.split(\"_\"))", "def cal_name(self):\n return self.event_name", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def trigger_name(self) -> \"str\":\n return self._attrs.get(\"triggerName\")", "def getName(self):\n return signal_base_get_name(self.obj)", "def get_lambdas(self):\n return self.graph.build_lambdas.output.lambda_pairs[-1][:, 0]", "def name(self):\n # type: () -> str\n return self._name", "def name(self):\n return self.__name__", "def destination_function(self):\n return self._event['destination_function_name']", "def get_fun_name(line):\n match = re.match(r'(function|macro)\\s*\\((\\w+)', line)\n if not match:\n return\n return match.groups()[1]", "def name(self):\n # type: () -> string_types\n return self._name", "def name(self):\n # type: () -> string_types\n return self._name", "def event_name(self):\n return dict.get(self, 'event_name', None)", "def fname(func: Callable) -> str:\n return \"{}.{}\".format(func.__module__, func.__name__)", "def get_function(self,attr):\n func_name = self._user_funcs[attr] \n if hasattr(func_name,'__call__'):\n func_name = func_name(self)\n return func_name", "def getElementName(self):\n return _libsbml.ListOfFunctionDefinitions_getElementName(self)", "def getElementName(self):\n return _libsbml.Trigger_getElementName(self)", "def as_function_name(self, string):\n return idaapi.COLSTR(string, 
idaapi.SCOLOR_CNAME)", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def get_function_name(ifunc, *, scoped=False, mangle=False):\n\n name = _translate_function_name(interrogate_function_name(ifunc), mangle)\n\n if scoped:\n parent = interrogate_function_class(ifunc)\n if parent:\n name = get_type_name(parent, scoped=True, mangle=mangle) + '.' + name\n\n return name", "def method_name(self):\n pass", "def get_name():\n return __name__", "def idp_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_lambda_arn\")", "def getElementName(self):\n return _libsbml.FunctionTerm_getElementName(self)", "def test_name_of_func_should_be_passed_name(self):\n scope = self.Test.scope('foo', where='foo')\n self.assertEqual(scope.__name__, 'foo')", "def fn(self):\n return self._fn", "def getName(self):\n return _libsbml.FluxObjective_getName(self)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))", "def name() -> str:\n pass", "def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)", "def name ( self ) :\n return self.__name if self.__name else ''", "def get_class_functional_name(name):\n name = _strip_class_name(name)\n return name", "def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def key(func):\n return func.__func__ if hasattr(func, \"__func__\") else func", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def get_name(listener_id):\n return \"{}{}\".format(f5_const.PREFIX_LISTENER, listener_id)", "def signature(function: model.Function) -> str:\n return str(function.signature)", "def get_filter_name(self):\n pass", "def function(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"function\")", "def getName(self):\r\n return self.__name__" ]
[ "0.77034545", "0.77034545", "0.7593706", "0.7464853", "0.7464853", "0.73329914", "0.7193673", "0.7108169", "0.70655984", "0.6839772", "0.68352073", "0.67619866", "0.6591472", "0.65683824", "0.6567803", "0.6475904", "0.6402211", "0.6297765", "0.6272437", "0.62723136", "0.6268716", "0.62462837", "0.6244579", "0.62361825", "0.6224806", "0.6182332", "0.6182332", "0.6182332", "0.6173039", "0.6165112", "0.61562943", "0.61312586", "0.6110205", "0.60902673", "0.6070161", "0.60338116", "0.60258424", "0.5970842", "0.5958052", "0.5950451", "0.59492046", "0.59424484", "0.592031", "0.59045196", "0.5903703", "0.5897504", "0.58945054", "0.5889464", "0.58314306", "0.58274144", "0.58200705", "0.58184564", "0.581762", "0.57626164", "0.5759648", "0.57526284", "0.5732005", "0.5732005", "0.57317984", "0.57263786", "0.57263625", "0.57248193", "0.57200485", "0.5715866", "0.5708168", "0.5708168", "0.57057124", "0.5693515", "0.56733423", "0.56669897", "0.56647336", "0.566042", "0.56593794", "0.5656557", "0.5649768", "0.5649768", "0.5649768", "0.5649768", "0.5647923", "0.56448925", "0.564054", "0.56388444", "0.5632923", "0.563136", "0.5629419", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.561274", "0.56053305", "0.56021523", "0.559393", "0.5587055" ]
0.73899704
5
JSON payload to the lambda function.
def input(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "input")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lambda_payload(self, date: str, symbol: str) -> bytes:\n self._lambda_event['s3_bucket'] = self._config['s3_bucket']\n self._lambda_event['s3_key_quotes'] = (\n self._config['s3_key_input_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_quotes_suffix'])\n self._lambda_event['s3_key_trades'] = (\n self._config['s3_key_input_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_trades_suffix'])\n self._lambda_event['s3_key_output'] = (\n self._config['s3_key_output_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_output_suffix'])\n return json.dumps(self._lambda_event).encode()", "def lambda_handler(event, context):\n\n if not event[\"body\"]:\n return create_output(400, \"Invalid body: body is empty.\")\n\n try:\n item = json.loads(event[\"body\"])\n except json.JSONDecodeError:\n return create_output(400, \"Invalid body: can't decode body.\")\n\n for key in iris_keys:\n if key not in item:\n return create_output(400, f\"Invalid body: missing key {key} in body.\")\n try:\n float(item[key])\n except ValueError:\n return create_output(400, f\"Invalid body: can't parse {key} to float.\")\n\n item[\"id\"] = create_hash(item)\n\n try:\n table.put_item(Item=item)\n except Exception as e:\n logger.error(f\"Error, can't insert item: {e}\")\n return create_output(500, \"Internal error: can't insert item in table.\")\n\n return create_output(200, \"Item created.\")", "def lambda_handler(event, context):\n logger.debug(event)\n\n product_list = PRODUCT_LIST\n\n return {\n \"statusCode\": 200,\n \"headers\": HEADERS,\n \"body\": json.dumps({\"products\": product_list}),\n }", "def lambda_handler(event, context):\n # EOL char append function\n encode_data = lambda x: \"{data}{eol}\".format(data=json.dumps(x), eol=chr(10)).encode(\"UTF-8\")\n \n # Punk API call\n try:\n logger.debug(\"Requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n request = r.get(os.environ[\"API_URL\"])\n except Exception as e:\n logger.error(\"An error occured while requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n raise e\n \n # Send records to kinesis stream\n logger.debug(\"Sending data to stream: {stream}\".format(stream=os.environ[\"STREAM_NAME\"]))\n for data in request.json():\n client.put_record(\n StreamName=os.environ[\"STREAM_NAME\"],\n Data=encode_data(data),\n PartitionKey=\"key\"\n )\n\n return {\n 'statusCode': request.status_code,\n 'body': data\n }", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def lambda_handler(event, context):\n\n operations = {\n 'POST': main,\n }\n\n if event.get('httpMethod', False):\n operation = event['httpMethod']\n else:\n operation = \"not available\"\n\n payload = base64.b64decode(event['body'])\n try:\n payload = json.loads(payload)\n except TypeError:\n pass\n\n if operation in operations:\n return respond(None, operations[operation](payload))\n else:\n return respond(ValueError(f'Unsupported method {operation}'))", "def lambda_handler(event, context):\n\n # return {\n # \"statusCode\": 200,\n # \"body\": json.dumps(\n # {\n # \"message\": \"hello world\",\n # }\n # ),\n # }\n src_bytes = base64.b64decode(event[\"body\"])\n src = cv2.imdecode(np.frombuffer(src_bytes, dtype=np.uint8), cv2.IMREAD_COLOR)\n gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n _, gray_bytes = cv2.imencode(\".jpg\", gray)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(base64.b64encode(gray_bytes).decode(\"UTF-8\")),\n }", "def payload(self):", "def lambda_handler(event, context):\n\n return {\n 
\"statusCode\": 200,\n \"body\": json.dumps({\n 'message': 'API deployed',\n })\n }", "def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)", "def _send_json(self, payload: dict):\n data = json.dumps(payload)\n return self.send(data)", "def lambda_handler(event, context):\n for item in json.loads(event[\"Records\"][0][\"body\"]):\n item[\"id\"] = uuid.uuid1().bytes\n for key, value in item.items():\n if key == \"id\":\n item[key] = {\"B\": bytes(value)}\n elif key == \"fiscal_year\":\n item[key] = {\"N\": str(value)}\n elif key == \"emissions_mtco2e\":\n item[key] = {\"N\": str(value)}\n elif key == \"consumption\":\n item[key] = {\"N\": str(value)}\n else:\n item[key] = {\"S\": str(value)}\n\n time.sleep(0.001)\n\n dynamo.put_item(TableName=\"Greenhouse_gas_emissions\", Item=dict(item))", "def set_payload(self, payload):\n self.payload = json.dumps(payload)", "def lambda_handler(event, context):\n\n # S3 resource invocation\n s3_resource = boto3.resource('s3')\n # S3 bucket selection\n data_bucket_name = \"put_here_data_bucket_name\"\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n request_body_dict = json.loads(event['body'])\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending both ticker and start date if given\n if request_body_dict['start_date'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_future_request(request_body=request_body_dict,\n s3_resource=s3_resource,\n s3_bucket=data_bucket_name, prefix='valid'))\n # or only ticker name if no start date has been provided\n elif request_body_dict['ticker_name'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_request(ticker_name=request_body_dict['ticker_name'],\n s3_resource=s3_resource, s3_bucket=data_bucket_name,\n prefix='train'))\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n # print data for debug purposes\n print(result)\n\n return {\n 'statusCode': 200,\n 'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},\n 'body': str(result)\n }", "def lambda_handler(event, context):\n\n event_body = json.loads(event['body'])\n print(\"EVENT:\")\n print(event_body)\n\n\n # try:\n # ip = 
requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n recs = flow(event_body, textract, cache = True)\n rval = {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\" : \"hello world\",\n \"textract\" : recs\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n\n return rval", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n\n # Log the values received in the event argument\n logger.info(f'Request event: {event}')\n\n # Define default hard-coded return values\n response = {\n 'uid': 'Example function ID',\n 'return_val01': 'Return value #1',\n 'return_val02': 'Return Value #2',\n }\n\n # Retrieve type of invocation (GET, PUT, etc.)\n if 'http_verb' in event:\n operation = event['http_verb'].upper()\n if operation == 'PUT':\n # Return the values passed to the function\n response = {\n 'uid': event['functionID'],\n 'return_val01': event['parameters']['parm01'],\n 'return_val02': event['parameters']['parm02'],\n }\n\n logger.info(f'Response={response}')\n return response", "def test4():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000147203645/tic000147203645_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n return {\n 'statusCode': 200,\n 'body': say_hello()\n }", "def lambda_handler(event, context):\n\n print(\"EVENT:\")\n print(event)\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n\n recs = flow(event, s3)\n print(recs)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def to_lambda_output(self):\n resp = {\n 'statusCode': self.status_code,\n 'body': self.body,\n 'headers': self.headers\n }\n\n return resp", "def invoke_lambda(lambda_name, lambda_payload):\n try:\n LOGGER.debug(f\"Sending request to '{lambda_name}' method: {lambda_payload}\")\n client = boto3.client('lambda')\n invoke_response = client.invoke(FunctionName=lambda_name,\n InvocationType=\"RequestResponse\",\n Payload=json.dumps(lambda_payload))\n response = json.loads(invoke_response['Payload'].read())\n except Exception as ex:\n LOGGER.debug(f\"Error encountered while invoking lambda method '{lambda_name}': {repr(ex)}\")\n\n return response", "def lambda_handler(event, context):\n # define initial status code and headers\n statusCode = 400\n try:\n # get the body params\n if type(event) == dict:\n event_body = event.get('body', event)\n else:\n event_body = json.loads(event).get('body', {})\n # generate and store the reservation response result from reservation handler function\n reservation_handler = ReservationHandler(EventBodyData=event_body)\n result = reservation_handler.sabre_reservation_handler()\n # define status code, headers and response\n if type(result) == dict:\n statusCode = result.get(\"statusCode\", statusCode)\n response = result.get(\"body\", \"\")\n else:\n response = result\n except Exception as E:\n response = str(E)\n\n # return the 
response\n return {\n 'statusCode': statusCode,\n 'body': response\n }", "def test3():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000129646247_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n blogs = [{\n 'title': 'BETTER UTILITY THAN A TRUCK WITH MORE PERFORMANCE THAN A SPORTS CAR',\n 'description': 'Cybertruck is built with an exterior shell made for ultimate durability and passenger protection. Starting with a nearly impenetrable exoskeleton, every component is designed for superior strength and endurance, from Ultra-Hard 30X Cold-Rolled stainless-steel structural skin to Tesla armor glass.'\n }, {\n 'title': 'ULTRA-HARD 30X COLD-ROLLED STAINLESS STEEL',\n 'description': 'If there was something better, we’d use it. Help eliminate dents, damage and long-term corrosion with a smooth monochrome exoskeleton that puts the shell on the outside of the car and provides you and your passengers maximum protection.'\n }, {\n 'title': 'TESLA ARMOR GLASS',\n 'description': 'Ultra-strong glass and polymer-layered composite can absorb and redirect impact force for improved performance and damage tolerance.'\n }\n ]\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(blogs),\n }", "def lambda_handler(event, context):\n # Boto is always available in AWS lambda, but may not be available in standalone mode\n# import boto3\n# from base64 import b64decode\n\n # To generate the encrypted values, go to AWS IAM Keys and Generate a key\n # Then grant decryption using the key to the IAM Role used for your lambda function.\n #\n # Use the command `aws kms encrypt --key-id alias/<key-alias> --plaintext <value-to-encrypt>\n # Put the encrypted value in the configuration dictionary below\n# encrypted_config = {\n# 'pagerduty_api_key': '<ENCRYPTED VALUE>',\n# 'schedule_ids': '<ENCRYPTED VALUE>'\n# }\n\n# kms = boto3.client('kms')\n# config = {x: kms.decrypt(CiphertextBlob=b64decode(y))['Plaintext'] for x, y in encrypted_config.iteritems()}\n on_call = OnCall(API_KEY, SCHEDULE_IDS)\n output = on_call.run()\n\n return { \"response_type\": \"in_channel\", \"text\": '\\n'.join(output) }", "def lambda_handler(event, context):\n\n try:\n created_item = create_new_table_item(event)\n return {\"statusCode\": 201, \"body\": json.dumps(f\"{created_item}\")}\n\n except BlankRequestBody as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(MISSING_PARAMETERS_MESSAGE)}\n\n except ValidationError as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(INCORRECT_PARAMETERS_MESSAGE)}\n\n except Exception as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 500, \"body\": json.dumps(\"Internal server error\")}", "def lambda_handler(event, context):\n try:\n # Extract the Job ID\n job_id = event['CodePipeline.job']['id']\n\n # Extract the Job Data\n job_data = event['CodePipeline.job']['data']\n\n # Extract the params\n params = get_user_params(job_data)\n\n # Get the lists of artifacts coming in and out of this function\n input_artifacts = job_data['inputArtifacts']\n output_artifacts = job_data['outputArtifacts']\n\n # Perform a build on the source (from source_artifact)\n # and write results to the build_artifact\n s3 = 
setup_s3_client(job_data)\n source_artifact = find_artifact(input_artifacts, params['source_artifact'])\n src_dir = get_zipped_artifact(s3, source_artifact)\n dest_dir = tempfile.mkdtemp()\n perform_build(os.path.join(src_dir, 'src'), dest_dir)\n build_artifact = find_artifact(output_artifacts, params['build_artifact'])\n put_zipped_artifact(s3, dest_dir, build_artifact)\n\n # Pick the template out of the source code and write it to the\n # template_artifact\n template_artifact = find_artifact(output_artifacts, params['template_artifact'])\n put_zipped_artifact(s3, os.path.join(src_dir, params['template_subdir_path']), template_artifact)\n\n shutil.rmtree(src_dir)\n shutil.rmtree(dest_dir)\n put_job_success(job_id, \"Built code: \" + \", template:\")\n\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail the job and log the exception message.\n print('Function failed due to exception.')\n print(e)\n traceback.print_exc()\n put_job_failure(job_id, 'Function exception: ' + str(e))\n\n print('Function complete.')\n return \"Complete.\"", "def lambda_handler(event, context):\n return dispatch(event)", "def handle_json(self, source, data):\n method, args = json.loads(data)\n try:\n result = self.call(source, method, *args)\n except Exception as exc:\n result = str(exc)\n\n return json.dumps(result)", "def get_payload(self):\n if self.payload == '':\n return {}\n\n return json.loads(self.payload)", "def lambda_inject_context(payload, scope):\n try:\n invoke_payload = payload.get('Payload', {})\n\n if not isinstance(invoke_payload, dict):\n invoke_payload = json.loads(invoke_payload)\n\n tracer.inject(scope.span.context, ot.Format.HTTP_HEADERS, invoke_payload)\n payload['Payload'] = json.dumps(invoke_payload)\n except Exception:\n logger.debug(\"non-fatal lambda_inject_context: \", exc_info=True)", "def lambda_handler(event, context):\n feature = session.query(m.Feature).first()\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": feature.title,\n }),\n }", "def callback(ch, method, properties, body):\n record = json.loads(body.decode()) # decode binary string to dict\n pprint(record)", "def payload(self, payload: \"dict\"):\n self._attrs[\"payload\"] = payload", "def lambda_handler(event, context):\n\n # Check that we were passed the required arguments\n validate_event(event)\n\n try:\n numpy_method_name = event.get('method')\n numpy_argument_array = event.get('arguments')\n\n logger.info(\"Handing call to the NumPy {} method with arguments: {}\".format(numpy_method_name, numpy_argument_array))\n result = getattr(numpy, numpy_method_name)(*numpy_argument_array)\n logger.info(\"Result from NumPy is {}\".format(result))\n return {'result': result}\n except:\n error_message = \"Unexpected error: {}\".format(str(sys.exc_info()))\n logger.error(error_message)\n raise Exception(error_message)", "def get_json_payload(self):\n self._payload_to_obj()\n return self._obj_payload", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n try:\n response = s3.get_object(Bucket=BUCKET, Key=KEY)\n print('CONTENT TYPE:', response['ContentType'])\n print('response:')\n pprint.pprint(response)\n print('event')\n pprint.pprint(event)\n print('payload')\n pprint.pprint(event.get('payload'))\n # return json.loads(json.dumps(response, default=str))\n # defined by 
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return {\n 'statusCode': 200,\n 'isBase64Encoded': False,\n 'body': json.dumps(response, default=str)\n }\n # return response['ContentType']\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(KEY, BUCKET))\n raise e", "def pewma():\n try:\n content = request.get_json()\n try:\n data = content[\"data\"]\n except:\n data = content\n result = pewma_model.lambda_handler(data)\n return jsonify(result)\n except Exception as e:\n return jsonify({\"error\": str(e)})", "def on_push(self, payload):\n pass", "def lambda_handler(event, context):\n qpmBucket = event['queryStringParameters']['bucket']\n fn_bucket = list_s3_objects(qpmBucket)\n print(fn_bucket)\n \n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps(str(fn_bucket)),\n }", "def lambda_handler(event, context):\n\n # resolve backend api key from the secrets manager\n sm_client = boto3.client('secretsmanager')\n sm_resp = sm_client.get_secret_value(os.getenv('BACKEND_SERVICE_API_KEY_SECRET_ARN'))\n backend_api_key = json.dumps(sm_resp.get('SecretString')).get('key')\n\n # TODO implement further business logic\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }", "def invokeLambdaFunction(lambdaArn:str, parameters:Dict=None) -> Dict:\n payloadDict = {\n 'http_verb': 'POST',\n 'functionID': lambdaArn,\n }\n if parameters:\n payloadDict['parameters'] = parameters\n return json.loads(_invokeLambdaFunction(lambdaArn, payloadDict)['Payload'].read().decode('utf-8'))", "def lambda_handler(event, context):\n \n filename = None\n fobj = None\n\n try:\n \n filename = 'dlq' + '-' + datetime.datetime.now().strftime(\"%s\")\n fobj = open('/tmp/'+filename, 'w')\n logger.debug('S3 client set up.')\n\n for record in event['Records']:\n fobj.write(json.dumps(record['body']))\n fobj.write(\"\\n\")\n \n except Exception as ex:\n logger.error('Exception in executing ingestion to S3: {}'.format(ex))\n send_sns_alert(str(ex))\n raise\n\n else:\n \n #Saves file to S3\n fobj.close()\n load_data_s3(filename)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Success!')\n }\n\n finally:\n\n # S3 - close temp object\n fobj.close()", "def hello_world(\n event: Dict[str, Any],\n context,\n):\n body_str = event.get(\"body\", \"{}\")\n body_str = body_str if body_str else \"{}\"\n body_obj = json.loads(body_str)\n wiki_search_term = body_obj.get(\"searchTerm\", \"\")\n if not body_obj or not wiki_search_term:\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/handle-errors-in-lambda-integration.html\n response = {\n \"statusCode\": 400,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps({\"message\": \"Wikipedia search term was not provided\"}),\n }\n else:\n summary = wikipedia.summary(wiki_search_term)\n response = {\n \"statusCode\": 200,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps(summary),\n }\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return response", "def invoke_process(fuction_name, account_id, region):\n invoke_payload = (\n json.JSONEncoder().encode(\n {\n \"account\": account_id,\n \"region\": region\n }\n )\n )\n lambda_client.invoke(\n FunctionName=fuction_name,\n 
InvocationType='Event',\n Payload=invoke_payload,\n )", "def test_generic(key,bucket):\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": bucket\n },\n \"object\": {\n \"key\": key\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n print(out)\n assert out[\"statusCode\"] == 200", "def get_json(payload):\n try:\n the_json = json.dumps(payload, indent=4, sort_keys=True)\n return the_json\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def lambda_handler(event: APIGatewayProxyEvent, context: LambdaContext) -> Dict[str, Any]:\n\n return app.resolve(event, context)", "def generate_payload(req):\n logging.info(f'Do something with {req}')\n return json.dumps({\n \"msg1\": \"Hello world 1!\",\n \"msg2\": \"Hello world 2!\"\n })", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n curr_time = str(int(datetime.now().strftime(\"%s\")) * 1000)\n client.put_item(TableName='demo-table', \n Item={\n 'doi': {'S': curr_time}, \n 'crossref_flag': {'BOOL': False},\n 'europepmc_flag': {'BOOL': False},\n 's3_flag': {'BOOL': False},\n 'indexing_flag': {'BOOL': False}\n })\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def lambda_handler(event, context=None):\n response = {}\n try:\n response = middleware.IdentityAuthMiddleWare.process_request(event, response)\n except Exception as e:\n response[\"message\"] = e.message\n response[\"errors\"] = e.errors\n # removing request_dump data\n if \"request_dump\" in response[\"errors\"]:\n del response[\"errors\"][\"request_dump\"]\n for _k, _v in response[\"errors\"].items():\n response[\"errors\"][_k] = str(_v)\n return response", "def lambda_handler(event, context):\r\n body = json.loads(event[\"body\"].replace(\"'\", '\"'))\r\n # This allows the function to run locally by sending requests to a local DynamoDB. 
Option one is for when it's\r\n # being run by SAM, option two for when the tests are being run, and three for production\r\n if os.environ.get('AWS_SAM_LOCAL'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://dynamo:8000')\r\n table = dynamodb.Table(\"pollsStorageDB\")\r\n elif 'local' == os.environ.get('APP_STAGE'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\r\n table = dynamodb.Table(\"pollsStorageDB\")\r\n else:\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table(os.environ[\"DDB_TABLE_NAME\"])\r\n # Create dict to contain the number of votes for each possible response\r\n responses = {}\r\n for answer in body[\"answersList\"]:\r\n responses[answer] = 0\r\n \r\n # Sort out the expiry date\r\n if body.get(\"expiresIn\"):\r\n try:\r\n expiresIn = (datetime.now() + timedelta(days=int(body[\"expiresIn\"]))).timestamp()\r\n except BaseException as e:\r\n print(e)\r\n else:\r\n expiresIn = (datetime.now() + timedelta(days=30)).timestamp()\r\n\r\n # Create unique ID for the poll\r\n randomString = ''.join([random.choice(string.ascii_letters \r\n + string.digits) for n in range(32)]) \r\n poll = {\r\n \"id\": randomString,\r\n 'question': body[\"question\"],\r\n 'answersList': body[\"answersList\"],\r\n 'responses': responses,\r\n 'created': datetime.now().timestamp(),\r\n \"expires\": Decimal(expiresIn)\r\n }\r\n response = table.put_item(\r\n Item=poll\r\n )\r\n \r\n return {\r\n 'headers': {\r\n 'Access-Control-Allow-Headers': 'Content-Type',\r\n 'Access-Control-Allow-Origin': '*',\r\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'\r\n },\r\n \"statusCode\": 200,\r\n \"body\": json.dumps({\r\n \"success\": True,\r\n \"polls\": [poll]\r\n }),\r\n }", "def set_json_payload(self, payload):\n self._str_payload = None\n self._obj_payload = payload", "def lambda_handler(event, context):\n print(event)\n print(context)\n storage_gateway_status()", "def lambda_handler(event, context): # pylint: disable=too-many-locals,too-many-branches,too-many-statements\r\n try: # pylint: disable=too-many-nested-blocks\r\n print(\"Execution started!\")\r\n #print(\"Event: \",event)\r\n # Bucket name and Full path for file - where file will be uploded\r\n source_bucket_name = event[\"detail\"][\"requestParameters\"][\"bucketName\"]\r\n source_key = urllib.parse.unquote_plus(\r\n event[\"detail\"][\"requestParameters\"][\"key\"], encoding='utf-8')\r\n \r\n print(\"file_path: \",source_key)\r\n #Loading master config\r\n print(\"Loading master_config\")\r\n audit_config = {}\r\n config_path = \"./config/\" + \\\r\n os.environ['CCM_ENV'] + \"/master_config.json\"\r\n config_content = open(config_path).read()\r\n config_json = json.loads(config_content)\r\n audit_config = config_json[\"audit_config\"]\r\n snow_params = config_json[\"ERROR_NOTIFICATION_SNOW_PARAMS\"]\r\n athena_query_param = config_json[\"ATHENA_QUERY_PARAMS\"]\r\n athena_table_params = config_json[\"ATHENA_TABLE_PARAMS\"]\r\n\r\n # Audit Parameters Based on the Invoking lambda and its operation involved\r\n audit_config[\"component_type_code\"] = \"ETL\"\r\n audit_config[\"component_name\"] = \"PCP Appflow\"\r\n audit_config[\"source_name\"] = \"Patient Connections Platform\"\r\n audit_config[\"target_name\"] = \"Consumer Consent Management\"\r\n audit_config[\"full_file_path\"] = \"s3://\" + \\\r\n source_bucket_name + \"/\" + source_key\r\n audit_config[\"file_version_id\"] = \"\"\r\n\r\n # Creates Job Entry in ABC Framework\r\n print(\"audit config::\", audit_config)\r\n 
process_execution_id = audit_helper.\\\r\n invoke_edb_abc_log_process_status_event_job_entry(audit_config)\r\n audit_config[\"process_execution_id\"] = process_execution_id\r\n print(\"process_execution_id ::\", process_execution_id)\r\n #print(\"source_key: \",source_key)\r\n s3_write = boto3.client('s3')\r\n record_dict = {}\r\n file_name = \"\"\r\n final_json = \"\"\r\n # prefix = \"\"\r\n # file_list = []\r\n # client = boto3.client(\"s3\")\r\n # result = client.list_objects(Bucket=source_bucket_name, Prefix=source_key, Delimiter='/')\r\n # #print(result)\r\n # for obj in result.get('CommonPrefixes'):\r\n # prefix = obj.get('Prefix')\r\n # #print(prefix)\r\n # file_list = list_files(client,source_bucket_name,prefix)\r\n # for file in file_list:\r\n # #print(file)\r\n json_read = read_s3_file(source_bucket_name, source_key)\r\n data = json.loads(json_read)\r\n #print(data)\r\n if data != '':\r\n record_dict = {k.lower(): v for k, v in data.items()}\r\n print(\"Record_Dict::\",record_dict)\r\n event_type_param = {}\r\n event_type_list = athena_table_params.keys()\r\n print(\"event_type_list\",event_type_list)\r\n for key in event_type_list:\r\n print(\"key\",key)\r\n if key in source_key:\r\n print(\"key\",key)\r\n event_type_param = athena_table_params[key]\r\n print(event_type_param)\r\n if \"changeeventheader\" in record_dict:\r\n if record_dict[\"changeeventheader\"][\"changeType\"] == \"CREATE\":\r\n #and record_dict[\"dtpc_affiliate__c\"] == 'US':\r\n recordid_create = record_dict[\"changeeventheader\"][\"recordIds\"][0]\r\n print(recordid_create)\r\n if recordid_create != '':\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace(\":\",\".\")\r\n create_json = json.dumps(record_dict)\r\n final_json = create_json\r\n file_name = recordid_create + \"-create-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, Key=final_source_key)\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n elif record_dict[\"changeeventheader\"][\"changeType\"] == \"UPDATE\":\r\n record_ids_list = record_dict[\"changeeventheader\"][\"recordIds\"]\r\n if len(record_ids_list) != 0:\r\n for ele in record_ids_list:\r\n print(ele)\r\n element = \"'\" + ele + \"'\"\r\n payload_condition = event_type_param[\"recordid_condition\"]\r\n query = 'SELECT * FROM '+event_type_param[\"athena_create_table\"]+\\\r\n ' WHERE lastmodifieddate IN(SELECT max(lastmodifieddate) from '\\\r\n +event_type_param[\"athena_create_table\"]+\\\r\n ', UNNEST(\"'+payload_condition[0]+'\".\"'+payload_condition[1]+\\\r\n '\") AS ln(jsondata) WHERE jsondata IN ('+element+'));'\r\n print(query)\r\n athena_query_param['athena_query'] = query\r\n query_result_record_id = athena_helper.perform_athena_search\\\r\n (athena_query_param)\r\n print(\"Athena Query Result for Create Path:::\", query_result_record_id)\r\n update_json = create_complete_payload(data,query_result_record_id)\r\n print(\"update_json: \",update_json)\r\n if len(update_json) != 0:\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace\\\r\n (\":\",\".\")\r\n final_json = json.dumps(update_json)\r\n file_name = ele + \"-update-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path 
+ '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, \\\r\n Key=final_source_key)\r\n else:\r\n print(ele,\" does not have a create payload\")\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n else:\r\n raise Exception(\"ChangeEventHeader is missing: \", record_dict)\r\n else:\r\n raise Exception(\"Invalid Payload: \", record_dict)\r\n\r\n except (Exception) as err: # pylint: disable=line-too-long,broad-except\r\n print(\"Error occured: {0}\".format(str(err)))\r\n audit_type = \"error\"\r\n error_msg = sys.exc_info()\r\n exc_type = error_msg\r\n exc_obj = error_msg\r\n snow_params[\"flag\"] = \"FAIL\"\r\n snow_params[\"error_message\"] = str(exc_obj)\r\n snow_params[\"error_type\"] = str(exc_type)\r\n audit_config[\"exception_message\"] = str(exc_obj)\r\n if audit_config != {}:\r\n logging.exception(sys.exc_info())\r\n audit_helper.invoke_edb_abc_log_process_status_event(\r\n audit_type, audit_config) # pylint: disable=line-too-long\r\n audit_helper.raise_snow_incident(snow_params)", "def build_payload():\r\n payload = json.dumps({\"method\": \"ListActivePairedVolumes\",\r\n \"params\": {}, \"id\": 1})\r\n return payload", "def lambda_handler(*_):\n\n # Boto is always available in AWS lambda, but may not be available in\n # standalone mode\n import boto3\n\n # To generate the encrypted values, go to AWS IAM Keys and Generate a key\n # Then grant decryption using the key to the IAM Role used for your lambda\n # function.\n #\n # Use the command `aws kms encrypt --key-id alias/<key-alias> --plaintext <value-to-encrypt>\n # Put the encrypted value in the configuration dictionary below\n encrypted_config = {\n 'slack_token': '<ENCRYPTED VALUE>',\n 'pager_duty_token': '<ENCRYPTED VALUE>'\n 'pager_duty_domain_prefix': '<ENCRYPTED VALUE>'\n }\n\n kms = boto3.client('kms')\n config = {x: kms.decrypt(CiphertextBlob=b64decode(y))['Plaintext'] for x, y in encrypted_config.iteritems()}\n return SlackOnCall(**config).run()", "def body(self):\n return json.dumps(self.data, cls=ServerlessJsonEncoder)", "def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': 
revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n },\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }", "def lambda_handler(event, context):\n logging.info(\"Received event: \" + json.dumps(event, indent=2))\n request_type = event['RequestType']\n if request_type == 'Create':\n attach_policy(event, context)\n elif request_type == 'Delete':\n detach_policy(event, context)\n elif request_type == 'Update':\n update_policy(event, context)", "def __init__(self, payload):\n self.payload = payload", "def payload(self) -> dict:\n return {\n 'event_name': '',\n\n }", "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "def lambda_handler(event, context):\n for record in event[\"Records\"]:\n arn = record[\"Sns\"][\"TopicArn\"].split(\":\")\n message = json.loads(record[\"Sns\"][\"Message\"])\n message_handler(arn[3], message)\n return ''", "def get_payload(self):\n return {'message': 'bar'}", "def payload(self, payload):\n\n self._payload = payload", "def process(self, payload, status_code=0):", "def event(event, context):\n# Sample event:\n #\n # _event = { \"Records\":[\n # {\n # \"eventVersion\":\"2.1\",\n # \"eventSource\":\"aws:s3\",\n # \"awsRegion\":\"us-east-1\",\n # \"eventTime\":\"2021-10-14T07:40:55.113Z\",\n # \"eventName\":\"ObjectCreated:Put\",\n # \"userIdentity\":{\n # \"principalId\":\"AWS:AROA6L2YJX2JCJYHEJ4UI:serverless-image-processing-test-create\"\n # },\n # \"requestParameters\":{\n # \"sourceIPAddress\":\"94.140.8.209\"\n # },\n # \"responseElements\":{\n # \"x-amz-request-id\":\"7CJHSGZ9MZF9995F\",\n # \"x-amz-id-2\":\"X5OtpRb+P9CuYKDHvjT8z9prnqqsH1yatZchN2uw8/158mcRUVhQNSW/z5ffXLqkLhu+4Kc163vZiRgVk3XaGd8H1NhZCu8N\"\n # },\n # \"s3\":{\n # \"s3SchemaVersion\":\"1.0\",\n # \"configurationId\":\"9b8f4135-35d4-4e07-b8a5-7d68cc95870b\",\n # \"bucket\":{\n # 
\"name\":\"serverless-image-processing-test-serverless-image-processing\",\n # \"ownerIdentity\":{\n # \"principalId\":\"A5IHQSLNTJKZN\"\n # },\n # \"arn\":\"arn:aws:s3:::serverless-image-processing-test-serverless-image-processing\"\n # },\n # \"object\":{\n # \"key\":\"test/6e7ef3f0-dcb6-4db6-9518-3bc6ec0ba492\",\n # \"size\":116716,\n # \"eTag\":\"f04e70e100f653a0e67f32f6098dea1c\",\n # \"sequencer\":\"006167DF06C888A626\"\n # }\n # }\n # }\n # ]\n # }\n\n logger.debug('event: {}'.format(event))\n for record in event['Records']:\n processRecord(record)\n\n return {'statusCode': httplib.ACCEPTED}", "def inner(fn_inner):\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler", "def _get_payload(self, method, **params):\n try:\n payload = params['data']['payload']\n if self.prettyprint:\n payload = \"\\n\" + json.dumps(json.loads(payload),\n indent=self.indent)\n except KeyError:\n payload = \"N/A\" if method == \"Event Channel Message\" else \"None\"\n return payload", "def payload(self) -> dict:\n return {\n # TBD\n }", "def lambda_handler(event, context):\n # Environmental Variables\n CATALOG_BRANCHES_TABLE = anejocommon.set_env_var('CATALOG_BRANCHES_TABLE')\n PRODUCT_INFO_TABLE = anejocommon.set_env_var('PRODUCT_INFO_TABLE')\n S3_BUCKET = anejocommon.set_env_var('S3_BUCKET')\n\n # Loop through event records\n try:\n event_records = event['Records']\n except KeyError:\n event_records = [{'body': event}]\n\n for record in event_records:\n try:\n catalog_sync_info = json.loads(record['body'])\n except TypeError:\n catalog_sync_info = record['body']\n\n # Event Variables\n catalog_url = catalog_sync_info['catalog_url']\n\n apple_bucket_catalog_path = anejocommon.get_path_from_url(\n catalog_url,\n 'html',\n append_to_path='.apple'\n )\n \n catalog = anejocommon.retrieve_url(catalog_url)\n try:\n catalog_plist = plistlib.readPlistFromBytes(catalog.data)\n except plistlib.InvalidFileException:\n print(\"ERROR: Cannot read catalog plist\")\n return\n\n # Write our local (filtered) catalogs\n anejocommon.write_local_catalogs(\n apple_bucket_catalog_path,\n catalog_plist,\n S3_BUCKET,\n 
CATALOG_BRANCHES_TABLE,\n PRODUCT_INFO_TABLE\n )", "def build_payload(self, **kwargs):\n\n return None", "def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)", "def lambda_handler(event, context):\n logging.info(event)\n current_time = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n is_conversation_result = 'Details' in event\n if is_conversation_result:\n combine_bot_state_to_s3(event, current_time)\n else:\n save_bot_state_to_s3(event, current_time)\n\n # Generate response back to bot\n response = dict()\n if not is_conversation_result:\n response = {\n 'dialogAction': {\n 'type': 'Delegate',\n 'slots': event['currentIntent']['slots']\n }\n }\n logging.info(response)\n return response", "def aws_lambda(f):\n def wrapper(event, context):\n logger = logging.getLogger(__name__)\n try:\n # if no return value is given by wrapped func,\n # return default status code 200 response.\n r = f(event, context)\n if r is None:\n r = {\n 'statusCode': 200,\n 'body': json.dumps({'input': event})\n }\n return r\n except Exception as e:\n # if exception is thrown, log exception,\n # return exception text,\n # and return status code associated with passed\n # exception type\n logger.info(\n 'Call to {} resulted in exception'.format(f.__name__), e)\n exc_type = type(e)\n # get exception type for code lookup and msg\n if exc_type is type:\n exc_type = e\n msg = e.__name__\n else:\n msg = str(e)\n # get default exception code for raised Exception.\n # default to code 500 if exception is not in codes dict.\n code = codes.get(exc_type, DEFAULT_EXCEPTION_CODE)\n return {\n 'statusCode': code,\n 'body': json.dumps({'input': event, 'message': msg})\n }\n\n wrapper.__name__ = f.__name__ + '_wrapper'\n return wrapper", "def lambda_handler(event, context):\n print 'Received event: ' + json.dumps(event, indent=2)\n print \"Context log stream: \"+ context.log_stream_name\n\n try:\n filename = get_latest_agent_filename()\n download_agent_if_missing(filename)\n prepare_agent_input_data(event, context)\n run_agent(filename)\n\n except URLError as ex:\n print 'Error: ', ex", "def lambda_handler(event, context):\n\n client = boto3.client('events')\n event_to_put = {\n \"source\": \"aws-lambda-function\"\n }\n event_to_put.update(**event)\n try:\n response = client.put_events(\n Entries=[\n {\n 'Source': 'learn.eventbridge',\n 'Detail': json.dumps(event_to_put),\n 'DetailType': 'Learning Eventbridge',\n 'EventBusName': 'default'\n },\n ]\n )\n return {\n 'statusCode': 200,\n 'body': json.dumps('Event has been put on event bus successfully.')\n }\n except Exception as ex:\n return {\n 'statusCode': 500,\n 'body': json.dumps(str(ex))\n }", "def lambda_handler(event, context):\n input = event[\"queryStringParameters\"][\"input\"]\n completed_interpretation = subprocess.run(\n [\"./esco\", \"--quiet\", \"--type\", \"ws\", \"baudelaire.ws\"],\n text=True,\n encoding=\"ascii\",\n input=input,\n stdout=subprocess.PIPE)\n\n # Discard the first two lines of the output (they contain the message\n # \"Enter a word and press Enter:\" and then an empty line).\n trimmed_output = completed_interpretation.stdout.split(\"\\n\", 2)[2]\n\n return {\n \"statusCode\": 200,\n \"headers\": {\"Access-Control-Allow-Origin\": \"*\"},\n \"body\": trimmed_output,\n }", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to 
Lambda Logs\n # print(e)\n\n # raise e\n print(event)\n method=event['httpMethod']\n print(f\"method={method}\")\n print(f\"table_name={table_name}\")\n myTriggerType='instrument_price'\n\n \n if method == \"DELETE\":\n #path=event['path']\n trigger_id=event['pathParameters']['trigger_id']\n print(f\"triggerId={trigger_id}\")\n\n try:\n #see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.delete_item\n response = table.delete_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ConditionExpression=And(Attr('PK').eq(Attr('SK')),Attr('triggerType').eq(myTriggerType)),\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(f\"response={response}\")\n return {\n \"statusCode\": 200,\n \"body\":\"\",\n }\n \n elif method == \"POST\":\n body=json.loads(event['body'])\n trigger_id=body['trigger_identity']\n print(f\"triggerId={trigger_id}\")\n\n response = table.get_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ProjectionExpression=\"triggerEvents, triggerType\",\n )\n print(f\"response={response}\")\n\n if \"Item\" not in response:\n #brand new \n print(f\"inserting {trigger_id}\")\n if 'triggerFields' not in body:\n return iftttError(400, \"triggerFields missing from request\")\n triggerFields=body['triggerFields']\n #todo validate trigger fields\n try:\n response = table.put_item(\n Item={\n 'PK':f\"TR#{myTriggerType}#{trigger_id}\", \n \"SK\":f\"TR#{myTriggerType}#{trigger_id}\",\n 'triggerId': trigger_id,\n #hacky string way to avoid having multiple columns\n 'triggerFields': json.dumps(triggerFields),\n 'triggerType': myTriggerType,\n },\n ConditionExpression=Or(Attr('triggerType').eq(myTriggerType),Attr('triggerType').not_exists())\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n #somehow got created with someone elses triggerType\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(\"response \",response)\n triggered=[]\n elif response['Item'].get(\"triggerType\",myTriggerType) != myTriggerType:\n #it exists but it is someone elses\n return iftttError(404,\"item not found\")\n else:\n item=response['Item']\n print(f\"found {item} \")\n #hacky string way to avoid having multiple columns\n #TODO: change this to use a Map? 
(will allow to add without overwrite)\n events = json.loads(item.get(\"triggerEvents\",\"[]\"))\n triggered= []\n for event in events:\n #TODO: implement limit (not needed now becasue I expect only up to one events)\n triggered.append(event['data'])\n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"data\": triggered,\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n else :\n return iftttError(400, f\"unexpected httpMethod {method}\")", "def payload(self) -> \"dict\":\n return self._attrs.get(\"payload\")", "def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log", "def _dispatch_json(\n self,\n action: str, # get, post, put, delete\n url: str,\n payload: Optional[dict] = None,\n file_list: Optional[list] = None,\n ) -> dict:\n result = self._dispatch(action, url, payload, file_list)\n return json.loads(result)", "def execute(self, **payload):\n pass", "def to_json(self) -> Dict[str, Any]:\n raise Exception(\"Attempted to convert an anonymous Action to JSON\")", "def lambda_handler(event, context):\n\n markdown_files = sys.argv[1:]\n logging.info(f'Markdown files to clean = {markdown_files}')\n for file_to_clean in markdown_files:\n clean_file(file_to_clean)\n print(json.dumps(event))\n body = event['body']\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world from aws.\",\n \"commit\": body\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def lambda_handler(event, context):\n #print(\"event.session.application.applicationId=\" + event['session']['application']['applicationId'])\n\n #if event['session']['new']:\n # on_session_started({'requestId': event['request']['requestId']},event['session'])\n \n intent = None\n try:\n intent = Intent(**event)\n return handle_intent(intent)\n except Exception as ex:\n err = traceback.format_exc()\n print(err)\n return error_handler_func(intent,msg=str(err))", "def _wrap_handler(self, handler, body):\n try:\n decoded_body = json.loads(body)\n result = yield handler(decoded_body)\n return result\n except Exception as e:\n return {\"error\": str(e)}", "def on_create(self, payload):\n pass", "def lambda_handler(event=None, context=None):\n logger.info('Lambda function invoked index()')\n\n # Get configuration from environment\n file_name_default = os.environ.get('FILE_NAME') or \"bamboo_employees\"\n api_key = os.environ.get('BAMBOO_TOKEN')\n url = os.environ.get('BAMBOO_API')\n\n # Parameters, take a file name if exists and remove it from the dict\n file_name = 
request.args.get('file_name') or file_name_default\n\n try:\n # Request data from Bamboo API\n headers = {'Accept': 'application/json'}\n auth = HTTPBasicAuth(api_key, 'x')\n response = requests.get(url=url, headers=headers, auth=auth)\n\n if response.status_code != requests.codes.ok:\n raise Exception('Error making the request to Bamboo\\r\\n')\n\n # Check the result\n result = json.loads(response.text)\n if 'employees' in result:\n # Generate the GeoJSON from API response\n employees = []\n for employee in result['employees']:\n # Bamboo does not provide explicit locations\n point = Point(None, None)\n employees.append(Feature(geometry=point, properties=employee))\n\n # Produce a GeoJSON Feature collection\n body = json.dumps(FeatureCollection(employees))\n attachment = 'attachment; filename={0}.json'.format(file_name)\n\n # Create a response with the proper headers\n # CARTO will use the file name property as the table name\n response = make_response(body)\n response.headers['Content-Type'] = 'application/json'\n response.headers['Content-Disposition'] = attachment\n else:\n raise Exception('No photos on your request')\n\n return response\n\n except Exception as e:\n response = make_response(e.message + \"\\r\\n\")\n response.headers['Content-Type'] = 'text/plain'\n response.status_code = 500\n return response", "def append_json(self, obj: Any, headers: Optional[MultiMapping[str]] = ...) -> Payload:\n ...", "def jwt_encode_handler(payload):\n\n return jwt.encode(\n payload,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_ALGORITHM\n ).decode('utf-8')", "def _build_payload(self, body: Dict) -> Dict[str, Any]:\n return {'jsonrpc': '2.0',\n 'id': self._id_count,\n **body}", "def lambda_handler(event, context):\n params = parse_qs(event['body'])\n token = params['token'][0] if 'token' in params else ''\n\n if not verify_slack_token(token):\n logger.error(\"Request token (%s) does not match expected token\", token)\n return lambda_response(Exception('Invalid request token'))\n\n return gauges_app(params)", "def dumps_json(function):\n def f(*args, **kwargs):\n return json.dumps(function(*args, **kwargs))\n return f", "def serialize(self, content):\r\n content = super(JSONPTemplateEmitter, self).serialize(content)\r\n callback = self.request.GET.get('callback', 'callback')\r\n return '%s(%s)' % (callback, content)", "def _send(self, payload):\n return payload", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n #if (event['session']['application']['applicationId'] != \"<APPLICATION_ID>\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])" ]
[ "0.6537401", "0.6435479", "0.6433545", "0.6389464", "0.63662946", "0.63558084", "0.6291786", "0.6266016", "0.6230665", "0.6211438", "0.62085325", "0.62069964", "0.6166684", "0.6158678", "0.61519873", "0.61504626", "0.61504626", "0.61433136", "0.61325645", "0.6110487", "0.6093063", "0.60631365", "0.60375714", "0.6004712", "0.5986585", "0.5983808", "0.5979236", "0.59614736", "0.5931828", "0.5916565", "0.58867884", "0.58647084", "0.5851088", "0.5839614", "0.5822403", "0.5801316", "0.5752273", "0.5738515", "0.57339835", "0.57125425", "0.57054687", "0.57049215", "0.5697259", "0.5682163", "0.5672458", "0.5666686", "0.56562185", "0.56526953", "0.5641862", "0.56357884", "0.56342924", "0.5581171", "0.5573289", "0.5535858", "0.5526098", "0.5525876", "0.5506696", "0.550109", "0.54967475", "0.5492604", "0.54899275", "0.5489767", "0.5481654", "0.54715586", "0.5468634", "0.5463718", "0.54413235", "0.54097813", "0.54064816", "0.540588", "0.5404118", "0.5400371", "0.5395709", "0.53944707", "0.5394124", "0.53941196", "0.53859323", "0.53762656", "0.5369477", "0.5366468", "0.5362872", "0.53525406", "0.5346742", "0.5328357", "0.5324827", "0.5324255", "0.5322641", "0.5289184", "0.5280226", "0.52790916", "0.5274162", "0.5273418", "0.52733576", "0.52696496", "0.5264568", "0.52623194", "0.52608144", "0.5243123", "0.5240586", "0.5237544", "0.5229909" ]
0.0
-1
Qualifier (i.e., version) of the lambda function. Defaults to `$LATEST`.
def qualifier(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "qualifier")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_version(self) -> str:\n return pulumi.get(self, \"function_version\")", "def get_lambda_latest_version_num(fn_arn: str, region: str) -> int:\n\n client = boto3.client('lambda', region_name=region)\n response = client.list_versions_by_function(FunctionName=fn_arn)\n\n for v in response['Versions']:\n if v['Version'] == '$LATEST':\n latest_hash = v['CodeSha256']\n break\n\n for v in response['Versions']:\n if v['Version'] != '$LATEST' and v['CodeSha256'] == latest_hash:\n return v['Version']", "def get_boto3_version() -> str:\n return boto3_version", "def get_latest_tf_version(include_prerelease: bool = False) -> str:\n return get_available_tf_versions(include_prerelease)[0]", "def get_bucket_versioning(Bucket=None):\n pass", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def get_botocore_version() -> str:\n return botocore_version", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)", "def schema_version(self) -> str:\n return self._pipeline_definition.get(\"version\")", "def version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"version\")", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema_transformation_version\")", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def get_trigger_version(uuid: UUID) 
-> Optional[float]:\n scenario = store.get_scenario(uuid)\n if scenario:\n return scenario.sections['Triggers'].trigger_version\n return None", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def version():\n\n pass", "def resource_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_version\")", "def function_region(self) -> str:\n return pulumi.get(self, \"function_region\")", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def runtime_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"runtime_version\")", "def version(self):\n pass", "def version(self):\n pass", "def version(self):\n pass", "def signature(function: model.Function) -> str:\n return str(function.signature)", "def signature_version(self) -> str:\n return self[\"Sns\"][\"SignatureVersion\"]", "def get_version_tag(self, version: str) -> str:\n return version", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def get_version() -> str:\n return __version__", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def get_trigger(event):\n\n if \"Records\" in event and event[\"Records\"][0][\"eventSource\"] == \"aws:s3\":\n return \"S3\"\n elif \"queryStringParameters\" in event:\n return \"APIGateway\"\n else:\n return \"eval\"", "def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)", "def pyzmq_version():\n if __revision__:\n return '@'.join([__version__,__revision__[:6]])\n else:\n return __version__", "def pyzmq_version():\n if __revision__:\n return '@'.join([__version__,__revision__[:6]])\n else:\n return __version__", "def version(self) -> str:\n return '0.1'", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def _provided_or_most_recent(self) -> str:\n if self._operator == \">=\" and parse(self._version) <= parse(\n self._reserved_latest_version\n ):\n return f\"{self._reserved_name}=={self._reserved_latest_version}\"\n elif (\n self._operator == \"\"\n and self._version == \"\"\n and self._reserved_latest_version != \"\"\n ):\n return f\"{self._reserved_name}=={self._reserved_latest_version}\"\n return self._raw", "def version(self) -> Optional[pulumi.Input['FhirStoreVersion']]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def qualifier(self) -> pulumi.Output[Optional[str]]:\n 
return pulumi.get(self, \"qualifier\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def get_version():\n return 1", "def get_version(self):\n pass", "def probe_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"probe_version\")", "def probe_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"probe_version\")", "def version():\n return __VERSION__", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def version_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_code\")", "def python_branch():\n\n return _sys_version()[2]", "def version(self):\n return self.get_current_version()", "def get_version():\n return \"4.{}\".format(__version__)", "def key_version(self) -> Optional[str]:\n return pulumi.get(self, \"key_version\")", "def get_current_component_version():\n from resource_management.core.exceptions import Fail\n from resource_management.libraries.functions.default import default\n from resource_management.libraries.functions.stack_select import get_role_component_current_stack_version\n from resource_management.libraries.functions.repository_util import CommandRepository\n\n version = default(\"/commandParams/version\", None)\n if not version:\n repository = CommandRepository(default(\"/repositoryFile\", {}))\n if not repository.resolved:\n try:\n version = get_role_component_current_stack_version()\n except (Fail, TypeError):\n pass\n else:\n version = repository.version_string\n\n return version", "def get_version(self):\n return self.cur_config['version']['name']", "def getVersion(self, *args):\n return _libsbml.QualExtension_getVersion(self, *args)", "def version(self):", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def QualExtension_getDefaultVersion():\n return _libsbml.QualExtension_getDefaultVersion()", "def azure_function_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionAzureFunctionEndpointArgs']]:\n return pulumi.get(self, \"azure_function_endpoint\")", "def azure_function_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionAzureFunctionEndpointArgs']]:\n return pulumi.get(self, \"azure_function_endpoint\")", "def version(self):\n raise NotImplementedError('version')", "def type_version_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_version_arn\")", "def _get_function_path_from_list_functions_endpoint(self, function):\n if 'function_scope' in function and function['function_scope']['bucket'] != '*':\n return f\"{function['function_scope']['bucket']}/{function['function_scope']['scope']}/{function['appname']}\"\n return function['appname']", "def python_revision():\n return _sys_version()[3]", "def version(self, newVersion=None):\n pass", "def set_version(self, version=None, version_fun: Callable[[], str] = None):\n def version_compute():\n fun = version_fun\n if fun is None:\n fun = default_version_hash\n\n if version is None:\n return fun()\n else:\n return version\n\n self.version = version_compute\n return self", "def runtime_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")", "def runtime_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")", "def runtime_version(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")" ]
[ "0.6641541", "0.63990086", "0.52345157", "0.5183876", "0.51753867", "0.5168743", "0.51431197", "0.50533974", "0.5019931", "0.50056404", "0.49792406", "0.49541116", "0.4951745", "0.490851", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.49036467", "0.4903161", "0.48988935", "0.4890315", "0.4886417", "0.4873394", "0.48538014", "0.48514664", "0.4850911", "0.4850911", "0.4850911", "0.48409578", "0.4823517", "0.48225918", "0.48151314", "0.48151314", "0.48133403", "0.48089305", "0.48084554", "0.48084554", "0.48036826", "0.48036826", "0.48036826", "0.48036826", "0.48022097", "0.47984475", "0.4796625", "0.4796625", "0.47891447", "0.477903", "0.47718516", "0.47576052", "0.47521162", "0.47521162", "0.47521162", "0.47521162", "0.47521162", "0.4746952", "0.47405112", "0.47405112", "0.47405112", "0.4740011", "0.47340423", "0.47307068", "0.47307068", "0.47284085", "0.4721715", "0.47151256", "0.4708749", "0.47007853", "0.46871963", "0.46798432", "0.46797127", "0.4674949", "0.4669254", "0.46685246", "0.4667397", "0.4667397", "0.4667397", "0.46640718", "0.4659265", "0.4659265", "0.4652548", "0.4649648", "0.46463662", "0.46463326", "0.46406093", "0.46386957", "0.4637632", "0.4637632", "0.4637632" ]
0.4862319
35
String result of the lambda function invocation.
def result(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "result")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def get_lambda_value(lambda_node):\n return get_call_value(lambda_node.body)", "def function(self) -> str:\n return pulumi.get(self, \"function\")", "def __str__(self):\n header = [\n ' ObjectiveFunction:']\n header += [('Function: {}').format(self.func.__name__)]\n header += [('Objective: {}').format(self.objective)]\n return ('\\n').join(header) + '\\n'", "def function(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"function\")", "def __call__(self, the_instance, *args, **kwargs):\n return str(self._function(the_instance, *args, **kwargs))", "def fn(self):\n return \"Hello\"", "def task_6_insert_function_result_into_string(func: Callable):\n return f'start {func()} finish'", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def get_call_string(self) -> Optional[str]: # noqa\n call_repr = get_call_string(self.func_name, self.args, self.kwargs, max_length=75)\n return call_repr", "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def lambda_func_doc(self, label):\n latex = (\n r'0 = \\lambda - '\n r'\\frac{\\dot{m}_\\mathrm{air}}{\\dot{m}_\\mathrm{air,min}}'\n )\n return generate_latex_eq(self, latex, label)", "def to_lambda_output(self):\n resp = {\n 'statusCode': self.status_code,\n 'body': self.body,\n 'headers': self.headers\n }\n\n return resp", "def callback( context ):\n return '<tag>{}</tag>'.format( function( context ) )", "def __str__(self):\n return str(self._event)", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def __call__( self, *args, **kwargs ):\n\n return self.__str__( )", "def fn(self):\n return self._fn", "def lambda_eval(v):\n return v() if hasattr(v, '__call__') else v", "def __str__(self):\n return self.function_representation", "def __repr__(self):\n return ('ObjectiveFunction({}, {})').format(self.func.__name__, self.objective)", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def __repr__(self):\n result = [\n self.__class__.__name__,\n '(func = ',\n repr(self.func),\n ', timeout = ',\n repr(self.timeout),\n ]\n \n cached = self.cached\n if (cached is not ...):\n result.append(' cached = ')\n result.append(repr(cached))\n \n result.append(')')\n \n return ''.join(result)", "def to_string(self):\r\n return self.command()", "def _(self, node: Call):\n\n args = []\n for n in node.arguments:\n args.append(self.visit(n))\n\n func_args = \" \".join(args)\n\n 
return f\"( call {node.func.name} {func_args} )\"", "def __str__(self):\n names = [self.name]\n names += [_callable_name(transform) for transform in self.transforms]\n return ' | '.join(names) + f' -> {self.shape} {self.dtype}'", "def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.", "def __repr__(self) -> str:\n return f\"<Function[{self.name}](line:{self.line})>\"", "def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def process_target(self):\n assert isinstance(self.target, str)\n return f\"%{super().process_target()}%\"", "def debug():\n def _debug(x):\n return e.String(x.as_source())\n yield (\"(λ any . str)\", _debug)", "def __str__(self):\r\n return self.__call__()", "def __repr__(self):\n return \"{0}(callable_obj = {1})\".format(self.__class__.__name__, repr(self._callable))", "def lambda_output(self) -> Optional[pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationOutputLambdaOutputArgs']]:\n return pulumi.get(self, \"lambda_output\")", "def get_function_name(self):\n return self.__function", "def invoke_url(self) -> pulumi.Output[str]:\n return self.stage.invoke_url # type: ignore[no-any-return]", "def command_string(func, targets, sources, kwds):\n args= [repr(targets[0])] if len(targets) == 1 \\\n else [] if not targets else [repr(targets)]\n if sources:\n args.append(repr(sources[0]) if len(sources) == 1\n else repr(sources))\n if kwds:\n args.append(', '.join(['{}={}'.format(k, repr(v))\n for k, v in kwds.items()]))\n return '{}({})'.format(func.__name__, ', '.join(args))", "def eval(self, expression: str) -> str:\n ret = self.exec_(\"print({})\".format(expression))\n ret = ret.strip()\n return ret", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(UmbrellaSampling.key, self.x0, self.kf, self.n_upd)\n\n return strme", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(TransitionMatrix.key, self.nout, self.n_upd, self.mode)\n\n return strme", "def a_function_of_mine():\n return 'result'", "def event(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"event\")", "def f_val_to_str(self):\n old_locked = self._locked\n try:\n return repr(self.f_get())\n except Exception:\n return \"No Evaluation possible (yet)!\"\n finally:\n self._locked = old_locked", "def string(self):\n return f'y = {self.a.item()}'", "def get_trace_string(self):\n return (\"%s -> %s(0x%s) addr:0x%s\" %\n (self.instr_str, self.rd, self.rd_val, self.addr))", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def result_stdout(result):\n return result[1][0]", "def as_string (self) :\n\n if self.is_machinetag() :\n return \"%s:%s=%s\" % (self.namespace(), self.predicate(), self.value())", "def value(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def _function_name(func):\n return \"Calling the function: def 
{}()\".format(func.__name__)", "def __str__(self) -> str:\n st = \"<Output> \"\n if self.inst_out:\n st += f'instance:{self.inst_out};'\n st += f'''{self.output} -> {self.target or '\"\"'} -> '''\n if self.inst_in:\n st += f\"instance:{self.inst_in};\"\n st += self.input\n\n if self.params and not self.inst_in:\n st += f\" ({self.params})\"\n if self.delay != 0:\n st += f\" after {self.delay} seconds\"\n if self.times != -1:\n st += \" (once only)\" if self.times == 1 else f\" ({self.times!s} times only)\"\n return st", "def getCallable():", "def __str__(self):\n return self.result", "def result(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"result\")", "def func_hash(self) -> str:\n\n return self.call_data[:10]", "def log_function_code(func_to_log: Callable) -> str:\n if not callable(func_to_log):\n TypeError(f\"Parameter 'func_to_log' is not function. Actual value: {func_to_log}.\")\n function_definition = inspect.getsource(func_to_log)\n if function_definition.startswith(\"return \"):\n function_definition = function_definition[7:]\n return repr(function_definition.strip())", "def get_result(self, obj):\n return str(obj)", "def get_result(self, obj):\n return str(obj)", "def actionString(self,action):\n return str(self._mdp.A[action])", "def __repr__(self):\n\t\treturn self.func.__doc__", "def __str__(self) -> str:\n # The default str() for Function includes the arity, which is redundant\n # here. Just use the symbol's name.\n root_str = self.root.name\n children_str = ', '.join(str(child) for child in self.children)\n return f'{root_str}({children_str})'", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __call__(self, token_received, **kwargs) -> str:\n print(token_received, flush=True, end=\"\")\n return token_received", "def log_stdout(self, function):\n return function()", "def get_current_value(self) -> str:\n if isinstance(self._function_args, dict):\n # noinspection PyCallingNonCallable\n value = self._value_function(**self._function_args)\n else:\n # noinspection PyCallingNonCallable\n value = self._value_function(*self._function_args)\n\n if callable(self._formatter):\n formatted_value = self._formatter(value)\n elif isinstance(self._formatter, str):\n formatted_value = self._formatter.format(value = value)\n else:\n formatted_value = str(value)\n\n return formatted_value", "def __str__(self):\n return ' '.join([self.source, self.name, str(self.outputs)])", "def __str__(self):\n return str((self.instruction_pointer, self.program,))", "def lambda_rad(self):\n InputFile = self('Meta','InputFile').decode(\"utf-8\")\n d_InputFile = dict([item.replace(' ','').split('=') for item in InputFile.splitlines() if '=' in item])\n if 'lambda' in d_InputFile:\n return float(d_InputFile['lambda'])\n else:\n return self.lambdaref", "def lambda_handler(event, context):\n\n # Log the values received in the event argument\n logger.info(f'Request event: {event}')\n\n # Define default hard-coded return values\n response = {\n 'uid': 'Example function ID',\n 'return_val01': 'Return value #1',\n 'return_val02': 'Return Value #2',\n }\n\n # Retrieve type of invocation (GET, PUT, etc.)\n if 'http_verb' in event:\n operation = event['http_verb'].upper()\n if operation == 'PUT':\n # Return the values passed to the function\n response = {\n 'uid': event['functionID'],\n 'return_val01': event['parameters']['parm01'],\n 'return_val02': event['parameters']['parm02'],\n }\n\n 
logger.info(f'Response={response}')\n return response", "def __str__(self):\n s = self.prev_error.failures + '\\n' if self.prev_error else ''\n\n s += '%s' % self.message\n if self.args[1:]:\n s += ' %s' % str(self.args[1:])\n\n for task in self.tasktrace:\n s += '\\n in %s %s' % (task.task.__name__, task.name)\n return s", "def task_calc():\n return 'What is the result of the expression?'", "def _log_str(self):\n return (\n \"[name: {}, id: {}]\"\n .format(self._raw['Name'] if self._raw else \"<not retrieved>\", self._id)\n )", "def write(self):\n return self.expr.lhs.base.function", "def code(self) -> str:\n input_names = {id: f\"arg_{i + 1}\" for i, id in enumerate(self.role.input_placeholder_ids)}\n output_names = {id: f\"out_{i + 1}\" for i, id in enumerate(self.role.output_placeholder_ids)}\n state_names = {\n ph.id.value: f\"state_{i + 1}\" for i, ph in enumerate(self.role.state.state_placeholders)\n }\n var_names = {**input_names, **output_names, **state_names}\n\n out = f\"def {self.name}(\"\n out += \", \".join([var_names[id] for id in self.role.input_placeholder_ids])\n out += \"):\\n\"\n for action in self.role.actions:\n out += f\" {action.code(var_names)}\\n\"\n\n out += \" return \"\n out += \", \".join([var_names[id] for id in self.role.output_placeholder_ids])\n\n return out", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __str__(self):\n if not six.PY3:\n return unicode(self.args[0]).encode('utf-8')\n\n return self.args[0]", "def __str__(self):\n iso_time = str(datetime.datetime.fromtimestamp(self.next_time))\n return \"<Job(%s, %ss, %s)>\" % (iso_time, self.interval, self.func)", "def view_function(self,v):\n return v", "def name(self):\n\t\treturn self._func_name", "def source_code(self):\n return str(self.source)", "def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)" ]
[ "0.6376477", "0.62443215", "0.62443215", "0.6056641", "0.6049122", "0.6044755", "0.5996724", "0.59543884", "0.59541714", "0.5934549", "0.5891712", "0.58881766", "0.57815313", "0.5733773", "0.5635553", "0.56257564", "0.5584697", "0.55613136", "0.55607224", "0.5553068", "0.55495644", "0.5543365", "0.55380404", "0.551859", "0.549501", "0.549501", "0.549501", "0.5476485", "0.5471808", "0.5453838", "0.54481906", "0.54336894", "0.54287314", "0.54227144", "0.54073226", "0.54073226", "0.5398902", "0.53926", "0.5381694", "0.53717375", "0.5360924", "0.53583896", "0.53533846", "0.5349705", "0.53450173", "0.5338062", "0.5335678", "0.53250545", "0.5315885", "0.53026485", "0.5291306", "0.5278696", "0.52753067", "0.52753067", "0.52753067", "0.52753067", "0.52753067", "0.52753067", "0.52753067", "0.52753067", "0.5261126", "0.5257128", "0.5256985", "0.52492", "0.5238248", "0.5234882", "0.523104", "0.5228466", "0.5225178", "0.5221498", "0.52174294", "0.52174294", "0.5216899", "0.52159137", "0.52122825", "0.52117705", "0.52117705", "0.52117705", "0.5206339", "0.520441", "0.52008253", "0.5200785", "0.51932096", "0.51844484", "0.51665384", "0.5163916", "0.515897", "0.51506364", "0.51469994", "0.51469976", "0.51446015", "0.51446015", "0.51446015", "0.51446015", "0.51446015", "0.5139537", "0.51381785", "0.51346135", "0.5130365", "0.512967", "0.51271003" ]
0.0
-1
Map of arbitrary keys and values that, when changed, will trigger a reinvocation.
def triggers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: return pulumi.get(self, "triggers")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_dict(new,old):", "def test_dictionary_inplace_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value[4] = 5\r\n assert vm.changed", "def test_dict(self):\n event_cache = []\n\n class A(HasTraits):\n x = EventfulDict({c: c for c in 'abc'})\n a = A()\n a.x.on_events(lambda k, v: event_cache.append('add'), \\\n lambda k, v: event_cache.append('set'), \\\n lambda k: event_cache.append('del'))\n\n del a.x['c']\n # ab\n a.x['z'] = 1\n # abz\n a.x['z'] = 'z'\n # abz\n a.x.pop('a')\n # bz \n\n # Were the correct events captured?\n self.assertEqual(event_cache, ['del', 'add', 'set', 'del'])\n\n # Is the output correct?\n self.assertEqual(a.x, {c: c for c in 'bz'})", "def test_dictionary_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value = {4:5}\r\n assert vm.changed", "def _set_toChange(x):\n for key in list(x.keys()):\n self.toChange[key] = True", "def keys(self, value: Dict[str, ValidKVs]) -> None:\n warnings.warn('This is private, call .clear_keys() and update().', DeprecationWarning, stacklevel=2)\n self.clear_keys()\n self.update(value)", "def __setitem__(self, key, value):\n dict.__setitem__(self, key, value)\n\n self.changed()", "def handle_dict(self, object, name, old, new):\n if old is not Uninitialized:\n unregister = self.next.unregister\n for obj in old.values():\n unregister(obj)\n\n register = self.next.register\n for obj in new.values():\n register(obj)", "def renamed_dict(event):\n\n new_dict = thaw(event.data())\n\n for old, new in list(rename_map.items()):\n new_dict[new] = new_dict.pop(old)\n\n return new_dict", "def on_change(key):\n pass", "def dict_change(binary_dict:dict):\r\n dict_change= {}\r\n for key, value in binary_dict.items():\r\n dict_change[value] = key\r\n return dict_change", "def changed_keys(self):\n return self._changed_keys", "def __setitem__(self, key, value):\n super(ReadOnlyDict, self).__setitem__(key, value)", "def handle_dict_items(self, object, name, old, new):\n self.handle_dict(object, name, new.removed, new.added)\n\n if len(new.changed) > 0:\n # If 'name' refers to the '_items' trait, then remove the '_items'\n # suffix to get the actual dictionary trait.\n #\n # fixme: Is there ever a case where 'name' *won't* refer to the\n # '_items' trait?\n if name.endswith(\"_items\"):\n name = name[: -len(\"_items\")]\n\n dict = getattr(object, name)\n unregister = self.next.unregister\n register = self.next.register\n for key, obj in new.changed.items():\n unregister(obj)\n register(dict[key])", "def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover", "def update(self, key, value):\n if key in self.map:\n self.map[key] = value", "def _default_observation_map(self) -> Dict[str, ObservationMapValue]:\n pass", "def __setitem__(self,key,value):\n if key in self.deleted: self.deleted.remove(key)\n if key not in self.changed: self.changed.append(key)\n self.data[key] = value", "def handle_dict_items(self, object, name, old, new):\n raise NotImplementedError", "def _metrics_update(orig, new):\n revsd = orig\n for k, v in orig.items():\n if not v:\n revsd[k] = new[k]\n elif new[k]:\n if new[k] != v:\n # LOG ME, do something\n print(orig)\n print(new)\n elif not new[k] or v:\n 
pass\n else:\n raise Exception(\"_metrics_update error\")\n return revsd", "def __getstate__(self):\n return dict(self.items())", "def __getstate__(self):\n return dict(self.items())", "def __setitem__(key, value):", "def data_dict_update(self, change):\n self.data_dict = change['value']", "def rename_dictkey(self, kwargs, old, new):\n x = kwargs.copy()\n x[new] = x.pop(old)\n return x", "def rekey(self, map_or_fn=None, inplace=False, **kw):\n if not inplace:\n return self.copy().rekey(map_or_fn, inplace=True, **kw)\n if map_or_fn is None:\n map_or_fn = kw\n if isinstance(map_or_fn, dict):\n if map_or_fn is not kw:\n map_or_fn.update(kw)\n func = lambda k: map_or_fn.get(k, k)\n else:\n func = map_or_fn\n if not callable(func):\n raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'\n % (str(type(map_or_fn)), str(map_or_fn)))\n keys = self.peys()\n new_keys = keys.apply(func).puniq()\n if len(keys) != len(new_keys):\n raise ValueError('rekey map must return the same number of unique keys as the original pdict. '\n 'Only found %d of %d expected keys.' % (len(new_keys), len(keys)))\n vals = self.palues().uproot()\n self.clear()\n self[new_keys] = vals\n return self", "def rekey(self, map_or_fn=None, inplace=False, **kw):\n if not inplace:\n return self.copy().rekey(map_or_fn, inplace=True, **kw)\n if map_or_fn is None:\n map_or_fn = kw\n if isinstance(map_or_fn, dict):\n if map_or_fn is not kw:\n map_or_fn.update(kw)\n func = lambda k: map_or_fn.get(k, k)\n else:\n func = map_or_fn\n if not callable(func):\n raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'\n % (str(type(map_or_fn)), str(map_or_fn)))\n keys = self.peys()\n new_keys = keys.apply(func).puniq()\n if len(keys) != len(new_keys):\n raise ValueError('rekey map must return the same number of unique keys as the original pdict. '\n 'Only found %d of %d expected keys.' 
% (len(new_keys), len(keys)))\n vals = self.palues().uproot()\n self.clear()\n self[new_keys] = vals\n return self", "def substitute_keys_in_functions(functions, new_keys):\n for _, func in functions.items():\n func['ret_type'] = new_keys[func['ret_type']]\n substitute_params_keys(func['params'], new_keys)", "def __setitem__(self, key, val):\n for k,v in list(self.__dict__.items()):\n if k == key:\n self.__dict__[key] = val\n return \n print((\"Item %s could not be updated...\" %key))", "def _observe_simple(self):\n return {}", "def __setitem__(self, key, value):\n self._maps[0][key] = value", "def setChanged(self,key):\n if key not in self.data:\n raise ArgumentError(\"No settings data for \"+key)\n if key not in self.changed:\n self.changed.append(key)", "def __setitem__(self, key, val):\n dict.__setitem__(self, key, val)", "def __setitem__(self, key, value):", "def mutate_dict_in_place(func, mapping):\n for key, value in mapping.items():\n if isinstance(value, dict):\n mutate_dict_in_place(func, value)\n else:\n mapping[key] = func(value)", "def dict(self, keys) -> dict:\n return {k: self(k) for k in keys}", "def remap_keys(ds, new_keys):\n logger.info(\"Remapping keys of every element using config:\\n %s\", _dict_to_logstring(new_keys))\n\n def remap_keys(x):\n return {new_keys.get(k, k): v for k, v in x.items() if new_keys.get(k, k) is not None}\n return ds.map(remap_keys, num_parallel_calls=TF_AUTOTUNE)", "def __getstate__(self):\n return {k: v for k, v in self.__dict__.iteritems() if k not in ['x', 'y', '_x', '_y']}", "def update(self, new_values):\n values_copy = new_values.copy()\n for key in self.SET_KEYS:\n if key in values_copy:\n values_copy[key] = set(values_copy[key])\n super(ConfigDict, self).update(values_copy)", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def handle_dict(self, object, name, old, new):\n raise NotImplementedError", "def f(map, key):\n def decorator(function):\n map[key] = function\n return function\n return decorator", "def _modkeys(self, dict, mod):\n newdict = {}\n for (k, v) in dict.items():\n newk = 
k + mod\n newdict[newk] = v\n return newdict", "def __setitem__(self, key, value):\n if key in self.define:\n warnings.warn('Key {} is being overwritten to {}. It had a value of {}. Hope you know what you are doing.'.format(key, value, self.define[key]))\n self.define[key] = value", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __methodDict(cls, _dict):\n baseList = list(cls.__bases__)\n baseList.reverse()\n for _super in baseList:\n __methodDict(_super, _dict)\n for key, value in cls.__dict__.items():\n if type(value) == types.FunctionType:\n _dict[key] = value", "def _reload_values(self):\r\n raise NotImplementedError", "def __setitem__(self, key, val):\n self()[key] = val", "def __setitem__(self, key, value):\n if key not in self.ordered_keys:\n self.ordered_keys.append(key)\n super().__setitem__(key, value)", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def __setitem__(self, key, value):\n self.__dict__[key] = value", "def update_config(original, new):\n for k, v in new.items():\n if isinstance(v, abc.Mapping):\n original[k] = update_config(original.get(k, {}), v)\n else:\n original[k] = v\n return original", "def __setitem__(self,key,value):\n self._register[key] = value\n self._register.sync()", "def handle_sc_event(store, changed_keys, info):\n\n for key in changed_keys:\n SC_HANDLERS[key](key=key, info=info)", "def _reference(self):\r\n return {1:2, \"key1\":\"value1\", \"key2\":(1,2,3)}", "def test_load_updates_dict(self):\n new_dict = {\n 'test_new_key': 'test_new_value',\n 'test_key1': 'new_value',\n }\n self.extension.registration.settings = new_dict\n self.settings.load()\n\n # Should have added test_new_key, and modified test_key1\n self.assertEqual(new_dict['test_new_key'],\n self.settings['test_new_key'])\n self.assertEqual(new_dict['test_key1'], self.settings['test_key1'])\n\n # Should have left test_key2 alone\n self.assertEqual(self.test_dict['test_key2'],\n self.settings['test_key2'])", "def update_keymap(self, new_keymap):\n self.keymap.update(new_keymap)", "def append_event_to_params_dict(self, new_name_and_parameters):\n\n params_dict.update(new_name_and_parameters)", "def testMapUpdate(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap({'step': data_types.BuildStats()})\n with self.assertRaises(AssertionError):\n m.update({1: 2})\n with self.assertRaises(AssertionError):\n m.update(step2=1)\n m.update(step=data_types.BuildStats())\n self.assertEqual(m, {'step': data_types.BuildStats()})", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def set(self, key, value):", "def set(self, key, value):", "def updated_with(orig_dict, *new_values):\n newdict = 
dict(orig_dict)\n for vals in new_values:\n if vals:\n newdict.update(vals)\n return newdict", "def ENFORCEMENT_FUNCTIONS_AS_MODIFIERS(EnforcerDict):\n\n class Other:\n def __init__(self):\n self.keystring = ''\n self.valuesum = 0\n def add_function(self, items):\n for key, value in items:\n if value % 2 == 0 and key.islower():\n self.valuesum += value\n self.keystring += key\n yield key, value\n # noinspection PyUnreachableCode\n def remove_function(self, items):\n return\n yield\n\n global other\n other = Other()\n\n enforcer_dict = EnforcerDict(\n dict(A=1, b=2, c=3, D=4),\n add_function=other.add_function,\n remove_function=other.remove_function\n )\n\n assert other.keystring == 'b'\n assert other.valuesum == 2\n\n del enforcer_dict['b']\n\n assert other.keystring == 'b'\n assert other.valuesum == 2\n\n return enforcer_dict", "def __setitem__(self, key, value):\n self.other[key] = value", "def update_key(self):\n self.__prev_key = self.__new_key", "def _map___iter__(self):\n return self.iterkeys()", "def change_config(self, changesDict):\n for key in sorted(changesDict.keys()):\n self.configDict.update({key: changesDict.get(key)})", "def update(self,dict):\n for key in list(dict.keys()):\n print(\"Hey, I'm updating something\")\n self.d[key]=dict[key]", "def changed(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, key, value):\r\n self.setdefault(key, []).append(value)", "def _reset_changes(self):\r\n self._original = {}\r\n if self.last_updated is not None:\r\n self._original['last_updated'] = self.last_updated", "def tsc_change(self) -> Dict[str, str]:\n return {\n \"type\": self._action.value,\n \"name\": \", \".join(self._names),\n \"link\": self._link,\n }", "def __update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass", "def modify_on(class_reference, from_dict, to_dict, all=False, custom_condition='', custom_function=False):\n _entries = select_from(class_reference, all, custom_condition, **from_dict)\n _modify = 0\n if custom_function:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key](_entry.__dict__['_'+_key])\n _entry.put()\n _modify += 1\n else:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key]\n _entry.put()\n _modify += 1\n return _modify", "def pre_update(self, **values):\r\n pass", "def _update(self, *keys_and_val):\n if len(xxx) < 2:\n raise NotEnoughInfo\n value, *location = xxx[::-1]\n location.reverse()\n final_key = location.pop()\n ptr__target_dct = get_target_dct(location)\n ptr__target_dct[final_key] = value\n return", "def __setstate__(self, dict):\n self.__dict__.update(dict)\n self.start_callback = None\n self.finalize_callback = None", "def inverse_update(self, data):\n if not isinstance(data, dict) or not isinstance(self, transforms.MapTransform):\n return data\n d = dict(data)\n for k in self.key_iterator(data):\n transform_key = transforms.TraceableTransform.trace_key(k)\n if transform_key not in data or not data[transform_key]:\n continue\n d = transforms.sync_meta_info(k, data, t=False)\n return d", "def restart_function_map():\n rfunc_map = {}\n if run_in_apache():\n rfunc_map['apache2'] = restart_pid_check\n return 
rfunc_map", "def __setitem__(self, key, value) -> None:\n # Allows value modification only in __init__.\n caller_method = inspect.getouterframes(inspect.currentframe(), 2)[1][3]\n if caller_method != \"__init__\":\n raise AttributeError\n\n self.__stash[key] = value", "def rename_state_dict_keys(source, key_transformation, target=None):\n if target is None:\n target = source\n\n state_dict = torch.load(source)\n # state_dict = state_dict.state_dict() \n new_state_dict = OrderedDict()\n\n for key, value in state_dict.items():\n new_key = key_transformation(key)\n new_state_dict[new_key] = value\n\n torch.save(new_state_dict, target)", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def observation(self):\n return {k: observer(self._state)\n for k, observer in self.observers.items()}", "def replace_cfs(old_key, new_key):\n altered_methods = []\n for name in methods:\n changed = False\n data = Method(name).load()\n for line in data:\n if line[0] == old_key:\n line[0], changed = new_key, True\n if changed:\n Method(name).write(data)\n altered_methods.append(name)\n return altered_methods", "def _modified(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in self.__modified__:\n yield (key, getattr(self, key))", "def update(self, key, new_value):\n raise NotImplementedError", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def update(self, changes, pipe=None):\n if not changes:\n return\n\n if self.key_name in changes:\n raise InvalidOperation('cannot update the redis key')\n\n deletes = {k for k, v in changes.items() if IS(v, None)}\n updates = {k: v for k, v in changes.items() if k not in deletes}\n\n with self._pipe(pipe) as pipe:\n\n core = self.core(pipe=pipe)\n\n def build(k, v):\n core.hset(self.key, k, v)\n\n def cb():\n self._data[k] = v\n\n pipe.on_execute(cb)\n\n for k, v in updates.items():\n build(k, v)\n\n self.remove(deletes, pipe=pipe)", "def updateSimState(self):\n self.sim_state = {k: v for k,v in self.state.iteritems()}", "def edit_work(self, new_values):\n self.eisenhower_priority()\n self.work_refresh()\n\n for attr, new_val in new_values.items():\n self.__dict__[attr] = new_val\n return self.__dict__", "def change(self, key, old_value, new_value):\n try:\n parts = self.list(key)\n try: parts[parts.index(old_value)] = new_value\n except ValueError:\n self[key] = new_value\n else:\n self[key] = \"\\n\".join(parts)\n except KeyError: self[key] = new_value", "def fill_cache(cache, values_dict):\n cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)", "def _update_key(cls, spec):\n if cls.KEY is not None:\n cls._set_key(spec, spec[\"keys\"].popleft())\n elif cls.REF is not None:\n spec[\"ref\"] = cls.REF", "def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(MapMarker, self)._update_proxy(change)", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')" ]
[ "0.6451828", "0.6257158", "0.6163793", "0.60974216", "0.60500365", "0.5982999", "0.58533454", "0.5849826", "0.58412707", "0.57633644", "0.5753699", "0.571072", "0.55697256", "0.5549071", "0.5544989", "0.5534039", "0.54753613", "0.5421922", "0.5420793", "0.54177576", "0.5398981", "0.53974", "0.53974", "0.53914607", "0.5379817", "0.5373142", "0.5360809", "0.5360809", "0.53599167", "0.5347453", "0.53405535", "0.53315574", "0.5326357", "0.530392", "0.52773076", "0.5268492", "0.52548176", "0.5252297", "0.52396864", "0.523616", "0.52261966", "0.5219407", "0.5173805", "0.51569015", "0.5152584", "0.51515573", "0.5143109", "0.5143109", "0.5143109", "0.5134307", "0.5113886", "0.51131195", "0.51045823", "0.5101222", "0.5100887", "0.5087412", "0.50765306", "0.50554496", "0.50475526", "0.5038443", "0.5031126", "0.50236213", "0.5021076", "0.50205004", "0.50182617", "0.50182617", "0.501588", "0.50158733", "0.50149", "0.5013055", "0.501273", "0.50054586", "0.5005392", "0.50025105", "0.5001886", "0.4996475", "0.49949667", "0.4984378", "0.4981768", "0.49812442", "0.49771667", "0.4970454", "0.4968526", "0.4966967", "0.49668553", "0.4956426", "0.49534613", "0.4951726", "0.49477312", "0.4942866", "0.49413267", "0.49371615", "0.49371615", "0.49353623", "0.49324816", "0.49324414", "0.4930585", "0.49280316", "0.4926312", "0.4923969", "0.49236536" ]
0.0
-1
Get an existing Invocation resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, function_name: Optional[pulumi.Input[str]] = None, input: Optional[pulumi.Input[str]] = None, qualifier: Optional[pulumi.Input[str]] = None, result: Optional[pulumi.Input[str]] = None, triggers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Invocation': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _InvocationState.__new__(_InvocationState) __props__.__dict__["function_name"] = function_name __props__.__dict__["input"] = input __props__.__dict__["qualifier"] = qualifier __props__.__dict__["result"] = result __props__.__dict__["triggers"] = triggers return Invocation(resource_name, opts=opts, __props__=__props__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None) -> 'Agent':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AgentState.__new__(_AgentState)\n\n __props__.__dict__[\"activation_key\"] = activation_key\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"ip_address\"] = ip_address\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_link_endpoint\"] = private_link_endpoint\n __props__.__dict__[\"security_group_arns\"] = security_group_arns\n __props__.__dict__[\"subnet_arns\"] = subnet_arns\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"vpc_endpoint_id\"] = vpc_endpoint_id\n return Agent(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"add_on\"] = add_on\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"availability_zone\"] = 
availability_zone\n __props__.__dict__[\"blueprint_id\"] = blueprint_id\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"cpu_count\"] = cpu_count\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"ip_address_type\"] = ip_address_type\n __props__.__dict__[\"ipv6_address\"] = ipv6_address\n __props__.__dict__[\"ipv6_addresses\"] = ipv6_addresses\n __props__.__dict__[\"is_static_ip\"] = is_static_ip\n __props__.__dict__[\"key_pair_name\"] = key_pair_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"public_ip_address\"] = public_ip_address\n __props__.__dict__[\"ram_size\"] = ram_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_data\"] = user_data\n __props__.__dict__[\"username\"] = username\n return Instance(resource_name, opts=opts, __props__=__props__)", "def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"build\"] = None\n __props__.__dict__[\"config\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"state_message\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = 
pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n encryption_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceEncryptionConfigurationArgs']]] = None,\n health_check_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHealthCheckConfigurationArgs']]] = None,\n instance_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceInstanceConfigurationArgs']]] = None,\n network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None,\n observability_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceObservabilityConfigurationArgs']]] = None,\n service_id: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n service_url: Optional[pulumi.Input[str]] = None,\n source_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceSourceConfigurationArgs']]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auto_scaling_configuration_arn\"] = auto_scaling_configuration_arn\n __props__.__dict__[\"encryption_configuration\"] = encryption_configuration\n __props__.__dict__[\"health_check_configuration\"] = health_check_configuration\n __props__.__dict__[\"instance_configuration\"] = instance_configuration\n __props__.__dict__[\"network_configuration\"] = network_configuration\n __props__.__dict__[\"observability_configuration\"] = observability_configuration\n __props__.__dict__[\"service_id\"] = service_id\n __props__.__dict__[\"service_name\"] = service_name\n __props__.__dict__[\"service_url\"] = service_url\n __props__.__dict__[\"source_configuration\"] = source_configuration\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return Service(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: 
pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Workflow':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = WorkflowArgs.__new__(WorkflowArgs)\n\n __props__.__dict__[\"acr\"] = None\n __props__.__dict__[\"aks_resource_id\"] = None\n __props__.__dict__[\"app_name\"] = None\n __props__.__dict__[\"auth_status\"] = None\n __props__.__dict__[\"branch_name\"] = None\n __props__.__dict__[\"builder_version\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"docker_build_context\"] = None\n __props__.__dict__[\"dockerfile\"] = None\n __props__.__dict__[\"dockerfile_generation_mode\"] = None\n __props__.__dict__[\"dockerfile_output_directory\"] = None\n __props__.__dict__[\"generation_language\"] = None\n __props__.__dict__[\"image_name\"] = None\n __props__.__dict__[\"image_tag\"] = None\n __props__.__dict__[\"language_version\"] = None\n __props__.__dict__[\"last_workflow_run\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"manifest_generation_mode\"] = None\n __props__.__dict__[\"manifest_output_directory\"] = None\n __props__.__dict__[\"manifest_type\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"namespace\"] = None\n __props__.__dict__[\"oidc_credentials\"] = None\n __props__.__dict__[\"port\"] = None\n __props__.__dict__[\"pr_status\"] = None\n __props__.__dict__[\"pr_url\"] = None\n __props__.__dict__[\"pull_number\"] = None\n __props__.__dict__[\"repository_name\"] = None\n __props__.__dict__[\"repository_owner\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Workflow(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n additional_locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceAdditionalLocationArgs']]]]] = None,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCertificateArgs']]]]] = None,\n client_certificate_enabled: Optional[pulumi.Input[bool]] = None,\n delegation: Optional[pulumi.Input[pulumi.InputType['ServiceDelegationArgs']]] = None,\n developer_portal_url: Optional[pulumi.Input[str]] = None,\n gateway_disabled: Optional[pulumi.Input[bool]] = None,\n gateway_regional_url: Optional[pulumi.Input[str]] = None,\n gateway_url: Optional[pulumi.Input[str]] = None,\n hostname_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHostnameConfigurationArgs']]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n management_api_url: Optional[pulumi.Input[str]] = None,\n min_api_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n notification_sender_email: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[pulumi.InputType['ServicePolicyArgs']]] = None,\n portal_url: Optional[pulumi.Input[str]] = None,\n private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n protocols: Optional[pulumi.Input[pulumi.InputType['ServiceProtocolsArgs']]] = None,\n public_ip_address_id: Optional[pulumi.Input[str]] = None,\n public_ip_addresses: 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n publisher_email: Optional[pulumi.Input[str]] = None,\n publisher_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n scm_url: Optional[pulumi.Input[str]] = None,\n security: Optional[pulumi.Input[pulumi.InputType['ServiceSecurityArgs']]] = None,\n sign_in: Optional[pulumi.Input[pulumi.InputType['ServiceSignInArgs']]] = None,\n sign_up: Optional[pulumi.Input[pulumi.InputType['ServiceSignUpArgs']]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tenant_access: Optional[pulumi.Input[pulumi.InputType['ServiceTenantAccessArgs']]] = None,\n virtual_network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceVirtualNetworkConfigurationArgs']]] = None,\n virtual_network_type: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"additional_locations\"] = additional_locations\n __props__.__dict__[\"certificates\"] = certificates\n __props__.__dict__[\"client_certificate_enabled\"] = client_certificate_enabled\n __props__.__dict__[\"delegation\"] = delegation\n __props__.__dict__[\"developer_portal_url\"] = developer_portal_url\n __props__.__dict__[\"gateway_disabled\"] = gateway_disabled\n __props__.__dict__[\"gateway_regional_url\"] = gateway_regional_url\n __props__.__dict__[\"gateway_url\"] = gateway_url\n __props__.__dict__[\"hostname_configuration\"] = hostname_configuration\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"management_api_url\"] = management_api_url\n __props__.__dict__[\"min_api_version\"] = min_api_version\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"notification_sender_email\"] = notification_sender_email\n __props__.__dict__[\"policy\"] = policy\n __props__.__dict__[\"portal_url\"] = portal_url\n __props__.__dict__[\"private_ip_addresses\"] = private_ip_addresses\n __props__.__dict__[\"protocols\"] = protocols\n __props__.__dict__[\"public_ip_address_id\"] = public_ip_address_id\n __props__.__dict__[\"public_ip_addresses\"] = public_ip_addresses\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"publisher_email\"] = publisher_email\n __props__.__dict__[\"publisher_name\"] = publisher_name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"scm_url\"] = scm_url\n __props__.__dict__[\"security\"] = security\n __props__.__dict__[\"sign_in\"] = sign_in\n __props__.__dict__[\"sign_up\"] = sign_up\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tenant_access\"] = tenant_access\n __props__.__dict__[\"virtual_network_configuration\"] = virtual_network_configuration\n __props__.__dict__[\"virtual_network_type\"] = virtual_network_type\n __props__.__dict__[\"zones\"] = zones\n return Service(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Canary':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = 
CanaryArgs.__new__(CanaryArgs)\n\n __props__.__dict__[\"artifact_config\"] = None\n __props__.__dict__[\"artifact_s3_location\"] = None\n __props__.__dict__[\"code\"] = None\n __props__.__dict__[\"delete_lambda_resources_on_canary_deletion\"] = None\n __props__.__dict__[\"execution_role_arn\"] = None\n __props__.__dict__[\"failure_retention_period\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"run_config\"] = None\n __props__.__dict__[\"runtime_version\"] = None\n __props__.__dict__[\"schedule\"] = None\n __props__.__dict__[\"start_canary_after_creation\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"success_retention_period\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"visual_reference\"] = None\n __props__.__dict__[\"vpc_config\"] = None\n return Canary(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"file_shares\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"kms_key_name\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"networks\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"satisfies_pzs\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"status_message\"] = None\n __props__.__dict__[\"suspension_reasons\"] = None\n __props__.__dict__[\"tier\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[pulumi.InputType['SyntheticsPrivateLocationMetadataArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SyntheticsPrivateLocation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SyntheticsPrivateLocationState.__new__(_SyntheticsPrivateLocationState)\n\n __props__.__dict__[\"config\"] = config\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"metadata\"] = metadata\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"tags\"] = tags\n return SyntheticsPrivateLocation(resource_name, opts=opts, __props__=__props__)", "def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = 
get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n accept_language: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n created_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n has_default_path: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner: Optional[pulumi.Input[str]] = None,\n provisioning_artifact_parameters: Optional[pulumi.Input[pulumi.InputType['ProductProvisioningArtifactParametersArgs']]] = None,\n status: Optional[pulumi.Input[str]] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[str]] = None) -> 'Product':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProductState.__new__(_ProductState)\n\n __props__.__dict__[\"accept_language\"] = accept_language\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"created_time\"] = created_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"distributor\"] = distributor\n __props__.__dict__[\"has_default_path\"] = has_default_path\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"owner\"] = owner\n __props__.__dict__[\"provisioning_artifact_parameters\"] = provisioning_artifact_parameters\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"support_description\"] = support_description\n __props__.__dict__[\"support_email\"] = support_email\n __props__.__dict__[\"support_url\"] = support_url\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"type\"] = type\n return Product(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"authorized_network\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"discovery_endpoint\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"instance_messages\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"maintenance_policy\"] = None\n __props__.__dict__[\"maintenance_schedule\"] = None\n __props__.__dict__[\"memcache_full_version\"] = None\n __props__.__dict__[\"memcache_nodes\"] = None\n __props__.__dict__[\"memcache_version\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"node_config\"] = None\n __props__.__dict__[\"node_count\"] = None\n __props__.__dict__[\"parameters\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"state\"] = None\n 
__props__.__dict__[\"update_available\"] = None\n __props__.__dict__[\"update_time\"] = None\n __props__.__dict__[\"zones\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Endpoint':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EndpointArgs.__new__(EndpointArgs)\n\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"endpoint_forwarding_rule\"] = None\n __props__.__dict__[\"endpoint_id\"] = None\n __props__.__dict__[\"endpoint_ip\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"network\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"request_id\"] = None\n __props__.__dict__[\"severity\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"threat_exceptions\"] = None\n __props__.__dict__[\"traffic_logs\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Endpoint(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n db_proxy_endpoint_name: Optional[pulumi.Input[str]] = None,\n db_proxy_name: Optional[pulumi.Input[str]] = None,\n endpoint: Optional[pulumi.Input[str]] = None,\n is_default: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n target_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vpc_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n vpc_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ProxyEndpoint':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProxyEndpointState.__new__(_ProxyEndpointState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"db_proxy_endpoint_name\"] = db_proxy_endpoint_name\n __props__.__dict__[\"db_proxy_name\"] = db_proxy_name\n __props__.__dict__[\"endpoint\"] = endpoint\n __props__.__dict__[\"is_default\"] = is_default\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"target_role\"] = target_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vpc_security_group_ids\"] = vpc_security_group_ids\n __props__.__dict__[\"vpc_subnet_ids\"] = vpc_subnet_ids\n return ProxyEndpoint(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)", "def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job", "def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = 
None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)", "def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Trigger':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = TriggerArgs.__new__(TriggerArgs)\n\n __props__.__dict__[\"channel\"] = None\n __props__.__dict__[\"conditions\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"destination\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"event_data_content_type\"] = None\n __props__.__dict__[\"event_filters\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"service_account\"] = None\n __props__.__dict__[\"transport\"] = None\n __props__.__dict__[\"trigger_id\"] = None\n __props__.__dict__[\"uid\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Trigger(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Job':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = JobArgs.__new__(JobArgs)\n\n __props__.__dict__[\"client_request_id\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"created_from_snapshot_id\"] = None\n __props__.__dict__[\"current_state\"] = None\n __props__.__dict__[\"current_state_time\"] = None\n __props__.__dict__[\"environment\"] = None\n __props__.__dict__[\"execution_info\"] = None\n __props__.__dict__[\"job_metadata\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"pipeline_description\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"replace_job_id\"] = None\n __props__.__dict__[\"replaced_by_job_id\"] = None\n __props__.__dict__[\"requested_state\"] = None\n __props__.__dict__[\"runtime_updatable_params\"] = None\n __props__.__dict__[\"satisfies_pzs\"] = None\n 
__props__.__dict__[\"stage_states\"] = None\n __props__.__dict__[\"start_time\"] = None\n __props__.__dict__[\"steps\"] = None\n __props__.__dict__[\"steps_location\"] = None\n __props__.__dict__[\"temp_files\"] = None\n __props__.__dict__[\"transform_name_mapping\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"view\"] = None\n return Job(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ServiceArgs.__new__(ServiceArgs)\n\n __props__.__dict__[\"correlation_scheme\"] = None\n __props__.__dict__[\"default_move_cost\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"partition_description\"] = None\n __props__.__dict__[\"placement_constraints\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"service_dns_name\"] = None\n __props__.__dict__[\"service_kind\"] = None\n __props__.__dict__[\"service_load_metrics\"] = None\n __props__.__dict__[\"service_package_activation_mode\"] = None\n __props__.__dict__[\"service_placement_policies\"] = None\n __props__.__dict__[\"service_type_name\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Service(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n action: Optional[pulumi.Input[str]] = None,\n layer_name: Optional[pulumi.Input[str]] = None,\n organization_id: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[str]] = None,\n principal: Optional[pulumi.Input[str]] = None,\n revision_id: Optional[pulumi.Input[str]] = None,\n statement_id: Optional[pulumi.Input[str]] = None,\n version_number: Optional[pulumi.Input[int]] = None) -> 'LayerVersionPermission':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _LayerVersionPermissionState.__new__(_LayerVersionPermissionState)\n\n __props__.__dict__[\"action\"] = action\n __props__.__dict__[\"layer_name\"] = layer_name\n __props__.__dict__[\"organization_id\"] = organization_id\n __props__.__dict__[\"policy\"] = policy\n __props__.__dict__[\"principal\"] = principal\n __props__.__dict__[\"revision_id\"] = revision_id\n __props__.__dict__[\"statement_id\"] = statement_id\n __props__.__dict__[\"version_number\"] = version_number\n return LayerVersionPermission(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Reservation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ReservationArgs.__new__(ReservationArgs)\n\n __props__.__dict__[\"concurrency\"] = None\n __props__.__dict__[\"creation_time\"] = None\n 
__props__.__dict__[\"ignore_idle_slots\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"multi_region_auxiliary\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"reservation_id\"] = None\n __props__.__dict__[\"slot_capacity\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Reservation(resource_name, opts=opts, __props__=__props__)", "def get_by_id(self, _id):\n return Field(self.context, ResourcePathServiceOperation(\"getById\", [_id], self.resource_path))", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_string: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n authentication_mode: Optional[pulumi.Input[pulumi.InputType['UserAuthenticationModeArgs']]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n no_password_required: Optional[pulumi.Input[bool]] = None,\n passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_id: Optional[pulumi.Input[str]] = None,\n user_name: Optional[pulumi.Input[str]] = None) -> 'User':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _UserState.__new__(_UserState)\n\n __props__.__dict__[\"access_string\"] = access_string\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"authentication_mode\"] = authentication_mode\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"no_password_required\"] = no_password_required\n __props__.__dict__[\"passwords\"] = passwords\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_id\"] = user_id\n __props__.__dict__[\"user_name\"] = user_name\n return User(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n data_factory_id: Optional[pulumi.Input[str]] = None,\n fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n subresource_name: Optional[pulumi.Input[str]] = None,\n target_resource_id: Optional[pulumi.Input[str]] = None) -> 'ManagedPrivateEndpoint':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ManagedPrivateEndpointState.__new__(_ManagedPrivateEndpointState)\n\n __props__.__dict__[\"data_factory_id\"] = data_factory_id\n __props__.__dict__[\"fqdns\"] = fqdns\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"subresource_name\"] = subresource_name\n __props__.__dict__[\"target_resource_id\"] = target_resource_id\n return ManagedPrivateEndpoint(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = AssessmentArgs.__new__(AssessmentArgs)\n\n __props__.__dict__[\"additional_data\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"links\"] = None\n __props__.__dict__[\"metadata\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"partners_data\"] = None\n 
__props__.__dict__[\"resource_details\"] = None\n __props__.__dict__[\"status\"] = None\n __props__.__dict__[\"type\"] = None\n return Assessment(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n avatar_url: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n full_name: Optional[pulumi.Input[str]] = None,\n full_path: Optional[pulumi.Input[str]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n runners_token: Optional[pulumi.Input[str]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n web_url: Optional[pulumi.Input[str]] = None) -> 'Group':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _GroupState.__new__(_GroupState)\n\n __props__.__dict__[\"auto_devops_enabled\"] = auto_devops_enabled\n __props__.__dict__[\"avatar\"] = avatar\n __props__.__dict__[\"avatar_hash\"] = avatar_hash\n __props__.__dict__[\"avatar_url\"] = avatar_url\n __props__.__dict__[\"default_branch_protection\"] = default_branch_protection\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"emails_disabled\"] = emails_disabled\n __props__.__dict__[\"extra_shared_runners_minutes_limit\"] = extra_shared_runners_minutes_limit\n __props__.__dict__[\"full_name\"] = full_name\n __props__.__dict__[\"full_path\"] = full_path\n __props__.__dict__[\"ip_restriction_ranges\"] = ip_restriction_ranges\n __props__.__dict__[\"lfs_enabled\"] = lfs_enabled\n __props__.__dict__[\"membership_lock\"] = membership_lock\n __props__.__dict__[\"mentions_disabled\"] = mentions_disabled\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_id\"] = parent_id\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"prevent_forking_outside_group\"] = prevent_forking_outside_group\n __props__.__dict__[\"project_creation_level\"] = project_creation_level\n __props__.__dict__[\"request_access_enabled\"] = request_access_enabled\n __props__.__dict__[\"require_two_factor_authentication\"] = require_two_factor_authentication\n __props__.__dict__[\"runners_token\"] = runners_token\n __props__.__dict__[\"share_with_group_lock\"] = share_with_group_lock\n __props__.__dict__[\"shared_runners_minutes_limit\"] = 
shared_runners_minutes_limit\n __props__.__dict__[\"subgroup_creation_level\"] = subgroup_creation_level\n __props__.__dict__[\"two_factor_grace_period\"] = two_factor_grace_period\n __props__.__dict__[\"visibility_level\"] = visibility_level\n __props__.__dict__[\"web_url\"] = web_url\n return Group(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[str]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_series\"] = instance_series\n __props__.__dict__[\"mysql_version\"] = mysql_version\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"specification\"] = specification\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def _extract_resource(resource: Optional[dict],\n allowed_vals: tuple[tuple[str, ...]],\n exc: Type[exception.CinderException],\n resource_name: str,\n props: tuple[str] = ('status',)) -> Optional[str]:\n\n resource_id = None\n if resource:\n for prop, allowed_states in zip(props, allowed_vals):\n if resource[prop] not in allowed_states:\n msg = _(\"Originating %(res)s %(prop)s must be one of \"\n \"'%(vals)s' values\")\n msg = msg % {'res': resource_name,\n 'prop': prop,\n 'vals': ', '.join(allowed_states)}\n # TODO(harlowja): what happens if the status changes after\n # this initial resource status check occurs??? 
Seems like\n # someone could delete the resource after this check passes\n # but before the volume is officially created?\n raise exc(reason=msg)\n resource_id = resource['id']\n return resource_id", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)", "def get_instigator_state(self, origin_id: str) -> InstigatorState:", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Assessment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"additional_data\"] = None\n __props__[\"display_name\"] = None\n __props__[\"links\"] = None\n __props__[\"metadata\"] = None\n __props__[\"name\"] = None\n __props__[\"partners_data\"] = None\n __props__[\"resource_details\"] = None\n __props__[\"status\"] = None\n __props__[\"type\"] = None\n return Assessment(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_url: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: 
Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceState.__new__(_WorkspaceState)\n\n __props__.__dict__[\"application_insights_id\"] = application_insights_id\n __props__.__dict__[\"container_registry_id\"] = container_registry_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_url\"] = discovery_url\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"friendly_name\"] = friendly_name\n __props__.__dict__[\"high_business_impact\"] = high_business_impact\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"image_build_compute_name\"] = image_build_compute_name\n __props__.__dict__[\"key_vault_id\"] = key_vault_id\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_user_assigned_identity\"] = primary_user_assigned_identity\n __props__.__dict__[\"public_access_behind_virtual_network_enabled\"] = public_access_behind_virtual_network_enabled\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"storage_account_id\"] = storage_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"v1_legacy_mode_enabled\"] = v1_legacy_mode_enabled\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return Workspace(resource_name, opts=opts, __props__=__props__)", "def state_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n else:\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n as_path_match_mode: Optional[pulumi.Input[str]] = None,\n cen_id: Optional[pulumi.Input[str]] = None,\n cen_region_id: Optional[pulumi.Input[str]] = None,\n cidr_match_mode: Optional[pulumi.Input[str]] = None,\n community_match_mode: Optional[pulumi.Input[str]] = None,\n community_operate_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n destination_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n map_result: Optional[pulumi.Input[str]] = None,\n match_asns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n match_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n next_priority: Optional[pulumi.Input[int]] = None,\n operate_community_sets: 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n preference: Optional[pulumi.Input[int]] = None,\n prepend_as_paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n route_map_id: Optional[pulumi.Input[str]] = None,\n route_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n source_region_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n transit_router_route_table_id: Optional[pulumi.Input[str]] = None,\n transmit_direction: Optional[pulumi.Input[str]] = None) -> 'RouteMap':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RouteMapState.__new__(_RouteMapState)\n\n __props__.__dict__[\"as_path_match_mode\"] = as_path_match_mode\n __props__.__dict__[\"cen_id\"] = cen_id\n __props__.__dict__[\"cen_region_id\"] = cen_region_id\n __props__.__dict__[\"cidr_match_mode\"] = cidr_match_mode\n __props__.__dict__[\"community_match_mode\"] = community_match_mode\n __props__.__dict__[\"community_operate_mode\"] = community_operate_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"destination_child_instance_types\"] = destination_child_instance_types\n __props__.__dict__[\"destination_cidr_blocks\"] = destination_cidr_blocks\n __props__.__dict__[\"destination_instance_ids\"] = destination_instance_ids\n __props__.__dict__[\"destination_instance_ids_reverse_match\"] = destination_instance_ids_reverse_match\n __props__.__dict__[\"destination_route_table_ids\"] = destination_route_table_ids\n __props__.__dict__[\"map_result\"] = map_result\n __props__.__dict__[\"match_asns\"] = match_asns\n __props__.__dict__[\"match_community_sets\"] = match_community_sets\n __props__.__dict__[\"next_priority\"] = next_priority\n __props__.__dict__[\"operate_community_sets\"] = operate_community_sets\n __props__.__dict__[\"preference\"] = preference\n __props__.__dict__[\"prepend_as_paths\"] = prepend_as_paths\n __props__.__dict__[\"priority\"] = priority\n __props__.__dict__[\"route_map_id\"] = route_map_id\n __props__.__dict__[\"route_types\"] = route_types\n __props__.__dict__[\"source_child_instance_types\"] = source_child_instance_types\n __props__.__dict__[\"source_instance_ids\"] = source_instance_ids\n __props__.__dict__[\"source_instance_ids_reverse_match\"] = source_instance_ids_reverse_match\n __props__.__dict__[\"source_region_ids\"] = source_region_ids\n __props__.__dict__[\"source_route_table_ids\"] = source_route_table_ids\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"transit_router_route_table_id\"] = transit_router_route_table_id\n __props__.__dict__[\"transmit_direction\"] = transmit_direction\n return RouteMap(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n inter_region_traffic_qos_policy_description: Optional[pulumi.Input[str]] = None,\n inter_region_traffic_qos_policy_name: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n transit_router_attachment_id: Optional[pulumi.Input[str]] = None,\n 
transit_router_id: Optional[pulumi.Input[str]] = None) -> 'InterRegionTrafficQosPolicy':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InterRegionTrafficQosPolicyState.__new__(_InterRegionTrafficQosPolicyState)\n\n __props__.__dict__[\"inter_region_traffic_qos_policy_description\"] = inter_region_traffic_qos_policy_description\n __props__.__dict__[\"inter_region_traffic_qos_policy_name\"] = inter_region_traffic_qos_policy_name\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"transit_router_attachment_id\"] = transit_router_attachment_id\n __props__.__dict__[\"transit_router_id\"] = transit_router_id\n return InterRegionTrafficQosPolicy(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FhirStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FhirStoreArgs.__new__(FhirStoreArgs)\n\n __props__.__dict__[\"complex_data_type_reference_parsing\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"default_search_handling_strict\"] = None\n __props__.__dict__[\"disable_referential_integrity\"] = None\n __props__.__dict__[\"disable_resource_versioning\"] = None\n __props__.__dict__[\"enable_update_create\"] = None\n __props__.__dict__[\"fhir_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"notification_config\"] = None\n __props__.__dict__[\"notification_configs\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"stream_configs\"] = None\n __props__.__dict__[\"validation_config\"] = None\n __props__.__dict__[\"version\"] = None\n return FhirStore(resource_name, opts=opts, __props__=__props__)", "def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Product':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"approval_required\"] = None\n __props__[\"description\"] = None\n __props__[\"display_name\"] = None\n __props__[\"name\"] = None\n __props__[\"state\"] = None\n __props__[\"subscription_required\"] = None\n __props__[\"subscriptions_limit\"] = None\n __props__[\"terms\"] = None\n __props__[\"type\"] = None\n return Product(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n acl_name: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,\n cluster_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterClusterEndpointArgs']]]]] = None,\n data_tiering: Optional[pulumi.Input[bool]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_patch_version: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n final_snapshot_name: Optional[pulumi.Input[str]] = None,\n kms_key_arn: Optional[pulumi.Input[str]] = None,\n maintenance_window: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n node_type: 
Optional[pulumi.Input[str]] = None,\n num_replicas_per_shard: Optional[pulumi.Input[int]] = None,\n num_shards: Optional[pulumi.Input[int]] = None,\n parameter_group_name: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[int]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n shards: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterShardArgs']]]]] = None,\n snapshot_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n snapshot_name: Optional[pulumi.Input[str]] = None,\n snapshot_retention_limit: Optional[pulumi.Input[int]] = None,\n snapshot_window: Optional[pulumi.Input[str]] = None,\n sns_topic_arn: Optional[pulumi.Input[str]] = None,\n subnet_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tls_enabled: Optional[pulumi.Input[bool]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"acl_name\"] = acl_name\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auto_minor_version_upgrade\"] = auto_minor_version_upgrade\n __props__.__dict__[\"cluster_endpoints\"] = cluster_endpoints\n __props__.__dict__[\"data_tiering\"] = data_tiering\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_patch_version\"] = engine_patch_version\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"final_snapshot_name\"] = final_snapshot_name\n __props__.__dict__[\"kms_key_arn\"] = kms_key_arn\n __props__.__dict__[\"maintenance_window\"] = maintenance_window\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"name_prefix\"] = name_prefix\n __props__.__dict__[\"node_type\"] = node_type\n __props__.__dict__[\"num_replicas_per_shard\"] = num_replicas_per_shard\n __props__.__dict__[\"num_shards\"] = num_shards\n __props__.__dict__[\"parameter_group_name\"] = parameter_group_name\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"security_group_ids\"] = security_group_ids\n __props__.__dict__[\"shards\"] = shards\n __props__.__dict__[\"snapshot_arns\"] = snapshot_arns\n __props__.__dict__[\"snapshot_name\"] = snapshot_name\n __props__.__dict__[\"snapshot_retention_limit\"] = snapshot_retention_limit\n __props__.__dict__[\"snapshot_window\"] = snapshot_window\n __props__.__dict__[\"sns_topic_arn\"] = sns_topic_arn\n __props__.__dict__[\"subnet_group_name\"] = subnet_group_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"tls_enabled\"] = tls_enabled\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"additional_info\"] = None\n __props__.__dict__[\"affinity\"] = None\n __props__.__dict__[\"availability_zone\"] = None\n __props__.__dict__[\"block_device_mappings\"] = None\n __props__.__dict__[\"cpu_options\"] = None\n __props__.__dict__[\"credit_specification\"] = None\n __props__.__dict__[\"disable_api_termination\"] = None\n __props__.__dict__[\"ebs_optimized\"] = None\n __props__.__dict__[\"elastic_gpu_specifications\"] = None\n 
__props__.__dict__[\"elastic_inference_accelerators\"] = None\n __props__.__dict__[\"enclave_options\"] = None\n __props__.__dict__[\"hibernation_options\"] = None\n __props__.__dict__[\"host_id\"] = None\n __props__.__dict__[\"host_resource_group_arn\"] = None\n __props__.__dict__[\"iam_instance_profile\"] = None\n __props__.__dict__[\"image_id\"] = None\n __props__.__dict__[\"instance_initiated_shutdown_behavior\"] = None\n __props__.__dict__[\"instance_type\"] = None\n __props__.__dict__[\"ipv6_address_count\"] = None\n __props__.__dict__[\"ipv6_addresses\"] = None\n __props__.__dict__[\"kernel_id\"] = None\n __props__.__dict__[\"key_name\"] = None\n __props__.__dict__[\"launch_template\"] = None\n __props__.__dict__[\"license_specifications\"] = None\n __props__.__dict__[\"monitoring\"] = None\n __props__.__dict__[\"network_interfaces\"] = None\n __props__.__dict__[\"placement_group_name\"] = None\n __props__.__dict__[\"private_dns_name\"] = None\n __props__.__dict__[\"private_dns_name_options\"] = None\n __props__.__dict__[\"private_ip\"] = None\n __props__.__dict__[\"private_ip_address\"] = None\n __props__.__dict__[\"propagate_tags_to_volume_on_creation\"] = None\n __props__.__dict__[\"public_dns_name\"] = None\n __props__.__dict__[\"public_ip\"] = None\n __props__.__dict__[\"ramdisk_id\"] = None\n __props__.__dict__[\"security_group_ids\"] = None\n __props__.__dict__[\"security_groups\"] = None\n __props__.__dict__[\"source_dest_check\"] = None\n __props__.__dict__[\"ssm_associations\"] = None\n __props__.__dict__[\"subnet_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"tenancy\"] = None\n __props__.__dict__[\"user_data\"] = None\n __props__.__dict__[\"volumes\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ServiceArgs.__new__(ServiceArgs)\n\n __props__.__dict__[\"capacity_provider_strategy\"] = None\n __props__.__dict__[\"cluster\"] = None\n __props__.__dict__[\"deployment_configuration\"] = None\n __props__.__dict__[\"deployment_controller\"] = None\n __props__.__dict__[\"desired_count\"] = None\n __props__.__dict__[\"enable_ecs_managed_tags\"] = None\n __props__.__dict__[\"enable_execute_command\"] = None\n __props__.__dict__[\"health_check_grace_period_seconds\"] = None\n __props__.__dict__[\"launch_type\"] = None\n __props__.__dict__[\"load_balancers\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"network_configuration\"] = None\n __props__.__dict__[\"placement_constraints\"] = None\n __props__.__dict__[\"placement_strategies\"] = None\n __props__.__dict__[\"platform_version\"] = None\n __props__.__dict__[\"propagate_tags\"] = None\n __props__.__dict__[\"role\"] = None\n __props__.__dict__[\"scheduling_strategy\"] = None\n __props__.__dict__[\"service_arn\"] = None\n __props__.__dict__[\"service_connect_configuration\"] = None\n __props__.__dict__[\"service_name\"] = None\n __props__.__dict__[\"service_registries\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"task_definition\"] = None\n return Service(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Subscription':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n 
__props__[\"accessed_at\"] = None\n __props__[\"auto_delete_on_idle\"] = None\n __props__[\"count_details\"] = None\n __props__[\"created_at\"] = None\n __props__[\"dead_lettering_on_filter_evaluation_exceptions\"] = None\n __props__[\"dead_lettering_on_message_expiration\"] = None\n __props__[\"default_message_time_to_live\"] = None\n __props__[\"enable_batched_operations\"] = None\n __props__[\"entity_availability_status\"] = None\n __props__[\"is_read_only\"] = None\n __props__[\"location\"] = None\n __props__[\"lock_duration\"] = None\n __props__[\"max_delivery_count\"] = None\n __props__[\"message_count\"] = None\n __props__[\"name\"] = None\n __props__[\"requires_session\"] = None\n __props__[\"status\"] = None\n __props__[\"type\"] = None\n __props__[\"updated_at\"] = None\n return Subscription(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n approved_subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n server_name: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n sleep: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n timeout: Optional[pulumi.Input[int]] = None) -> 'PrivatelinkAzure':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _PrivatelinkAzureState.__new__(_PrivatelinkAzureState)\n\n __props__.__dict__[\"approved_subscriptions\"] = approved_subscriptions\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"server_name\"] = server_name\n __props__.__dict__[\"service_name\"] = service_name\n __props__.__dict__[\"sleep\"] = sleep\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"timeout\"] = timeout\n return PrivatelinkAzure(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'JobStep':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = JobStepArgs.__new__(JobStepArgs)\n\n __props__.__dict__[\"action\"] = None\n __props__.__dict__[\"credential\"] = None\n __props__.__dict__[\"execution_options\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"output\"] = None\n __props__.__dict__[\"step_id\"] = None\n __props__.__dict__[\"target_group\"] = None\n __props__.__dict__[\"type\"] = None\n return JobStep(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Organization':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = OrganizationArgs.__new__(OrganizationArgs)\n\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"feature_set\"] = None\n __props__.__dict__[\"management_account_arn\"] = None\n __props__.__dict__[\"management_account_email\"] = None\n __props__.__dict__[\"management_account_id\"] = None\n __props__.__dict__[\"root_id\"] = None\n return Organization(resource_name, opts=opts, __props__=__props__)", "def get_id(\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n in_states=None,\n filters=None,\n):\n instance_ids = find_instances(\n name=name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n in_states=in_states,\n filters=filters,\n )\n if instance_ids:\n 
log.info(\"Instance ids: %s\", \" \".join(instance_ids))\n if len(instance_ids) == 1:\n return instance_ids[0]\n else:\n raise CommandExecutionError(\n \"Found more than one instance matching the criteria.\"\n )\n else:\n log.warning(\"Could not find instance.\")\n return None", "def get(resource_name, id, opts=None, accepts_prompt_none_forward_from_client=None, add_read_token_role_on_create=None, alias=None, authenticate_by_default=None, authorization_url=None, backchannel_supported=None, client_id=None, client_secret=None, default_scopes=None, display_name=None, enabled=None, extra_config=None, first_broker_login_flow_alias=None, hide_on_login_page=None, internal_id=None, jwks_url=None, link_only=None, login_hint=None, logout_url=None, post_broker_login_flow_alias=None, provider_id=None, realm=None, store_token=None, token_url=None, trust_email=None, ui_locales=None, user_info_url=None, validate_signature=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"accepts_prompt_none_forward_from_client\"] = accepts_prompt_none_forward_from_client\n __props__[\"add_read_token_role_on_create\"] = add_read_token_role_on_create\n __props__[\"alias\"] = alias\n __props__[\"authenticate_by_default\"] = authenticate_by_default\n __props__[\"authorization_url\"] = authorization_url\n __props__[\"backchannel_supported\"] = backchannel_supported\n __props__[\"client_id\"] = client_id\n __props__[\"client_secret\"] = client_secret\n __props__[\"default_scopes\"] = default_scopes\n __props__[\"display_name\"] = display_name\n __props__[\"enabled\"] = enabled\n __props__[\"extra_config\"] = extra_config\n __props__[\"first_broker_login_flow_alias\"] = first_broker_login_flow_alias\n __props__[\"hide_on_login_page\"] = hide_on_login_page\n __props__[\"internal_id\"] = internal_id\n __props__[\"jwks_url\"] = jwks_url\n __props__[\"link_only\"] = link_only\n __props__[\"login_hint\"] = login_hint\n __props__[\"logout_url\"] = logout_url\n __props__[\"post_broker_login_flow_alias\"] = post_broker_login_flow_alias\n __props__[\"provider_id\"] = provider_id\n __props__[\"realm\"] = realm\n __props__[\"store_token\"] = store_token\n __props__[\"token_url\"] = token_url\n __props__[\"trust_email\"] = trust_email\n __props__[\"ui_locales\"] = ui_locales\n __props__[\"user_info_url\"] = user_info_url\n __props__[\"validate_signature\"] = validate_signature\n return IdentityProvider(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n apply_immediately: Optional[pulumi.Input[bool]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n authentication_strategy: Optional[pulumi.Input[str]] = None,\n auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,\n broker_name: Optional[pulumi.Input[str]] = None,\n configuration: Optional[pulumi.Input[pulumi.InputType['BrokerConfigurationArgs']]] = None,\n deployment_mode: Optional[pulumi.Input[str]] = None,\n encryption_options: Optional[pulumi.Input[pulumi.InputType['BrokerEncryptionOptionsArgs']]] = None,\n engine_type: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n host_instance_type: Optional[pulumi.Input[str]] = None,\n instances: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BrokerInstanceArgs']]]]] = None,\n ldap_server_metadata: Optional[pulumi.Input[pulumi.InputType['BrokerLdapServerMetadataArgs']]] = None,\n logs: 
Optional[pulumi.Input[pulumi.InputType['BrokerLogsArgs']]] = None,\n maintenance_window_start_time: Optional[pulumi.Input[pulumi.InputType['BrokerMaintenanceWindowStartTimeArgs']]] = None,\n publicly_accessible: Optional[pulumi.Input[bool]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n storage_type: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n users: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BrokerUserArgs']]]]] = None) -> 'Broker':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BrokerState.__new__(_BrokerState)\n\n __props__.__dict__[\"apply_immediately\"] = apply_immediately\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"authentication_strategy\"] = authentication_strategy\n __props__.__dict__[\"auto_minor_version_upgrade\"] = auto_minor_version_upgrade\n __props__.__dict__[\"broker_name\"] = broker_name\n __props__.__dict__[\"configuration\"] = configuration\n __props__.__dict__[\"deployment_mode\"] = deployment_mode\n __props__.__dict__[\"encryption_options\"] = encryption_options\n __props__.__dict__[\"engine_type\"] = engine_type\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"host_instance_type\"] = host_instance_type\n __props__.__dict__[\"instances\"] = instances\n __props__.__dict__[\"ldap_server_metadata\"] = ldap_server_metadata\n __props__.__dict__[\"logs\"] = logs\n __props__.__dict__[\"maintenance_window_start_time\"] = maintenance_window_start_time\n __props__.__dict__[\"publicly_accessible\"] = publicly_accessible\n __props__.__dict__[\"security_groups\"] = security_groups\n __props__.__dict__[\"storage_type\"] = storage_type\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"users\"] = users\n return Broker(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Release':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ReleaseArgs.__new__(ReleaseArgs)\n\n __props__.__dict__[\"allow_null_values\"] = None\n __props__.__dict__[\"atomic\"] = None\n __props__.__dict__[\"chart\"] = None\n __props__.__dict__[\"cleanup_on_fail\"] = None\n __props__.__dict__[\"create_namespace\"] = None\n __props__.__dict__[\"dependency_update\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"devel\"] = None\n __props__.__dict__[\"disable_crd_hooks\"] = None\n __props__.__dict__[\"disable_openapi_validation\"] = None\n __props__.__dict__[\"disable_webhooks\"] = None\n __props__.__dict__[\"force_update\"] = None\n __props__.__dict__[\"keyring\"] = None\n __props__.__dict__[\"lint\"] = None\n __props__.__dict__[\"manifest\"] = None\n __props__.__dict__[\"max_history\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"namespace\"] = None\n __props__.__dict__[\"postrender\"] = None\n __props__.__dict__[\"recreate_pods\"] = None\n __props__.__dict__[\"render_subchart_notes\"] = None\n __props__.__dict__[\"replace\"] = None\n __props__.__dict__[\"repository_opts\"] = None\n __props__.__dict__[\"reset_values\"] = None\n 
__props__.__dict__[\"resource_names\"] = None\n __props__.__dict__[\"reuse_values\"] = None\n __props__.__dict__[\"skip_await\"] = None\n __props__.__dict__[\"skip_crds\"] = None\n __props__.__dict__[\"status\"] = None\n __props__.__dict__[\"timeout\"] = None\n __props__.__dict__[\"value_yaml_files\"] = None\n __props__.__dict__[\"values\"] = None\n __props__.__dict__[\"verify\"] = None\n __props__.__dict__[\"version\"] = None\n __props__.__dict__[\"wait_for_jobs\"] = None\n return Release(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Machine':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = MachineArgs.__new__(MachineArgs)\n\n __props__.__dict__[\"ad_fqdn\"] = None\n __props__.__dict__[\"agent_configuration\"] = None\n __props__.__dict__[\"agent_upgrade\"] = None\n __props__.__dict__[\"agent_version\"] = None\n __props__.__dict__[\"client_public_key\"] = None\n __props__.__dict__[\"cloud_metadata\"] = None\n __props__.__dict__[\"detected_properties\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"dns_fqdn\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"error_details\"] = None\n __props__.__dict__[\"extensions\"] = None\n __props__.__dict__[\"identity\"] = None\n __props__.__dict__[\"last_status_change\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"location_data\"] = None\n __props__.__dict__[\"machine_fqdn\"] = None\n __props__.__dict__[\"mssql_discovered\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"os_name\"] = None\n __props__.__dict__[\"os_profile\"] = None\n __props__.__dict__[\"os_sku\"] = None\n __props__.__dict__[\"os_type\"] = None\n __props__.__dict__[\"os_version\"] = None\n __props__.__dict__[\"parent_cluster_resource_id\"] = None\n __props__.__dict__[\"private_link_scope_resource_id\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"resources\"] = None\n __props__.__dict__[\"service_statuses\"] = None\n __props__.__dict__[\"status\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"vm_id\"] = None\n __props__.__dict__[\"vm_uuid\"] = None\n return Machine(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(exploration_id, state_id, strict=True):\n # TODO(sll): Generalize this to handle multiple state_ids at a time.\n state_memcache_key = _get_state_memcache_key(exploration_id, state_id)\n memcached_state = memcache_services.get_multi(\n [state_memcache_key]).get(state_memcache_key)\n\n if memcached_state is not None:\n return memcached_state\n else:\n state_model = exp_models.StateModel.get(\n exploration_id, state_id, strict=strict)\n if state_model:\n state = exp_domain.State.from_dict(state_id, state_model.value)\n memcache_services.set_multi({state_memcache_key: state})\n return state\n else:\n return None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n comparison: Optional[pulumi.Input[str]] = None,\n created_at: Optional[pulumi.Input[int]] = None,\n critical: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionCriticalArgs']]] = None,\n description: Optional[pulumi.Input[str]] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n entity_guid: Optional[pulumi.Input[str]] = None,\n event: 
Optional[pulumi.Input[str]] = None,\n integration_provider: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n policy_id: Optional[pulumi.Input[int]] = None,\n process_where: Optional[pulumi.Input[str]] = None,\n runbook_url: Optional[pulumi.Input[str]] = None,\n select: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n updated_at: Optional[pulumi.Input[int]] = None,\n violation_close_timer: Optional[pulumi.Input[int]] = None,\n warning: Optional[pulumi.Input[pulumi.InputType['InfraAlertConditionWarningArgs']]] = None,\n where: Optional[pulumi.Input[str]] = None) -> 'InfraAlertCondition':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InfraAlertConditionState.__new__(_InfraAlertConditionState)\n\n __props__.__dict__[\"comparison\"] = comparison\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"critical\"] = critical\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"entity_guid\"] = entity_guid\n __props__.__dict__[\"event\"] = event\n __props__.__dict__[\"integration_provider\"] = integration_provider\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"policy_id\"] = policy_id\n __props__.__dict__[\"process_where\"] = process_where\n __props__.__dict__[\"runbook_url\"] = runbook_url\n __props__.__dict__[\"select\"] = select\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"updated_at\"] = updated_at\n __props__.__dict__[\"violation_close_timer\"] = violation_close_timer\n __props__.__dict__[\"warning\"] = warning\n __props__.__dict__[\"where\"] = where\n return InfraAlertCondition(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_id: Optional[pulumi.Input[str]] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None) -> 'AccessConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AccessConfigurationState.__new__(_AccessConfigurationState)\n\n __props__.__dict__[\"access_configuration_id\"] = access_configuration_id\n __props__.__dict__[\"access_configuration_name\"] = access_configuration_name\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"directory_id\"] = directory_id\n __props__.__dict__[\"force_remove_permission_policies\"] = force_remove_permission_policies\n __props__.__dict__[\"permission_policies\"] = permission_policies\n __props__.__dict__[\"relay_state\"] = relay_state\n __props__.__dict__[\"session_duration\"] = session_duration\n return AccessConfiguration(resource_name, opts=opts, __props__=__props__)", "def get(resource_name, id, opts=None, arn=None, block_device_mappings=None, capacity_reservation_specification=None, credit_specification=None, default_version=None, description=None, disable_api_termination=None, ebs_optimized=None, elastic_gpu_specifications=None, elastic_inference_accelerator=None, iam_instance_profile=None, 
image_id=None, instance_initiated_shutdown_behavior=None, instance_market_options=None, instance_type=None, kernel_id=None, key_name=None, latest_version=None, license_specifications=None, monitoring=None, name=None, name_prefix=None, network_interfaces=None, placement=None, ram_disk_id=None, security_group_names=None, tag_specifications=None, tags=None, user_data=None, vpc_security_group_ids=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"arn\"] = arn\n __props__[\"block_device_mappings\"] = block_device_mappings\n __props__[\"capacity_reservation_specification\"] = capacity_reservation_specification\n __props__[\"credit_specification\"] = credit_specification\n __props__[\"default_version\"] = default_version\n __props__[\"description\"] = description\n __props__[\"disable_api_termination\"] = disable_api_termination\n __props__[\"ebs_optimized\"] = ebs_optimized\n __props__[\"elastic_gpu_specifications\"] = elastic_gpu_specifications\n __props__[\"elastic_inference_accelerator\"] = elastic_inference_accelerator\n __props__[\"iam_instance_profile\"] = iam_instance_profile\n __props__[\"image_id\"] = image_id\n __props__[\"instance_initiated_shutdown_behavior\"] = instance_initiated_shutdown_behavior\n __props__[\"instance_market_options\"] = instance_market_options\n __props__[\"instance_type\"] = instance_type\n __props__[\"kernel_id\"] = kernel_id\n __props__[\"key_name\"] = key_name\n __props__[\"latest_version\"] = latest_version\n __props__[\"license_specifications\"] = license_specifications\n __props__[\"monitoring\"] = monitoring\n __props__[\"name\"] = name\n __props__[\"name_prefix\"] = name_prefix\n __props__[\"network_interfaces\"] = network_interfaces\n __props__[\"placement\"] = placement\n __props__[\"ram_disk_id\"] = ram_disk_id\n __props__[\"security_group_names\"] = security_group_names\n __props__[\"tag_specifications\"] = tag_specifications\n __props__[\"tags\"] = tags\n __props__[\"user_data\"] = user_data\n __props__[\"vpc_security_group_ids\"] = vpc_security_group_ids\n return LaunchTemplate(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = None,\n 
private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"create_sample_data\"] = create_sample_data\n __props__.__dict__[\"db_instance_category\"] = db_instance_category\n __props__.__dict__[\"db_instance_class\"] = db_instance_class\n __props__.__dict__[\"db_instance_mode\"] = db_instance_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"encryption_key\"] = encryption_key\n __props__.__dict__[\"encryption_type\"] = encryption_type\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_group_count\"] = instance_group_count\n __props__.__dict__[\"instance_network_type\"] = instance_network_type\n __props__.__dict__[\"instance_spec\"] = instance_spec\n __props__.__dict__[\"ip_whitelists\"] = ip_whitelists\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"master_node_num\"] = master_node_num\n __props__.__dict__[\"payment_type\"] = payment_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"security_ip_lists\"] = security_ip_lists\n __props__.__dict__[\"seg_node_num\"] = seg_node_num\n __props__.__dict__[\"seg_storage_type\"] = seg_storage_type\n __props__.__dict__[\"ssl_enabled\"] = ssl_enabled\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"storage_size\"] = storage_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"used_time\"] = used_time\n __props__.__dict__[\"vector_configuration_status\"] = vector_configuration_status\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n git_remote_settings: Optional[pulumi.Input[pulumi.InputType['RepositoryGitRemoteSettingsArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n workspace_compilation_overrides: Optional[pulumi.Input[pulumi.InputType['RepositoryWorkspaceCompilationOverridesArgs']]] = None) -> 'Repository':\n 
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RepositoryState.__new__(_RepositoryState)\n\n __props__.__dict__[\"git_remote_settings\"] = git_remote_settings\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"workspace_compilation_overrides\"] = workspace_compilation_overrides\n return Repository(resource_name, opts=opts, __props__=__props__)", "def get_resource_by_name(self, name, **kwargs):\n instance = self.manager.get_by_name(name, **kwargs)\n return instance", "def _get(isamAppliance, id):\n return isamAppliance.invoke_get(\"Retrieve a specific STS chain\", \"{0}/{1}\".format(uri, id),\n requires_modules=requires_modules,\n requires_version=requires_version)", "def get_state_by_id(states: [State], state_id: str, id_type: str) -> State:\n if id_type == 'new':\n for state in states:\n if state.new_id == state_id:\n return state\n if id_type == 'old':\n for state in states:\n if state.id == state_id:\n return state\n return states[0]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n api_management_id: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None) -> 'Tag':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _TagState.__new__(_TagState)\n\n __props__.__dict__[\"api_management_id\"] = api_management_id\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"name\"] = name\n return Tag(resource_name, opts=opts, __props__=__props__)", "def view_state_id(state_id):\n states_obj = storage.all(\"State\")\n if request.method == 'GET':\n for state in states_obj.values():\n if state.id == state_id:\n id_found = state.to_dict()\n return jsonify(id_found)\n abort(404)\n\n if request.method == 'DELETE':\n for state in states_obj.values():\n if state.id == state_id:\n storage.delete(state)\n storage.save()\n return make_response(jsonify({}), 200)\n abort(404)\n\n if request.method == 'PUT':\n key = \"State.\" + state_id\n states = storage.all(\"State\")\n instance = states.get(key)\n if instance is None:\n abort(404)\n else:\n if not request.json:\n abort(400, \"Not a JSON\")\n req_var = request.get_json()\n for key, value in req_var.items():\n setattr(instance, key, value)\n storage.save()\n return make_response(jsonify(instance.to_dict()), 200)", "def get(resource_name, id, opts=None, arn=None, artifact_store=None, name=None, role_arn=None, stages=None, tags=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"arn\"] = arn\n __props__[\"artifact_store\"] = artifact_store\n __props__[\"name\"] = name\n __props__[\"role_arn\"] = role_arn\n __props__[\"stages\"] = stages\n __props__[\"tags\"] = tags\n return Pipeline(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EnvironmentArgs.__new__(EnvironmentArgs)\n\n __props__.__dict__[\"arm_template_display_name\"] = None\n __props__.__dict__[\"created_by_user\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = 
None\n __props__.__dict__[\"resource_group_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"unique_identifier\"] = None\n return Environment(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n minimum_engine_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Acl':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AclState.__new__(_AclState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"minimum_engine_version\"] = minimum_engine_version\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"name_prefix\"] = name_prefix\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_names\"] = user_names\n return Acl(resource_name, opts=opts, __props__=__props__)", "def get_state(self, run_id):\n raise NotImplementedError()", "def __call__(self, id_, requisite=\"require\"):\n # return the correct data structure for the requisite\n return StateRequisite(requisite, self.module, id_)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n accessibility_error_redirect_url: Optional[pulumi.Input[str]] = None,\n accessibility_login_redirect_url: Optional[pulumi.Input[str]] = None,\n accessibility_self_service: Optional[pulumi.Input[bool]] = None,\n admin_note: Optional[pulumi.Input[str]] = None,\n app_links_json: Optional[pulumi.Input[str]] = None,\n app_settings_json: Optional[pulumi.Input[str]] = None,\n authentication_policy: Optional[pulumi.Input[str]] = None,\n auto_key_rotation: Optional[pulumi.Input[bool]] = None,\n auto_submit_toolbar: Optional[pulumi.Input[bool]] = None,\n client_basic_secret: Optional[pulumi.Input[str]] = None,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n client_uri: Optional[pulumi.Input[str]] = None,\n consent_method: Optional[pulumi.Input[str]] = None,\n enduser_note: Optional[pulumi.Input[str]] = None,\n grant_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n groups_claim: Optional[pulumi.Input[pulumi.InputType['OAuthGroupsClaimArgs']]] = None,\n hide_ios: Optional[pulumi.Input[bool]] = None,\n hide_web: Optional[pulumi.Input[bool]] = None,\n implicit_assignment: Optional[pulumi.Input[bool]] = None,\n issuer_mode: Optional[pulumi.Input[str]] = None,\n jwks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OAuthJwkArgs']]]]] = None,\n jwks_uri: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n login_mode: Optional[pulumi.Input[str]] = None,\n login_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n login_uri: Optional[pulumi.Input[str]] = None,\n logo: Optional[pulumi.Input[str]] = None,\n logo_uri: Optional[pulumi.Input[str]] = None,\n logo_url: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n omit_secret: Optional[pulumi.Input[bool]] = None,\n pkce_required: Optional[pulumi.Input[bool]] = None,\n policy_uri: 
Optional[pulumi.Input[str]] = None,\n post_logout_redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n profile: Optional[pulumi.Input[str]] = None,\n redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n refresh_token_leeway: Optional[pulumi.Input[int]] = None,\n refresh_token_rotation: Optional[pulumi.Input[str]] = None,\n response_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n sign_on_mode: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n token_endpoint_auth_method: Optional[pulumi.Input[str]] = None,\n tos_uri: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_name_template: Optional[pulumi.Input[str]] = None,\n user_name_template_push_status: Optional[pulumi.Input[str]] = None,\n user_name_template_suffix: Optional[pulumi.Input[str]] = None,\n user_name_template_type: Optional[pulumi.Input[str]] = None,\n wildcard_redirect: Optional[pulumi.Input[str]] = None) -> 'OAuth':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _OAuthState.__new__(_OAuthState)\n\n __props__.__dict__[\"accessibility_error_redirect_url\"] = accessibility_error_redirect_url\n __props__.__dict__[\"accessibility_login_redirect_url\"] = accessibility_login_redirect_url\n __props__.__dict__[\"accessibility_self_service\"] = accessibility_self_service\n __props__.__dict__[\"admin_note\"] = admin_note\n __props__.__dict__[\"app_links_json\"] = app_links_json\n __props__.__dict__[\"app_settings_json\"] = app_settings_json\n __props__.__dict__[\"authentication_policy\"] = authentication_policy\n __props__.__dict__[\"auto_key_rotation\"] = auto_key_rotation\n __props__.__dict__[\"auto_submit_toolbar\"] = auto_submit_toolbar\n __props__.__dict__[\"client_basic_secret\"] = client_basic_secret\n __props__.__dict__[\"client_id\"] = client_id\n __props__.__dict__[\"client_secret\"] = client_secret\n __props__.__dict__[\"client_uri\"] = client_uri\n __props__.__dict__[\"consent_method\"] = consent_method\n __props__.__dict__[\"enduser_note\"] = enduser_note\n __props__.__dict__[\"grant_types\"] = grant_types\n __props__.__dict__[\"groups_claim\"] = groups_claim\n __props__.__dict__[\"hide_ios\"] = hide_ios\n __props__.__dict__[\"hide_web\"] = hide_web\n __props__.__dict__[\"implicit_assignment\"] = implicit_assignment\n __props__.__dict__[\"issuer_mode\"] = issuer_mode\n __props__.__dict__[\"jwks\"] = jwks\n __props__.__dict__[\"jwks_uri\"] = jwks_uri\n __props__.__dict__[\"label\"] = label\n __props__.__dict__[\"login_mode\"] = login_mode\n __props__.__dict__[\"login_scopes\"] = login_scopes\n __props__.__dict__[\"login_uri\"] = login_uri\n __props__.__dict__[\"logo\"] = logo\n __props__.__dict__[\"logo_uri\"] = logo_uri\n __props__.__dict__[\"logo_url\"] = logo_url\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"omit_secret\"] = omit_secret\n __props__.__dict__[\"pkce_required\"] = pkce_required\n __props__.__dict__[\"policy_uri\"] = policy_uri\n __props__.__dict__[\"post_logout_redirect_uris\"] = post_logout_redirect_uris\n __props__.__dict__[\"profile\"] = profile\n __props__.__dict__[\"redirect_uris\"] = redirect_uris\n __props__.__dict__[\"refresh_token_leeway\"] = refresh_token_leeway\n __props__.__dict__[\"refresh_token_rotation\"] = refresh_token_rotation\n __props__.__dict__[\"response_types\"] = response_types\n __props__.__dict__[\"sign_on_mode\"] = sign_on_mode\n __props__.__dict__[\"status\"] = status\n 
__props__.__dict__[\"token_endpoint_auth_method\"] = token_endpoint_auth_method\n __props__.__dict__[\"tos_uri\"] = tos_uri\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"user_name_template\"] = user_name_template\n __props__.__dict__[\"user_name_template_push_status\"] = user_name_template_push_status\n __props__.__dict__[\"user_name_template_suffix\"] = user_name_template_suffix\n __props__.__dict__[\"user_name_template_type\"] = user_name_template_type\n __props__.__dict__[\"wildcard_redirect\"] = wildcard_redirect\n return OAuth(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n kms_key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n schedule_config: Optional[pulumi.Input[pulumi.InputType['DataIntegrationScheduleConfigArgs']]] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'DataIntegration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _DataIntegrationState.__new__(_DataIntegrationState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"kms_key\"] = kms_key\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"schedule_config\"] = schedule_config\n __props__.__dict__[\"source_uri\"] = source_uri\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return DataIntegration(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n accessor: Optional[pulumi.Input[str]] = None,\n binddn: Optional[pulumi.Input[str]] = None,\n bindpass: Optional[pulumi.Input[str]] = None,\n case_sensitive_names: Optional[pulumi.Input[bool]] = None,\n certificate: Optional[pulumi.Input[str]] = None,\n client_tls_cert: Optional[pulumi.Input[str]] = None,\n client_tls_key: Optional[pulumi.Input[str]] = None,\n deny_null_bind: Optional[pulumi.Input[bool]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_remount: Optional[pulumi.Input[bool]] = None,\n discoverdn: Optional[pulumi.Input[bool]] = None,\n groupattr: Optional[pulumi.Input[str]] = None,\n groupdn: Optional[pulumi.Input[str]] = None,\n groupfilter: Optional[pulumi.Input[str]] = None,\n insecure_tls: Optional[pulumi.Input[bool]] = None,\n local: Optional[pulumi.Input[bool]] = None,\n max_page_size: Optional[pulumi.Input[int]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n path: Optional[pulumi.Input[str]] = None,\n starttls: Optional[pulumi.Input[bool]] = None,\n tls_max_version: Optional[pulumi.Input[str]] = None,\n tls_min_version: Optional[pulumi.Input[str]] = None,\n token_bound_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n token_explicit_max_ttl: Optional[pulumi.Input[int]] = None,\n token_max_ttl: Optional[pulumi.Input[int]] = None,\n token_no_default_policy: Optional[pulumi.Input[bool]] = None,\n token_num_uses: Optional[pulumi.Input[int]] = None,\n token_period: Optional[pulumi.Input[int]] = None,\n token_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n token_ttl: Optional[pulumi.Input[int]] = None,\n token_type: 
Optional[pulumi.Input[str]] = None,\n upndomain: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None,\n use_token_groups: Optional[pulumi.Input[bool]] = None,\n userattr: Optional[pulumi.Input[str]] = None,\n userdn: Optional[pulumi.Input[str]] = None,\n userfilter: Optional[pulumi.Input[str]] = None,\n username_as_alias: Optional[pulumi.Input[bool]] = None) -> 'AuthBackend':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AuthBackendState.__new__(_AuthBackendState)\n\n __props__.__dict__[\"accessor\"] = accessor\n __props__.__dict__[\"binddn\"] = binddn\n __props__.__dict__[\"bindpass\"] = bindpass\n __props__.__dict__[\"case_sensitive_names\"] = case_sensitive_names\n __props__.__dict__[\"certificate\"] = certificate\n __props__.__dict__[\"client_tls_cert\"] = client_tls_cert\n __props__.__dict__[\"client_tls_key\"] = client_tls_key\n __props__.__dict__[\"deny_null_bind\"] = deny_null_bind\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"disable_remount\"] = disable_remount\n __props__.__dict__[\"discoverdn\"] = discoverdn\n __props__.__dict__[\"groupattr\"] = groupattr\n __props__.__dict__[\"groupdn\"] = groupdn\n __props__.__dict__[\"groupfilter\"] = groupfilter\n __props__.__dict__[\"insecure_tls\"] = insecure_tls\n __props__.__dict__[\"local\"] = local\n __props__.__dict__[\"max_page_size\"] = max_page_size\n __props__.__dict__[\"namespace\"] = namespace\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"starttls\"] = starttls\n __props__.__dict__[\"tls_max_version\"] = tls_max_version\n __props__.__dict__[\"tls_min_version\"] = tls_min_version\n __props__.__dict__[\"token_bound_cidrs\"] = token_bound_cidrs\n __props__.__dict__[\"token_explicit_max_ttl\"] = token_explicit_max_ttl\n __props__.__dict__[\"token_max_ttl\"] = token_max_ttl\n __props__.__dict__[\"token_no_default_policy\"] = token_no_default_policy\n __props__.__dict__[\"token_num_uses\"] = token_num_uses\n __props__.__dict__[\"token_period\"] = token_period\n __props__.__dict__[\"token_policies\"] = token_policies\n __props__.__dict__[\"token_ttl\"] = token_ttl\n __props__.__dict__[\"token_type\"] = token_type\n __props__.__dict__[\"upndomain\"] = upndomain\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"use_token_groups\"] = use_token_groups\n __props__.__dict__[\"userattr\"] = userattr\n __props__.__dict__[\"userdn\"] = userdn\n __props__.__dict__[\"userfilter\"] = userfilter\n __props__.__dict__[\"username_as_alias\"] = username_as_alias\n return AuthBackend(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,\n base_instance_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distribution_policy_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n fingerprint: Optional[pulumi.Input[str]] = None,\n instance_group: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n named_ports: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n stateful_disks: 
Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,\n target_pools: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n target_size: Optional[pulumi.Input[float]] = None,\n update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,\n versions: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,\n wait_for_instances: Optional[pulumi.Input[bool]] = None) -> 'RegionInstanceGroupManager':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"auto_healing_policies\"] = auto_healing_policies\n __props__[\"base_instance_name\"] = base_instance_name\n __props__[\"description\"] = description\n __props__[\"distribution_policy_zones\"] = distribution_policy_zones\n __props__[\"fingerprint\"] = fingerprint\n __props__[\"instance_group\"] = instance_group\n __props__[\"name\"] = name\n __props__[\"named_ports\"] = named_ports\n __props__[\"project\"] = project\n __props__[\"region\"] = region\n __props__[\"self_link\"] = self_link\n __props__[\"stateful_disks\"] = stateful_disks\n __props__[\"target_pools\"] = target_pools\n __props__[\"target_size\"] = target_size\n __props__[\"update_policy\"] = update_policy\n __props__[\"versions\"] = versions\n __props__[\"wait_for_instances\"] = wait_for_instances\n return RegionInstanceGroupManager(resource_name, opts=opts, __props__=__props__)", "def fetch_by_id(self, trigger_id):\n result = self._client.get(self._full_path(trigger_id + '/state'))\n if 'state' in result:\n trigger = self._client.get(self._full_path(trigger_id))\n return Trigger(self._client, **trigger)\n elif not 'trigger_id' in result:\n raise ResponseStructureError(\"invalid api response\", result)", "def get_experiment_state_v1(self, skill_id, experiment_id, **kwargs):\n # type: (str, str, **Any) -> Union[ApiResponse, object, GetExperimentStateResponse_5152b250, StandardizedError_f5106a89, BadRequestError_f854b05]\n operation_name = \"get_experiment_state_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'skill_id' is set\n if ('skill_id' not in params) or (params['skill_id'] is None):\n raise ValueError(\n \"Missing the required parameter `skill_id` when calling `\" + operation_name + \"`\")\n # verify the required parameter 'experiment_id' is set\n if ('experiment_id' not in params) or (params['experiment_id'] is None):\n raise ValueError(\n \"Missing the required parameter `experiment_id` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills/{skillId}/experiments/{experimentId}/state'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'skill_id' in params:\n path_params['skillId'] = params['skill_id']\n if 'experiment_id' in params:\n path_params['experimentId'] = params['experiment_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n 
header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.experiment.get_experiment_state_response.GetExperimentStateResponse\", status_code=200, message=\"Returned skill experiment state.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceeds the permitted request limit. Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"GET\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=\"ask_smapi_model.v1.skill.experiment.get_experiment_state_response.GetExperimentStateResponse\")\n\n if full_response:\n return api_response\n return api_response.body", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ResolverConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ResolverConfigArgs.__new__(ResolverConfigArgs)\n\n __props__.__dict__[\"autodefined_reverse\"] = None\n __props__.__dict__[\"autodefined_reverse_flag\"] = None\n __props__.__dict__[\"owner_id\"] = None\n __props__.__dict__[\"resource_id\"] = None\n return ResolverConfig(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n core_network_arn: Optional[pulumi.Input[str]] = None,\n core_network_attachment_arn: Optional[pulumi.Input[str]] = None,\n customer_gateway_configuration: Optional[pulumi.Input[str]] = None,\n customer_gateway_id: Optional[pulumi.Input[str]] = None,\n enable_acceleration: Optional[pulumi.Input[bool]] = None,\n local_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n local_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n outside_ip_address_type: Optional[pulumi.Input[str]] = None,\n remote_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n remote_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnConnectionRouteArgs']]]]] = None,\n static_routes_only: Optional[pulumi.Input[bool]] = None,\n tags: 
Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n transit_gateway_id: Optional[pulumi.Input[str]] = None,\n transport_transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n tunnel1_address: Optional[pulumi.Input[str]] = None,\n tunnel1_bgp_asn: Optional[pulumi.Input[str]] = None,\n tunnel1_bgp_holdtime: Optional[pulumi.Input[int]] = None,\n tunnel1_cgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel1_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_log_options: Optional[pulumi.Input[pulumi.InputType['VpnConnectionTunnel1LogOptionsArgs']]] = None,\n tunnel1_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel1_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel1_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel1_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel1_vgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel2_address: Optional[pulumi.Input[str]] = None,\n tunnel2_bgp_asn: Optional[pulumi.Input[str]] = None,\n tunnel2_bgp_holdtime: Optional[pulumi.Input[int]] = None,\n tunnel2_cgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel2_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_log_options: Optional[pulumi.Input[pulumi.InputType['VpnConnectionTunnel2LogOptionsArgs']]] = None,\n tunnel2_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase2_encryption_algorithms: 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel2_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel2_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel2_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel2_vgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel_inside_ip_version: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n vgw_telemetries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnConnectionVgwTelemetryArgs']]]]] = None,\n vpn_gateway_id: Optional[pulumi.Input[str]] = None) -> 'VpnConnection':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VpnConnectionState.__new__(_VpnConnectionState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"core_network_arn\"] = core_network_arn\n __props__.__dict__[\"core_network_attachment_arn\"] = core_network_attachment_arn\n __props__.__dict__[\"customer_gateway_configuration\"] = customer_gateway_configuration\n __props__.__dict__[\"customer_gateway_id\"] = customer_gateway_id\n __props__.__dict__[\"enable_acceleration\"] = enable_acceleration\n __props__.__dict__[\"local_ipv4_network_cidr\"] = local_ipv4_network_cidr\n __props__.__dict__[\"local_ipv6_network_cidr\"] = local_ipv6_network_cidr\n __props__.__dict__[\"outside_ip_address_type\"] = outside_ip_address_type\n __props__.__dict__[\"remote_ipv4_network_cidr\"] = remote_ipv4_network_cidr\n __props__.__dict__[\"remote_ipv6_network_cidr\"] = remote_ipv6_network_cidr\n __props__.__dict__[\"routes\"] = routes\n __props__.__dict__[\"static_routes_only\"] = static_routes_only\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"transit_gateway_attachment_id\"] = transit_gateway_attachment_id\n __props__.__dict__[\"transit_gateway_id\"] = transit_gateway_id\n __props__.__dict__[\"transport_transit_gateway_attachment_id\"] = transport_transit_gateway_attachment_id\n __props__.__dict__[\"tunnel1_address\"] = tunnel1_address\n __props__.__dict__[\"tunnel1_bgp_asn\"] = tunnel1_bgp_asn\n __props__.__dict__[\"tunnel1_bgp_holdtime\"] = tunnel1_bgp_holdtime\n __props__.__dict__[\"tunnel1_cgw_inside_address\"] = tunnel1_cgw_inside_address\n __props__.__dict__[\"tunnel1_dpd_timeout_action\"] = tunnel1_dpd_timeout_action\n __props__.__dict__[\"tunnel1_dpd_timeout_seconds\"] = tunnel1_dpd_timeout_seconds\n __props__.__dict__[\"tunnel1_enable_tunnel_lifecycle_control\"] = tunnel1_enable_tunnel_lifecycle_control\n __props__.__dict__[\"tunnel1_ike_versions\"] = tunnel1_ike_versions\n __props__.__dict__[\"tunnel1_inside_cidr\"] = tunnel1_inside_cidr\n __props__.__dict__[\"tunnel1_inside_ipv6_cidr\"] = tunnel1_inside_ipv6_cidr\n __props__.__dict__[\"tunnel1_log_options\"] = tunnel1_log_options\n __props__.__dict__[\"tunnel1_phase1_dh_group_numbers\"] = tunnel1_phase1_dh_group_numbers\n __props__.__dict__[\"tunnel1_phase1_encryption_algorithms\"] = tunnel1_phase1_encryption_algorithms\n __props__.__dict__[\"tunnel1_phase1_integrity_algorithms\"] = tunnel1_phase1_integrity_algorithms\n __props__.__dict__[\"tunnel1_phase1_lifetime_seconds\"] = tunnel1_phase1_lifetime_seconds\n 
__props__.__dict__[\"tunnel1_phase2_dh_group_numbers\"] = tunnel1_phase2_dh_group_numbers\n __props__.__dict__[\"tunnel1_phase2_encryption_algorithms\"] = tunnel1_phase2_encryption_algorithms\n __props__.__dict__[\"tunnel1_phase2_integrity_algorithms\"] = tunnel1_phase2_integrity_algorithms\n __props__.__dict__[\"tunnel1_phase2_lifetime_seconds\"] = tunnel1_phase2_lifetime_seconds\n __props__.__dict__[\"tunnel1_preshared_key\"] = tunnel1_preshared_key\n __props__.__dict__[\"tunnel1_rekey_fuzz_percentage\"] = tunnel1_rekey_fuzz_percentage\n __props__.__dict__[\"tunnel1_rekey_margin_time_seconds\"] = tunnel1_rekey_margin_time_seconds\n __props__.__dict__[\"tunnel1_replay_window_size\"] = tunnel1_replay_window_size\n __props__.__dict__[\"tunnel1_startup_action\"] = tunnel1_startup_action\n __props__.__dict__[\"tunnel1_vgw_inside_address\"] = tunnel1_vgw_inside_address\n __props__.__dict__[\"tunnel2_address\"] = tunnel2_address\n __props__.__dict__[\"tunnel2_bgp_asn\"] = tunnel2_bgp_asn\n __props__.__dict__[\"tunnel2_bgp_holdtime\"] = tunnel2_bgp_holdtime\n __props__.__dict__[\"tunnel2_cgw_inside_address\"] = tunnel2_cgw_inside_address\n __props__.__dict__[\"tunnel2_dpd_timeout_action\"] = tunnel2_dpd_timeout_action\n __props__.__dict__[\"tunnel2_dpd_timeout_seconds\"] = tunnel2_dpd_timeout_seconds\n __props__.__dict__[\"tunnel2_enable_tunnel_lifecycle_control\"] = tunnel2_enable_tunnel_lifecycle_control\n __props__.__dict__[\"tunnel2_ike_versions\"] = tunnel2_ike_versions\n __props__.__dict__[\"tunnel2_inside_cidr\"] = tunnel2_inside_cidr\n __props__.__dict__[\"tunnel2_inside_ipv6_cidr\"] = tunnel2_inside_ipv6_cidr\n __props__.__dict__[\"tunnel2_log_options\"] = tunnel2_log_options\n __props__.__dict__[\"tunnel2_phase1_dh_group_numbers\"] = tunnel2_phase1_dh_group_numbers\n __props__.__dict__[\"tunnel2_phase1_encryption_algorithms\"] = tunnel2_phase1_encryption_algorithms\n __props__.__dict__[\"tunnel2_phase1_integrity_algorithms\"] = tunnel2_phase1_integrity_algorithms\n __props__.__dict__[\"tunnel2_phase1_lifetime_seconds\"] = tunnel2_phase1_lifetime_seconds\n __props__.__dict__[\"tunnel2_phase2_dh_group_numbers\"] = tunnel2_phase2_dh_group_numbers\n __props__.__dict__[\"tunnel2_phase2_encryption_algorithms\"] = tunnel2_phase2_encryption_algorithms\n __props__.__dict__[\"tunnel2_phase2_integrity_algorithms\"] = tunnel2_phase2_integrity_algorithms\n __props__.__dict__[\"tunnel2_phase2_lifetime_seconds\"] = tunnel2_phase2_lifetime_seconds\n __props__.__dict__[\"tunnel2_preshared_key\"] = tunnel2_preshared_key\n __props__.__dict__[\"tunnel2_rekey_fuzz_percentage\"] = tunnel2_rekey_fuzz_percentage\n __props__.__dict__[\"tunnel2_rekey_margin_time_seconds\"] = tunnel2_rekey_margin_time_seconds\n __props__.__dict__[\"tunnel2_replay_window_size\"] = tunnel2_replay_window_size\n __props__.__dict__[\"tunnel2_startup_action\"] = tunnel2_startup_action\n __props__.__dict__[\"tunnel2_vgw_inside_address\"] = tunnel2_vgw_inside_address\n __props__.__dict__[\"tunnel_inside_ip_version\"] = tunnel_inside_ip_version\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"vgw_telemetries\"] = vgw_telemetries\n __props__.__dict__[\"vpn_gateway_id\"] = vpn_gateway_id\n return VpnConnection(resource_name, opts=opts, __props__=__props__)", "def getstate(self,name):\n state = self.states[name]\n debug('kfnode.getstate ',(name,state))\n return state", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n call_recovery: Optional[pulumi.Input[str]] = None,\n 
default_auth_provider: Optional[pulumi.Input[str]] = None,\n default_included_group_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n email_recovery: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n password_auto_unlock_minutes: Optional[pulumi.Input[int]] = None,\n password_dictionary_lookup: Optional[pulumi.Input[bool]] = None,\n password_exclude_first_name: Optional[pulumi.Input[bool]] = None,\n password_exclude_last_name: Optional[pulumi.Input[bool]] = None,\n password_exclude_username: Optional[pulumi.Input[bool]] = None,\n password_expire_warn_days: Optional[pulumi.Input[int]] = None,\n password_history_count: Optional[pulumi.Input[int]] = None,\n password_lockout_notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n password_max_age_days: Optional[pulumi.Input[int]] = None,\n password_max_lockout_attempts: Optional[pulumi.Input[int]] = None,\n password_min_age_minutes: Optional[pulumi.Input[int]] = None,\n password_min_length: Optional[pulumi.Input[int]] = None,\n password_min_lowercase: Optional[pulumi.Input[int]] = None,\n password_min_number: Optional[pulumi.Input[int]] = None,\n password_min_symbol: Optional[pulumi.Input[int]] = None,\n password_min_uppercase: Optional[pulumi.Input[int]] = None,\n password_show_lockout_failures: Optional[pulumi.Input[bool]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n question_min_length: Optional[pulumi.Input[int]] = None,\n question_recovery: Optional[pulumi.Input[str]] = None,\n recovery_email_token: Optional[pulumi.Input[int]] = None,\n skip_unlock: Optional[pulumi.Input[bool]] = None,\n sms_recovery: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None) -> 'PolicyPasswordDefault':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _PolicyPasswordDefaultState.__new__(_PolicyPasswordDefaultState)\n\n __props__.__dict__[\"call_recovery\"] = call_recovery\n __props__.__dict__[\"default_auth_provider\"] = default_auth_provider\n __props__.__dict__[\"default_included_group_id\"] = default_included_group_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"email_recovery\"] = email_recovery\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"password_auto_unlock_minutes\"] = password_auto_unlock_minutes\n __props__.__dict__[\"password_dictionary_lookup\"] = password_dictionary_lookup\n __props__.__dict__[\"password_exclude_first_name\"] = password_exclude_first_name\n __props__.__dict__[\"password_exclude_last_name\"] = password_exclude_last_name\n __props__.__dict__[\"password_exclude_username\"] = password_exclude_username\n __props__.__dict__[\"password_expire_warn_days\"] = password_expire_warn_days\n __props__.__dict__[\"password_history_count\"] = password_history_count\n __props__.__dict__[\"password_lockout_notification_channels\"] = password_lockout_notification_channels\n __props__.__dict__[\"password_max_age_days\"] = password_max_age_days\n __props__.__dict__[\"password_max_lockout_attempts\"] = password_max_lockout_attempts\n __props__.__dict__[\"password_min_age_minutes\"] = password_min_age_minutes\n __props__.__dict__[\"password_min_length\"] = password_min_length\n __props__.__dict__[\"password_min_lowercase\"] = password_min_lowercase\n __props__.__dict__[\"password_min_number\"] = password_min_number\n __props__.__dict__[\"password_min_symbol\"] = password_min_symbol\n 
__props__.__dict__[\"password_min_uppercase\"] = password_min_uppercase\n __props__.__dict__[\"password_show_lockout_failures\"] = password_show_lockout_failures\n __props__.__dict__[\"priority\"] = priority\n __props__.__dict__[\"question_min_length\"] = question_min_length\n __props__.__dict__[\"question_recovery\"] = question_recovery\n __props__.__dict__[\"recovery_email_token\"] = recovery_email_token\n __props__.__dict__[\"skip_unlock\"] = skip_unlock\n __props__.__dict__[\"sms_recovery\"] = sms_recovery\n __props__.__dict__[\"status\"] = status\n return PolicyPasswordDefault(resource_name, opts=opts, __props__=__props__)", "def get(self, state):\n return state[self.primary or self]", "def get_sample_state_by_id():\n ids = [] # type: list\n if demisto.getArg('ids'):\n ids += argToList(demisto.getArg('ids'))\n if demisto.getArg('id'):\n ids.append(demisto.getArg('id'))\n response = get_sample_state_helper(ids)\n md = tableToMarkdown('ThreatGrid - Sample state', response['samples'], ['ID', 'State'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': response['samples']},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': response['requests']\n })", "def salesforce_get(self, obj_name, obj_id):\n self.builtin.log(f\"Getting {obj_name} with Id {obj_id}\")\n obj_class = getattr(self.cumulusci.sf, obj_name)\n return obj_class.get(obj_id)", "def get_object(self, id, **args):\n return self.request(\"{0}/{1}\".format(self.version, id), args)", "async def get_state(\n self,\n entity_id: str = None,\n attribute: str = None,\n default: Any = None,\n copy: bool = True,\n **kwargs: Optional[Any],\n ) -> Any:\n namespace = self._get_namespace(**kwargs)\n\n return await self.get_entity_api(namespace, entity_id).get_state(attribute, default, copy, **kwargs)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"available_maintenance_versions\"] = None\n __props__.__dict__[\"backend_type\"] = None\n __props__.__dict__[\"connection_name\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"current_disk_size\"] = None\n __props__.__dict__[\"database_installed_version\"] = None\n __props__.__dict__[\"database_version\"] = None\n __props__.__dict__[\"disk_encryption_configuration\"] = None\n __props__.__dict__[\"disk_encryption_status\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"failover_replica\"] = None\n __props__.__dict__[\"gce_zone\"] = None\n __props__.__dict__[\"instance_type\"] = None\n __props__.__dict__[\"ip_addresses\"] = None\n __props__.__dict__[\"ipv6_address\"] = None\n __props__.__dict__[\"kind\"] = None\n __props__.__dict__[\"maintenance_version\"] = None\n __props__.__dict__[\"master_instance_name\"] = None\n __props__.__dict__[\"max_disk_size\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"on_premises_configuration\"] = None\n __props__.__dict__[\"out_of_disk_report\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"region\"] = None\n __props__.__dict__[\"replica_configuration\"] = None\n __props__.__dict__[\"replica_names\"] = None\n __props__.__dict__[\"root_password\"] = None\n __props__.__dict__[\"satisfies_pzs\"] = None\n __props__.__dict__[\"scheduled_maintenance\"] = None\n 
__props__.__dict__[\"secondary_gce_zone\"] = None\n __props__.__dict__[\"self_link\"] = None\n __props__.__dict__[\"server_ca_cert\"] = None\n __props__.__dict__[\"service_account_email_address\"] = None\n __props__.__dict__[\"settings\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"suspension_reason\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EnvironmentArgs.__new__(EnvironmentArgs)\n\n __props__.__dict__[\"application_name\"] = None\n __props__.__dict__[\"cname_prefix\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"endpoint_url\"] = None\n __props__.__dict__[\"environment_name\"] = None\n __props__.__dict__[\"operations_role\"] = None\n __props__.__dict__[\"option_settings\"] = None\n __props__.__dict__[\"platform_arn\"] = None\n __props__.__dict__[\"solution_stack_name\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"template_name\"] = None\n __props__.__dict__[\"tier\"] = None\n __props__.__dict__[\"version_label\"] = None\n return Environment(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n disabled: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n send_user_invitation: Optional[pulumi.Input[bool]] = None,\n user_invitation_id: Optional[pulumi.Input[str]] = None,\n verified: Optional[pulumi.Input[bool]] = None) -> 'User':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _UserState.__new__(_UserState)\n\n __props__.__dict__[\"disabled\"] = disabled\n __props__.__dict__[\"email\"] = email\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"roles\"] = roles\n __props__.__dict__[\"send_user_invitation\"] = send_user_invitation\n __props__.__dict__[\"user_invitation_id\"] = user_invitation_id\n __props__.__dict__[\"verified\"] = verified\n return User(resource_name, opts=opts, __props__=__props__)", "def state_run(id_, **kwargs):\n run = get_run_object(id_)\n check_run_permission(run, kwargs[\"token_info\"])\n\n stub = get_runs_services_stub()\n state = stub.State(job_pb2.ID(id=id_))\n\n return util.deserialize_protobuf(state)" ]
[ "0.58092475", "0.56211823", "0.5562653", "0.5514351", "0.5514051", "0.5457251", "0.54326993", "0.542786", "0.5396179", "0.5394465", "0.5353406", "0.5345885", "0.5341295", "0.5336269", "0.5336196", "0.5327796", "0.5297418", "0.5287107", "0.5264695", "0.5260708", "0.5249568", "0.5248615", "0.5242768", "0.52264327", "0.52262187", "0.5203133", "0.5172942", "0.51423836", "0.5139257", "0.5128688", "0.5107366", "0.5092462", "0.50906056", "0.507465", "0.5068333", "0.5056124", "0.50497144", "0.5041673", "0.50357217", "0.50344926", "0.50279045", "0.50245905", "0.502219", "0.5019789", "0.50143594", "0.50077575", "0.50067997", "0.500442", "0.49979958", "0.49887508", "0.4983147", "0.49598613", "0.4951938", "0.49514386", "0.49390057", "0.49378294", "0.49323925", "0.4915757", "0.49023426", "0.48988274", "0.4893452", "0.4885686", "0.48852703", "0.48821545", "0.4876236", "0.4857134", "0.48520482", "0.48514852", "0.48462233", "0.48438156", "0.48431027", "0.48287904", "0.482799", "0.48251238", "0.48166618", "0.4813655", "0.47898614", "0.47707945", "0.47658157", "0.4762001", "0.47609642", "0.47541", "0.47520265", "0.4743167", "0.47376853", "0.4736689", "0.472023", "0.47191164", "0.47144344", "0.47129992", "0.47103497", "0.4705832", "0.47039774", "0.47018266", "0.46989116", "0.46987656", "0.4697496", "0.46882978", "0.46873474", "0.4686024" ]
document_score: 0.659338
document_rank: 0
query: Name of the lambda function.
def function_name(self) -> pulumi.Output[str]: return pulumi.get(self, "function_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def name(self):\n\t\treturn self._func_name", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def get_function_name(self):\n return self.__function", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def function_name(cls):\n function_name = String(cls.__name__).snakecase().lower()\n return function_name", "def getName(self):\n return _libsbml.FunctionDefinition_getName(self)", "def islambda(func):\n return getattr(func, 'func_name', False) == '<lambda>'", "def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)", "def function_name(parameters):", "def get_function_name_at(self, address):\n pass", "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def _callable_name(f):\n try:\n return f.__name__\n except AttributeError:\n if isinstance(f, partial):\n return f.func.__name__\n return f.__class__.__name__", "def function(self) -> str:\n return pulumi.get(self, \"function\")", "def display_name(self) -> str:\n return f\"{self.func.__module__}.{self.func.__qualname__}\"", "def funcName():\r\n import sys\r\n return sys._getframe(1).f_code.co_name", "def method_name(self) -> str:\n if isinstance(self.view_func, str):\n return self.view_func\n return self.view_func.__name__", "def get_function_name(wrapped, instance, args, kwargs):\n return wrapped.__name__", "def get_callable_name(func):\n if isinstance(func, functools.partial):\n return get_callable_name(func.func)\n else:\n return func.__name__", "def function_name(func):\n return log(level=\"info\", message=_function_name(func))", "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def _set_name_scope(self):\n if self.name is None:\n self._name_scope = self.__class__.__name__\n elif self.name == '<lambda>':\n self._name_scope = 'lambda'\n else:\n # E.g. 
'_my_loss' => 'my_loss'\n self._name_scope = self.name.strip('_')", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def _state_name(self):\n return '{}_{}'.format(self.function_name, self.STATE_CONFIG_SUFFIX)", "def getName(self):\n return _libsbml.Event_getName(self)", "def event_name(self):\n return self._event_name", "def this_func_input_name():\n\treturn input_name_from_func_name(inspect.stack()[1][3])", "def get_function_raw_name_at(self, address):\n pass", "def get_function_name():\n\n # inspect.stack()[0][2] returns name of this function\n function_name = inspect.stack()[1][3]\n\n return function_name", "def lambda_func_doc(self, label):\n latex = (\n r'0 = \\lambda - '\n r'\\frac{\\dot{m}_\\mathrm{air}}{\\dot{m}_\\mathrm{air,min}}'\n )\n return generate_latex_eq(self, latex, label)", "def _name(self):\n return self.arguments[0].split('(')[0]", "def _name(self):\n return self._arguments[0].split('(')[0]", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def name(self) -> str:\n return f\"{self.class_object.__name__}.{self.method_str}\"", "def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)", "def name(self):\n return signal_base_get_name(self.obj)", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def is_lambda(fun):\n return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__", "def name(self) -> str:\n return self._formal_name", "def GetCurrentFuncName():\n return sys._getframe(1).f_code.co_name", "def getElementName(self):\n return _libsbml.FunctionDefinition_getElementName(self)", "def node_name(self) -> str:\n op_name = f\"{self.name.name}_{self.name.overload_name}\".lower()\n return \"\".join(word.capitalize() or \"\" for word in op_name.split(\"_\"))", "def cal_name(self):\n return self.event_name", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def trigger_name(self) -> \"str\":\n return self._attrs.get(\"triggerName\")", "def getName(self):\n return signal_base_get_name(self.obj)", "def get_lambdas(self):\n return self.graph.build_lambdas.output.lambda_pairs[-1][:, 0]", "def name(self):\n # type: () -> str\n return self._name", "def name(self):\n return self.__name__", "def destination_function(self):\n return self._event['destination_function_name']", "def get_fun_name(line):\n match = re.match(r'(function|macro)\\s*\\((\\w+)', line)\n if not match:\n return\n return match.groups()[1]", "def name(self):\n # type: () -> string_types\n return self._name", "def name(self):\n # type: () -> string_types\n return self._name", "def event_name(self):\n return dict.get(self, 'event_name', None)", "def fname(func: Callable) -> str:\n return \"{}.{}\".format(func.__module__, func.__name__)", "def get_function(self,attr):\n func_name = self._user_funcs[attr] \n if hasattr(func_name,'__call__'):\n func_name = func_name(self)\n return func_name", "def getElementName(self):\n return _libsbml.ListOfFunctionDefinitions_getElementName(self)", "def getElementName(self):\n return _libsbml.Trigger_getElementName(self)", "def as_function_name(self, string):\n return idaapi.COLSTR(string, 
idaapi.SCOLOR_CNAME)", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def get_function_name(ifunc, *, scoped=False, mangle=False):\n\n name = _translate_function_name(interrogate_function_name(ifunc), mangle)\n\n if scoped:\n parent = interrogate_function_class(ifunc)\n if parent:\n name = get_type_name(parent, scoped=True, mangle=mangle) + '.' + name\n\n return name", "def method_name(self):\n pass", "def get_name():\n return __name__", "def idp_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_lambda_arn\")", "def getElementName(self):\n return _libsbml.FunctionTerm_getElementName(self)", "def test_name_of_func_should_be_passed_name(self):\n scope = self.Test.scope('foo', where='foo')\n self.assertEqual(scope.__name__, 'foo')", "def fn(self):\n return self._fn", "def getName(self):\n return _libsbml.FluxObjective_getName(self)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def name(self) -> str:\n if hasattr(self, \"_name\"):\n return self._name\n _args: list[Arg] = []\n _ctx = self._select(\"name\", _args)\n return _ctx.execute_sync(str)", "def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))", "def name() -> str:\n pass", "def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)", "def name ( self ) :\n return self.__name if self.__name else ''", "def get_class_functional_name(name):\n name = _strip_class_name(name)\n return name", "def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def key(func):\n return func.__func__ if hasattr(func, \"__func__\") else func", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def name(self):\n return self.__name", "def get_name(listener_id):\n return \"{}{}\".format(f5_const.PREFIX_LISTENER, listener_id)", "def signature(function: model.Function) -> str:\n return str(function.signature)", "def get_filter_name(self):\n pass", "def function(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"function\")", "def getName(self):\r\n return self.__name__" ]
[ "0.77034545", "0.77034545", "0.7593706", "0.73899704", "0.73329914", "0.7193673", "0.7108169", "0.70655984", "0.6839772", "0.68352073", "0.67619866", "0.6591472", "0.65683824", "0.6567803", "0.6475904", "0.6402211", "0.6297765", "0.6272437", "0.62723136", "0.6268716", "0.62462837", "0.6244579", "0.62361825", "0.6224806", "0.6182332", "0.6182332", "0.6182332", "0.6173039", "0.6165112", "0.61562943", "0.61312586", "0.6110205", "0.60902673", "0.6070161", "0.60338116", "0.60258424", "0.5970842", "0.5958052", "0.5950451", "0.59492046", "0.59424484", "0.592031", "0.59045196", "0.5903703", "0.5897504", "0.58945054", "0.5889464", "0.58314306", "0.58274144", "0.58200705", "0.58184564", "0.581762", "0.57626164", "0.5759648", "0.57526284", "0.5732005", "0.5732005", "0.57317984", "0.57263786", "0.57263625", "0.57248193", "0.57200485", "0.5715866", "0.5708168", "0.5708168", "0.57057124", "0.5693515", "0.56733423", "0.56669897", "0.56647336", "0.566042", "0.56593794", "0.5656557", "0.5649768", "0.5649768", "0.5649768", "0.5649768", "0.5647923", "0.56448925", "0.564054", "0.56388444", "0.5632923", "0.563136", "0.5629419", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.56141824", "0.561274", "0.56053305", "0.56021523", "0.559393", "0.5587055" ]
document_score: 0.7464853
document_rank: 4
query: JSON payload to the lambda function.
def input(self) -> pulumi.Output[str]: return pulumi.get(self, "input")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lambda_payload(self, date: str, symbol: str) -> bytes:\n self._lambda_event['s3_bucket'] = self._config['s3_bucket']\n self._lambda_event['s3_key_quotes'] = (\n self._config['s3_key_input_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_quotes_suffix'])\n self._lambda_event['s3_key_trades'] = (\n self._config['s3_key_input_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_trades_suffix'])\n self._lambda_event['s3_key_output'] = (\n self._config['s3_key_output_prefix'] + date + '/' + symbol + '/' +\n self._config['s3_key_output_suffix'])\n return json.dumps(self._lambda_event).encode()", "def lambda_handler(event, context):\n\n if not event[\"body\"]:\n return create_output(400, \"Invalid body: body is empty.\")\n\n try:\n item = json.loads(event[\"body\"])\n except json.JSONDecodeError:\n return create_output(400, \"Invalid body: can't decode body.\")\n\n for key in iris_keys:\n if key not in item:\n return create_output(400, f\"Invalid body: missing key {key} in body.\")\n try:\n float(item[key])\n except ValueError:\n return create_output(400, f\"Invalid body: can't parse {key} to float.\")\n\n item[\"id\"] = create_hash(item)\n\n try:\n table.put_item(Item=item)\n except Exception as e:\n logger.error(f\"Error, can't insert item: {e}\")\n return create_output(500, \"Internal error: can't insert item in table.\")\n\n return create_output(200, \"Item created.\")", "def lambda_handler(event, context):\n logger.debug(event)\n\n product_list = PRODUCT_LIST\n\n return {\n \"statusCode\": 200,\n \"headers\": HEADERS,\n \"body\": json.dumps({\"products\": product_list}),\n }", "def lambda_handler(event, context):\n # EOL char append function\n encode_data = lambda x: \"{data}{eol}\".format(data=json.dumps(x), eol=chr(10)).encode(\"UTF-8\")\n \n # Punk API call\n try:\n logger.debug(\"Requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n request = r.get(os.environ[\"API_URL\"])\n except Exception as e:\n logger.error(\"An error occured while requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n raise e\n \n # Send records to kinesis stream\n logger.debug(\"Sending data to stream: {stream}\".format(stream=os.environ[\"STREAM_NAME\"]))\n for data in request.json():\n client.put_record(\n StreamName=os.environ[\"STREAM_NAME\"],\n Data=encode_data(data),\n PartitionKey=\"key\"\n )\n\n return {\n 'statusCode': request.status_code,\n 'body': data\n }", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def lambda_handler(event, context):\n\n operations = {\n 'POST': main,\n }\n\n if event.get('httpMethod', False):\n operation = event['httpMethod']\n else:\n operation = \"not available\"\n\n payload = base64.b64decode(event['body'])\n try:\n payload = json.loads(payload)\n except TypeError:\n pass\n\n if operation in operations:\n return respond(None, operations[operation](payload))\n else:\n return respond(ValueError(f'Unsupported method {operation}'))", "def lambda_handler(event, context):\n\n # return {\n # \"statusCode\": 200,\n # \"body\": json.dumps(\n # {\n # \"message\": \"hello world\",\n # }\n # ),\n # }\n src_bytes = base64.b64decode(event[\"body\"])\n src = cv2.imdecode(np.frombuffer(src_bytes, dtype=np.uint8), cv2.IMREAD_COLOR)\n gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n _, gray_bytes = cv2.imencode(\".jpg\", gray)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(base64.b64encode(gray_bytes).decode(\"UTF-8\")),\n }", "def payload(self):", "def lambda_handler(event, context):\n\n return {\n 
\"statusCode\": 200,\n \"body\": json.dumps({\n 'message': 'API deployed',\n })\n }", "def _send_json(self, payload: dict):\n data = json.dumps(payload)\n return self.send(data)", "def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)", "def lambda_handler(event, context):\n for item in json.loads(event[\"Records\"][0][\"body\"]):\n item[\"id\"] = uuid.uuid1().bytes\n for key, value in item.items():\n if key == \"id\":\n item[key] = {\"B\": bytes(value)}\n elif key == \"fiscal_year\":\n item[key] = {\"N\": str(value)}\n elif key == \"emissions_mtco2e\":\n item[key] = {\"N\": str(value)}\n elif key == \"consumption\":\n item[key] = {\"N\": str(value)}\n else:\n item[key] = {\"S\": str(value)}\n\n time.sleep(0.001)\n\n dynamo.put_item(TableName=\"Greenhouse_gas_emissions\", Item=dict(item))", "def set_payload(self, payload):\n self.payload = json.dumps(payload)", "def lambda_handler(event, context):\n\n # S3 resource invocation\n s3_resource = boto3.resource('s3')\n # S3 bucket selection\n data_bucket_name = \"put_here_data_bucket_name\"\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n request_body_dict = json.loads(event['body'])\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending both ticker and start date if given\n if request_body_dict['start_date'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_future_request(request_body=request_body_dict,\n s3_resource=s3_resource,\n s3_bucket=data_bucket_name, prefix='valid'))\n # or only ticker name if no start date has been provided\n elif request_body_dict['ticker_name'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_request(ticker_name=request_body_dict['ticker_name'],\n s3_resource=s3_resource, s3_bucket=data_bucket_name,\n prefix='train'))\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n # print data for debug purposes\n print(result)\n\n return {\n 'statusCode': 200,\n 'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},\n 'body': str(result)\n }", "def lambda_handler(event, context):\n\n event_body = json.loads(event['body'])\n print(\"EVENT:\")\n print(event_body)\n\n\n # try:\n # ip = 
requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n recs = flow(event_body, textract, cache = True)\n rval = {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\" : \"hello world\",\n \"textract\" : recs\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n\n return rval", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n\n # Log the values received in the event argument\n logger.info(f'Request event: {event}')\n\n # Define default hard-coded return values\n response = {\n 'uid': 'Example function ID',\n 'return_val01': 'Return value #1',\n 'return_val02': 'Return Value #2',\n }\n\n # Retrieve type of invocation (GET, PUT, etc.)\n if 'http_verb' in event:\n operation = event['http_verb'].upper()\n if operation == 'PUT':\n # Return the values passed to the function\n response = {\n 'uid': event['functionID'],\n 'return_val01': event['parameters']['parm01'],\n 'return_val02': event['parameters']['parm02'],\n }\n\n logger.info(f'Response={response}')\n return response", "def test4():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000147203645/tic000147203645_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n return {\n 'statusCode': 200,\n 'body': say_hello()\n }", "def lambda_handler(event, context):\n\n print(\"EVENT:\")\n print(event)\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n\n recs = flow(event, s3)\n print(recs)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def to_lambda_output(self):\n resp = {\n 'statusCode': self.status_code,\n 'body': self.body,\n 'headers': self.headers\n }\n\n return resp", "def invoke_lambda(lambda_name, lambda_payload):\n try:\n LOGGER.debug(f\"Sending request to '{lambda_name}' method: {lambda_payload}\")\n client = boto3.client('lambda')\n invoke_response = client.invoke(FunctionName=lambda_name,\n InvocationType=\"RequestResponse\",\n Payload=json.dumps(lambda_payload))\n response = json.loads(invoke_response['Payload'].read())\n except Exception as ex:\n LOGGER.debug(f\"Error encountered while invoking lambda method '{lambda_name}': {repr(ex)}\")\n\n return response", "def lambda_handler(event, context):\n # define initial status code and headers\n statusCode = 400\n try:\n # get the body params\n if type(event) == dict:\n event_body = event.get('body', event)\n else:\n event_body = json.loads(event).get('body', {})\n # generate and store the reservation response result from reservation handler function\n reservation_handler = ReservationHandler(EventBodyData=event_body)\n result = reservation_handler.sabre_reservation_handler()\n # define status code, headers and response\n if type(result) == dict:\n statusCode = result.get(\"statusCode\", statusCode)\n response = result.get(\"body\", \"\")\n else:\n response = result\n except Exception as E:\n response = str(E)\n\n # return the 
response\n return {\n 'statusCode': statusCode,\n 'body': response\n }", "def test3():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000129646247_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n blogs = [{\n 'title': 'BETTER UTILITY THAN A TRUCK WITH MORE PERFORMANCE THAN A SPORTS CAR',\n 'description': 'Cybertruck is built with an exterior shell made for ultimate durability and passenger protection. Starting with a nearly impenetrable exoskeleton, every component is designed for superior strength and endurance, from Ultra-Hard 30X Cold-Rolled stainless-steel structural skin to Tesla armor glass.'\n }, {\n 'title': 'ULTRA-HARD 30X COLD-ROLLED STAINLESS STEEL',\n 'description': 'If there was something better, we’d use it. Help eliminate dents, damage and long-term corrosion with a smooth monochrome exoskeleton that puts the shell on the outside of the car and provides you and your passengers maximum protection.'\n }, {\n 'title': 'TESLA ARMOR GLASS',\n 'description': 'Ultra-strong glass and polymer-layered composite can absorb and redirect impact force for improved performance and damage tolerance.'\n }\n ]\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(blogs),\n }", "def lambda_handler(event, context):\n # Boto is always available in AWS lambda, but may not be available in standalone mode\n# import boto3\n# from base64 import b64decode\n\n # To generate the encrypted values, go to AWS IAM Keys and Generate a key\n # Then grant decryption using the key to the IAM Role used for your lambda function.\n #\n # Use the command `aws kms encrypt --key-id alias/<key-alias> --plaintext <value-to-encrypt>\n # Put the encrypted value in the configuration dictionary below\n# encrypted_config = {\n# 'pagerduty_api_key': '<ENCRYPTED VALUE>',\n# 'schedule_ids': '<ENCRYPTED VALUE>'\n# }\n\n# kms = boto3.client('kms')\n# config = {x: kms.decrypt(CiphertextBlob=b64decode(y))['Plaintext'] for x, y in encrypted_config.iteritems()}\n on_call = OnCall(API_KEY, SCHEDULE_IDS)\n output = on_call.run()\n\n return { \"response_type\": \"in_channel\", \"text\": '\\n'.join(output) }", "def lambda_handler(event, context):\n\n try:\n created_item = create_new_table_item(event)\n return {\"statusCode\": 201, \"body\": json.dumps(f\"{created_item}\")}\n\n except BlankRequestBody as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(MISSING_PARAMETERS_MESSAGE)}\n\n except ValidationError as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 400, \"body\": json.dumps(INCORRECT_PARAMETERS_MESSAGE)}\n\n except Exception as exception:\n logging.error(f\"{exception}\")\n return {\"statusCode\": 500, \"body\": json.dumps(\"Internal server error\")}", "def lambda_handler(event, context):\n try:\n # Extract the Job ID\n job_id = event['CodePipeline.job']['id']\n\n # Extract the Job Data\n job_data = event['CodePipeline.job']['data']\n\n # Extract the params\n params = get_user_params(job_data)\n\n # Get the lists of artifacts coming in and out of this function\n input_artifacts = job_data['inputArtifacts']\n output_artifacts = job_data['outputArtifacts']\n\n # Perform a build on the source (from source_artifact)\n # and write results to the build_artifact\n s3 = 
setup_s3_client(job_data)\n source_artifact = find_artifact(input_artifacts, params['source_artifact'])\n src_dir = get_zipped_artifact(s3, source_artifact)\n dest_dir = tempfile.mkdtemp()\n perform_build(os.path.join(src_dir, 'src'), dest_dir)\n build_artifact = find_artifact(output_artifacts, params['build_artifact'])\n put_zipped_artifact(s3, dest_dir, build_artifact)\n\n # Pick the template out of the source code and write it to the\n # template_artifact\n template_artifact = find_artifact(output_artifacts, params['template_artifact'])\n put_zipped_artifact(s3, os.path.join(src_dir, params['template_subdir_path']), template_artifact)\n\n shutil.rmtree(src_dir)\n shutil.rmtree(dest_dir)\n put_job_success(job_id, \"Built code: \" + \", template:\")\n\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail the job and log the exception message.\n print('Function failed due to exception.')\n print(e)\n traceback.print_exc()\n put_job_failure(job_id, 'Function exception: ' + str(e))\n\n print('Function complete.')\n return \"Complete.\"", "def lambda_handler(event, context):\n return dispatch(event)", "def handle_json(self, source, data):\n method, args = json.loads(data)\n try:\n result = self.call(source, method, *args)\n except Exception as exc:\n result = str(exc)\n\n return json.dumps(result)", "def get_payload(self):\n if self.payload == '':\n return {}\n\n return json.loads(self.payload)", "def lambda_inject_context(payload, scope):\n try:\n invoke_payload = payload.get('Payload', {})\n\n if not isinstance(invoke_payload, dict):\n invoke_payload = json.loads(invoke_payload)\n\n tracer.inject(scope.span.context, ot.Format.HTTP_HEADERS, invoke_payload)\n payload['Payload'] = json.dumps(invoke_payload)\n except Exception:\n logger.debug(\"non-fatal lambda_inject_context: \", exc_info=True)", "def lambda_handler(event, context):\n feature = session.query(m.Feature).first()\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": feature.title,\n }),\n }", "def callback(ch, method, properties, body):\n record = json.loads(body.decode()) # decode binary string to dict\n pprint(record)", "def payload(self, payload: \"dict\"):\n self._attrs[\"payload\"] = payload", "def lambda_handler(event, context):\n\n # Check that we were passed the required arguments\n validate_event(event)\n\n try:\n numpy_method_name = event.get('method')\n numpy_argument_array = event.get('arguments')\n\n logger.info(\"Handing call to the NumPy {} method with arguments: {}\".format(numpy_method_name, numpy_argument_array))\n result = getattr(numpy, numpy_method_name)(*numpy_argument_array)\n logger.info(\"Result from NumPy is {}\".format(result))\n return {'result': result}\n except:\n error_message = \"Unexpected error: {}\".format(str(sys.exc_info()))\n logger.error(error_message)\n raise Exception(error_message)", "def get_json_payload(self):\n self._payload_to_obj()\n return self._obj_payload", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n try:\n response = s3.get_object(Bucket=BUCKET, Key=KEY)\n print('CONTENT TYPE:', response['ContentType'])\n print('response:')\n pprint.pprint(response)\n print('event')\n pprint.pprint(event)\n print('payload')\n pprint.pprint(event.get('payload'))\n # return json.loads(json.dumps(response, default=str))\n # defined by 
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return {\n 'statusCode': 200,\n 'isBase64Encoded': False,\n 'body': json.dumps(response, default=str)\n }\n # return response['ContentType']\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(KEY, BUCKET))\n raise e", "def pewma():\n try:\n content = request.get_json()\n try:\n data = content[\"data\"]\n except:\n data = content\n result = pewma_model.lambda_handler(data)\n return jsonify(result)\n except Exception as e:\n return jsonify({\"error\": str(e)})", "def on_push(self, payload):\n pass", "def lambda_handler(event, context):\n qpmBucket = event['queryStringParameters']['bucket']\n fn_bucket = list_s3_objects(qpmBucket)\n print(fn_bucket)\n \n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps(str(fn_bucket)),\n }", "def lambda_handler(event, context):\n\n # resolve backend api key from the secrets manager\n sm_client = boto3.client('secretsmanager')\n sm_resp = sm_client.get_secret_value(os.getenv('BACKEND_SERVICE_API_KEY_SECRET_ARN'))\n backend_api_key = json.dumps(sm_resp.get('SecretString')).get('key')\n\n # TODO implement further business logic\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }", "def invokeLambdaFunction(lambdaArn:str, parameters:Dict=None) -> Dict:\n payloadDict = {\n 'http_verb': 'POST',\n 'functionID': lambdaArn,\n }\n if parameters:\n payloadDict['parameters'] = parameters\n return json.loads(_invokeLambdaFunction(lambdaArn, payloadDict)['Payload'].read().decode('utf-8'))", "def lambda_handler(event, context):\n \n filename = None\n fobj = None\n\n try:\n \n filename = 'dlq' + '-' + datetime.datetime.now().strftime(\"%s\")\n fobj = open('/tmp/'+filename, 'w')\n logger.debug('S3 client set up.')\n\n for record in event['Records']:\n fobj.write(json.dumps(record['body']))\n fobj.write(\"\\n\")\n \n except Exception as ex:\n logger.error('Exception in executing ingestion to S3: {}'.format(ex))\n send_sns_alert(str(ex))\n raise\n\n else:\n \n #Saves file to S3\n fobj.close()\n load_data_s3(filename)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Success!')\n }\n\n finally:\n\n # S3 - close temp object\n fobj.close()", "def hello_world(\n event: Dict[str, Any],\n context,\n):\n body_str = event.get(\"body\", \"{}\")\n body_str = body_str if body_str else \"{}\"\n body_obj = json.loads(body_str)\n wiki_search_term = body_obj.get(\"searchTerm\", \"\")\n if not body_obj or not wiki_search_term:\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/handle-errors-in-lambda-integration.html\n response = {\n \"statusCode\": 400,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps({\"message\": \"Wikipedia search term was not provided\"}),\n }\n else:\n summary = wikipedia.summary(wiki_search_term)\n response = {\n \"statusCode\": 200,\n \"headers\": {\"Content-Type\": \"application/json\"},\n \"body\": json.dumps(summary),\n }\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return response", "def invoke_process(fuction_name, account_id, region):\n invoke_payload = (\n json.JSONEncoder().encode(\n {\n \"account\": account_id,\n \"region\": region\n }\n )\n )\n lambda_client.invoke(\n FunctionName=fuction_name,\n 
InvocationType='Event',\n Payload=invoke_payload,\n )", "def test_generic(key,bucket):\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": bucket\n },\n \"object\": {\n \"key\": key\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n print(out)\n assert out[\"statusCode\"] == 200", "def get_json(payload):\n try:\n the_json = json.dumps(payload, indent=4, sort_keys=True)\n return the_json\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def lambda_handler(event: APIGatewayProxyEvent, context: LambdaContext) -> Dict[str, Any]:\n\n return app.resolve(event, context)", "def generate_payload(req):\n logging.info(f'Do something with {req}')\n return json.dumps({\n \"msg1\": \"Hello world 1!\",\n \"msg2\": \"Hello world 2!\"\n })", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n curr_time = str(int(datetime.now().strftime(\"%s\")) * 1000)\n client.put_item(TableName='demo-table', \n Item={\n 'doi': {'S': curr_time}, \n 'crossref_flag': {'BOOL': False},\n 'europepmc_flag': {'BOOL': False},\n 's3_flag': {'BOOL': False},\n 'indexing_flag': {'BOOL': False}\n })\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def lambda_handler(event, context=None):\n response = {}\n try:\n response = middleware.IdentityAuthMiddleWare.process_request(event, response)\n except Exception as e:\n response[\"message\"] = e.message\n response[\"errors\"] = e.errors\n # removing request_dump data\n if \"request_dump\" in response[\"errors\"]:\n del response[\"errors\"][\"request_dump\"]\n for _k, _v in response[\"errors\"].items():\n response[\"errors\"][_k] = str(_v)\n return response", "def set_json_payload(self, payload):\n self._str_payload = None\n self._obj_payload = payload", "def lambda_handler(event, context):\r\n body = json.loads(event[\"body\"].replace(\"'\", '\"'))\r\n # This allows the function to run locally by sending requests to a local DynamoDB. 
Option one is for when it's\r\n # being run by SAM, option two for when the tests are being run, and three for production\r\n if os.environ.get('AWS_SAM_LOCAL'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://dynamo:8000')\r\n table = dynamodb.Table(\"pollsStorageDB\")\r\n elif 'local' == os.environ.get('APP_STAGE'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\r\n table = dynamodb.Table(\"pollsStorageDB\")\r\n else:\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table(os.environ[\"DDB_TABLE_NAME\"])\r\n # Create dict to contain the number of votes for each possible response\r\n responses = {}\r\n for answer in body[\"answersList\"]:\r\n responses[answer] = 0\r\n \r\n # Sort out the expiry date\r\n if body.get(\"expiresIn\"):\r\n try:\r\n expiresIn = (datetime.now() + timedelta(days=int(body[\"expiresIn\"]))).timestamp()\r\n except BaseException as e:\r\n print(e)\r\n else:\r\n expiresIn = (datetime.now() + timedelta(days=30)).timestamp()\r\n\r\n # Create unique ID for the poll\r\n randomString = ''.join([random.choice(string.ascii_letters \r\n + string.digits) for n in range(32)]) \r\n poll = {\r\n \"id\": randomString,\r\n 'question': body[\"question\"],\r\n 'answersList': body[\"answersList\"],\r\n 'responses': responses,\r\n 'created': datetime.now().timestamp(),\r\n \"expires\": Decimal(expiresIn)\r\n }\r\n response = table.put_item(\r\n Item=poll\r\n )\r\n \r\n return {\r\n 'headers': {\r\n 'Access-Control-Allow-Headers': 'Content-Type',\r\n 'Access-Control-Allow-Origin': '*',\r\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'\r\n },\r\n \"statusCode\": 200,\r\n \"body\": json.dumps({\r\n \"success\": True,\r\n \"polls\": [poll]\r\n }),\r\n }", "def lambda_handler(event, context):\n print(event)\n print(context)\n storage_gateway_status()", "def build_payload():\r\n payload = json.dumps({\"method\": \"ListActivePairedVolumes\",\r\n \"params\": {}, \"id\": 1})\r\n return payload", "def lambda_handler(event, context): # pylint: disable=too-many-locals,too-many-branches,too-many-statements\r\n try: # pylint: disable=too-many-nested-blocks\r\n print(\"Execution started!\")\r\n #print(\"Event: \",event)\r\n # Bucket name and Full path for file - where file will be uploded\r\n source_bucket_name = event[\"detail\"][\"requestParameters\"][\"bucketName\"]\r\n source_key = urllib.parse.unquote_plus(\r\n event[\"detail\"][\"requestParameters\"][\"key\"], encoding='utf-8')\r\n \r\n print(\"file_path: \",source_key)\r\n #Loading master config\r\n print(\"Loading master_config\")\r\n audit_config = {}\r\n config_path = \"./config/\" + \\\r\n os.environ['CCM_ENV'] + \"/master_config.json\"\r\n config_content = open(config_path).read()\r\n config_json = json.loads(config_content)\r\n audit_config = config_json[\"audit_config\"]\r\n snow_params = config_json[\"ERROR_NOTIFICATION_SNOW_PARAMS\"]\r\n athena_query_param = config_json[\"ATHENA_QUERY_PARAMS\"]\r\n athena_table_params = config_json[\"ATHENA_TABLE_PARAMS\"]\r\n\r\n # Audit Parameters Based on the Invoking lambda and its operation involved\r\n audit_config[\"component_type_code\"] = \"ETL\"\r\n audit_config[\"component_name\"] = \"PCP Appflow\"\r\n audit_config[\"source_name\"] = \"Patient Connections Platform\"\r\n audit_config[\"target_name\"] = \"Consumer Consent Management\"\r\n audit_config[\"full_file_path\"] = \"s3://\" + \\\r\n source_bucket_name + \"/\" + source_key\r\n audit_config[\"file_version_id\"] = \"\"\r\n\r\n # Creates Job Entry in ABC Framework\r\n 
print(\"audit config::\", audit_config)\r\n process_execution_id = audit_helper.\\\r\n invoke_edb_abc_log_process_status_event_job_entry(audit_config)\r\n audit_config[\"process_execution_id\"] = process_execution_id\r\n print(\"process_execution_id ::\", process_execution_id)\r\n #print(\"source_key: \",source_key)\r\n s3_write = boto3.client('s3')\r\n record_dict = {}\r\n file_name = \"\"\r\n final_json = \"\"\r\n # prefix = \"\"\r\n # file_list = []\r\n # client = boto3.client(\"s3\")\r\n # result = client.list_objects(Bucket=source_bucket_name, Prefix=source_key, Delimiter='/')\r\n # #print(result)\r\n # for obj in result.get('CommonPrefixes'):\r\n # prefix = obj.get('Prefix')\r\n # #print(prefix)\r\n # file_list = list_files(client,source_bucket_name,prefix)\r\n # for file in file_list:\r\n # #print(file)\r\n json_read = read_s3_file(source_bucket_name, source_key)\r\n data = json.loads(json_read)\r\n #print(data)\r\n if data != '':\r\n record_dict = {k.lower(): v for k, v in data.items()}\r\n print(\"Record_Dict::\",record_dict)\r\n event_type_param = {}\r\n event_type_list = athena_table_params.keys()\r\n print(\"event_type_list\",event_type_list)\r\n for key in event_type_list:\r\n print(\"key\",key)\r\n if key in source_key:\r\n print(\"key\",key)\r\n event_type_param = athena_table_params[key]\r\n print(event_type_param)\r\n if \"changeeventheader\" in record_dict:\r\n if record_dict[\"changeeventheader\"][\"changeType\"] == \"CREATE\":\r\n #and record_dict[\"dtpc_affiliate__c\"] == 'US':\r\n recordid_create = record_dict[\"changeeventheader\"][\"recordIds\"][0]\r\n print(recordid_create)\r\n if recordid_create != '':\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace(\":\",\".\")\r\n create_json = json.dumps(record_dict)\r\n final_json = create_json\r\n file_name = recordid_create + \"-create-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, Key=final_source_key)\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n elif record_dict[\"changeeventheader\"][\"changeType\"] == \"UPDATE\":\r\n record_ids_list = record_dict[\"changeeventheader\"][\"recordIds\"]\r\n if len(record_ids_list) != 0:\r\n for ele in record_ids_list:\r\n print(ele)\r\n element = \"'\" + ele + \"'\"\r\n payload_condition = event_type_param[\"recordid_condition\"]\r\n query = 'SELECT * FROM '+event_type_param[\"athena_create_table\"]+\\\r\n ' WHERE lastmodifieddate IN(SELECT max(lastmodifieddate) from '\\\r\n +event_type_param[\"athena_create_table\"]+\\\r\n ', UNNEST(\"'+payload_condition[0]+'\".\"'+payload_condition[1]+\\\r\n '\") AS ln(jsondata) WHERE jsondata IN ('+element+'));'\r\n print(query)\r\n athena_query_param['athena_query'] = query\r\n query_result_record_id = athena_helper.perform_athena_search\\\r\n (athena_query_param)\r\n print(\"Athena Query Result for Create Path:::\", query_result_record_id)\r\n update_json = create_complete_payload(data,query_result_record_id)\r\n print(\"update_json: \",update_json)\r\n if len(update_json) != 0:\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace\\\r\n (\":\",\".\")\r\n final_json = json.dumps(update_json)\r\n file_name = ele + \"-update-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = 
event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, \\\r\n Key=final_source_key)\r\n else:\r\n print(ele,\" does not have a create payload\")\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n else:\r\n raise Exception(\"ChangeEventHeader is missing: \", record_dict)\r\n else:\r\n raise Exception(\"Invalid Payload: \", record_dict)\r\n\r\n except (Exception) as err: # pylint: disable=line-too-long,broad-except\r\n print(\"Error occured: {0}\".format(str(err)))\r\n audit_type = \"error\"\r\n error_msg = sys.exc_info()\r\n exc_type = error_msg\r\n exc_obj = error_msg\r\n snow_params[\"flag\"] = \"FAIL\"\r\n snow_params[\"error_message\"] = str(exc_obj)\r\n snow_params[\"error_type\"] = str(exc_type)\r\n audit_config[\"exception_message\"] = str(exc_obj)\r\n if audit_config != {}:\r\n logging.exception(sys.exc_info())\r\n audit_helper.invoke_edb_abc_log_process_status_event(\r\n audit_type, audit_config) # pylint: disable=line-too-long\r\n audit_helper.raise_snow_incident(snow_params)", "def body(self):\n return json.dumps(self.data, cls=ServerlessJsonEncoder)", "def lambda_handler(*_):\n\n # Boto is always available in AWS lambda, but may not be available in\n # standalone mode\n import boto3\n\n # To generate the encrypted values, go to AWS IAM Keys and Generate a key\n # Then grant decryption using the key to the IAM Role used for your lambda\n # function.\n #\n # Use the command `aws kms encrypt --key-id alias/<key-alias> --plaintext <value-to-encrypt>\n # Put the encrypted value in the configuration dictionary below\n encrypted_config = {\n 'slack_token': '<ENCRYPTED VALUE>',\n 'pager_duty_token': '<ENCRYPTED VALUE>'\n 'pager_duty_domain_prefix': '<ENCRYPTED VALUE>'\n }\n\n kms = boto3.client('kms')\n config = {x: kms.decrypt(CiphertextBlob=b64decode(y))['Plaintext'] for x, y in encrypted_config.iteritems()}\n return SlackOnCall(**config).run()", "def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 
'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n },\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }", "def lambda_handler(event, context):\n logging.info(\"Received event: \" + json.dumps(event, indent=2))\n request_type = event['RequestType']\n if request_type == 'Create':\n attach_policy(event, context)\n elif request_type == 'Delete':\n detach_policy(event, context)\n elif request_type == 'Update':\n update_policy(event, context)", "def __init__(self, payload):\n self.payload = payload", "def payload(self) -> dict:\n return {\n 'event_name': '',\n\n }", "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "def lambda_handler(event, context):\n for record in event[\"Records\"]:\n arn = record[\"Sns\"][\"TopicArn\"].split(\":\")\n message = json.loads(record[\"Sns\"][\"Message\"])\n message_handler(arn[3], message)\n return ''", "def get_payload(self):\n return {'message': 'bar'}", "def payload(self, payload):\n\n self._payload = payload", "def process(self, payload, status_code=0):", "def event(event, context):\n# Sample event:\n #\n # _event = { \"Records\":[\n # {\n # \"eventVersion\":\"2.1\",\n # \"eventSource\":\"aws:s3\",\n # \"awsRegion\":\"us-east-1\",\n # \"eventTime\":\"2021-10-14T07:40:55.113Z\",\n # \"eventName\":\"ObjectCreated:Put\",\n # \"userIdentity\":{\n # \"principalId\":\"AWS:AROA6L2YJX2JCJYHEJ4UI:serverless-image-processing-test-create\"\n # },\n # \"requestParameters\":{\n # \"sourceIPAddress\":\"94.140.8.209\"\n # },\n # \"responseElements\":{\n # \"x-amz-request-id\":\"7CJHSGZ9MZF9995F\",\n # \"x-amz-id-2\":\"X5OtpRb+P9CuYKDHvjT8z9prnqqsH1yatZchN2uw8/158mcRUVhQNSW/z5ffXLqkLhu+4Kc163vZiRgVk3XaGd8H1NhZCu8N\"\n # },\n # \"s3\":{\n # \"s3SchemaVersion\":\"1.0\",\n # \"configurationId\":\"9b8f4135-35d4-4e07-b8a5-7d68cc95870b\",\n # \"bucket\":{\n # \"name\":\"serverless-image-processing-test-serverless-image-processing\",\n # \"ownerIdentity\":{\n # \"principalId\":\"A5IHQSLNTJKZN\"\n # },\n # 
\"arn\":\"arn:aws:s3:::serverless-image-processing-test-serverless-image-processing\"\n # },\n # \"object\":{\n # \"key\":\"test/6e7ef3f0-dcb6-4db6-9518-3bc6ec0ba492\",\n # \"size\":116716,\n # \"eTag\":\"f04e70e100f653a0e67f32f6098dea1c\",\n # \"sequencer\":\"006167DF06C888A626\"\n # }\n # }\n # }\n # ]\n # }\n\n logger.debug('event: {}'.format(event))\n for record in event['Records']:\n processRecord(record)\n\n return {'statusCode': httplib.ACCEPTED}", "def payload(self) -> dict:\n return {\n # TBD\n }", "def _get_payload(self, method, **params):\n try:\n payload = params['data']['payload']\n if self.prettyprint:\n payload = \"\\n\" + json.dumps(json.loads(payload),\n indent=self.indent)\n except KeyError:\n payload = \"N/A\" if method == \"Event Channel Message\" else \"None\"\n return payload", "def inner(fn_inner):\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler", "def build_payload(self, **kwargs):\n\n return None", "def lambda_handler(event, context):\n # Environmental Variables\n CATALOG_BRANCHES_TABLE = anejocommon.set_env_var('CATALOG_BRANCHES_TABLE')\n PRODUCT_INFO_TABLE = anejocommon.set_env_var('PRODUCT_INFO_TABLE')\n S3_BUCKET = anejocommon.set_env_var('S3_BUCKET')\n\n # Loop through event records\n try:\n event_records = event['Records']\n except KeyError:\n event_records = [{'body': event}]\n\n for record in event_records:\n try:\n catalog_sync_info = json.loads(record['body'])\n except TypeError:\n catalog_sync_info = record['body']\n\n # Event Variables\n catalog_url = catalog_sync_info['catalog_url']\n\n apple_bucket_catalog_path = anejocommon.get_path_from_url(\n catalog_url,\n 'html',\n append_to_path='.apple'\n )\n \n catalog = anejocommon.retrieve_url(catalog_url)\n try:\n catalog_plist = plistlib.readPlistFromBytes(catalog.data)\n except plistlib.InvalidFileException:\n print(\"ERROR: Cannot read catalog plist\")\n return\n\n # Write our local (filtered) catalogs\n anejocommon.write_local_catalogs(\n apple_bucket_catalog_path,\n catalog_plist,\n S3_BUCKET,\n CATALOG_BRANCHES_TABLE,\n PRODUCT_INFO_TABLE\n )", "def lambda_handler(event, context):\n\n 
logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)", "def lambda_handler(event, context):\n logging.info(event)\n current_time = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n is_conversation_result = 'Details' in event\n if is_conversation_result:\n combine_bot_state_to_s3(event, current_time)\n else:\n save_bot_state_to_s3(event, current_time)\n\n # Generate response back to bot\n response = dict()\n if not is_conversation_result:\n response = {\n 'dialogAction': {\n 'type': 'Delegate',\n 'slots': event['currentIntent']['slots']\n }\n }\n logging.info(response)\n return response", "def aws_lambda(f):\n def wrapper(event, context):\n logger = logging.getLogger(__name__)\n try:\n # if no return value is given by wrapped func,\n # return default status code 200 response.\n r = f(event, context)\n if r is None:\n r = {\n 'statusCode': 200,\n 'body': json.dumps({'input': event})\n }\n return r\n except Exception as e:\n # if exception is thrown, log exception,\n # return exception text,\n # and return status code associated with passed\n # exception type\n logger.info(\n 'Call to {} resulted in exception'.format(f.__name__), e)\n exc_type = type(e)\n # get exception type for code lookup and msg\n if exc_type is type:\n exc_type = e\n msg = e.__name__\n else:\n msg = str(e)\n # get default exception code for raised Exception.\n # default to code 500 if exception is not in codes dict.\n code = codes.get(exc_type, DEFAULT_EXCEPTION_CODE)\n return {\n 'statusCode': code,\n 'body': json.dumps({'input': event, 'message': msg})\n }\n\n wrapper.__name__ = f.__name__ + '_wrapper'\n return wrapper", "def lambda_handler(event, context):\n print 'Received event: ' + json.dumps(event, indent=2)\n print \"Context log stream: \"+ context.log_stream_name\n\n try:\n filename = get_latest_agent_filename()\n download_agent_if_missing(filename)\n prepare_agent_input_data(event, context)\n run_agent(filename)\n\n except URLError as ex:\n print 'Error: ', ex", "def lambda_handler(event, context):\n\n client = boto3.client('events')\n event_to_put = {\n \"source\": \"aws-lambda-function\"\n }\n event_to_put.update(**event)\n try:\n response = client.put_events(\n Entries=[\n {\n 'Source': 'learn.eventbridge',\n 'Detail': json.dumps(event_to_put),\n 'DetailType': 'Learning Eventbridge',\n 'EventBusName': 'default'\n },\n ]\n )\n return {\n 'statusCode': 200,\n 'body': json.dumps('Event has been put on event bus successfully.')\n }\n except Exception as ex:\n return {\n 'statusCode': 500,\n 'body': json.dumps(str(ex))\n }", "def lambda_handler(event, context):\n input = event[\"queryStringParameters\"][\"input\"]\n completed_interpretation = subprocess.run(\n [\"./esco\", \"--quiet\", \"--type\", \"ws\", \"baudelaire.ws\"],\n text=True,\n encoding=\"ascii\",\n input=input,\n stdout=subprocess.PIPE)\n\n # Discard the first two lines of the output (they contain the message\n # \"Enter a word and press Enter:\" and then an empty line).\n trimmed_output = completed_interpretation.stdout.split(\"\\n\", 2)[2]\n\n return {\n \"statusCode\": 200,\n \"headers\": {\"Access-Control-Allow-Origin\": \"*\"},\n \"body\": trimmed_output,\n }", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n print(event)\n method=event['httpMethod']\n print(f\"method={method}\")\n 
print(f\"table_name={table_name}\")\n myTriggerType='instrument_price'\n\n \n if method == \"DELETE\":\n #path=event['path']\n trigger_id=event['pathParameters']['trigger_id']\n print(f\"triggerId={trigger_id}\")\n\n try:\n #see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.delete_item\n response = table.delete_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ConditionExpression=And(Attr('PK').eq(Attr('SK')),Attr('triggerType').eq(myTriggerType)),\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(f\"response={response}\")\n return {\n \"statusCode\": 200,\n \"body\":\"\",\n }\n \n elif method == \"POST\":\n body=json.loads(event['body'])\n trigger_id=body['trigger_identity']\n print(f\"triggerId={trigger_id}\")\n\n response = table.get_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ProjectionExpression=\"triggerEvents, triggerType\",\n )\n print(f\"response={response}\")\n\n if \"Item\" not in response:\n #brand new \n print(f\"inserting {trigger_id}\")\n if 'triggerFields' not in body:\n return iftttError(400, \"triggerFields missing from request\")\n triggerFields=body['triggerFields']\n #todo validate trigger fields\n try:\n response = table.put_item(\n Item={\n 'PK':f\"TR#{myTriggerType}#{trigger_id}\", \n \"SK\":f\"TR#{myTriggerType}#{trigger_id}\",\n 'triggerId': trigger_id,\n #hacky string way to avoid having multiple columns\n 'triggerFields': json.dumps(triggerFields),\n 'triggerType': myTriggerType,\n },\n ConditionExpression=Or(Attr('triggerType').eq(myTriggerType),Attr('triggerType').not_exists())\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n #somehow got created with someone elses triggerType\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(\"response \",response)\n triggered=[]\n elif response['Item'].get(\"triggerType\",myTriggerType) != myTriggerType:\n #it exists but it is someone elses\n return iftttError(404,\"item not found\")\n else:\n item=response['Item']\n print(f\"found {item} \")\n #hacky string way to avoid having multiple columns\n #TODO: change this to use a Map? 
(will allow to add without overwrite)\n events = json.loads(item.get(\"triggerEvents\",\"[]\"))\n triggered= []\n for event in events:\n #TODO: implement limit (not needed now becasue I expect only up to one events)\n triggered.append(event['data'])\n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"data\": triggered,\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n else :\n return iftttError(400, f\"unexpected httpMethod {method}\")", "def payload(self) -> \"dict\":\n return self._attrs.get(\"payload\")", "def _dispatch_json(\n self,\n action: str, # get, post, put, delete\n url: str,\n payload: Optional[dict] = None,\n file_list: Optional[list] = None,\n ) -> dict:\n result = self._dispatch(action, url, payload, file_list)\n return json.loads(result)", "def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log", "def execute(self, **payload):\n pass", "def to_json(self) -> Dict[str, Any]:\n raise Exception(\"Attempted to convert an anonymous Action to JSON\")", "def lambda_handler(event, context):\n\n markdown_files = sys.argv[1:]\n logging.info(f'Markdown files to clean = {markdown_files}')\n for file_to_clean in markdown_files:\n clean_file(file_to_clean)\n print(json.dumps(event))\n body = event['body']\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world from aws.\",\n \"commit\": body\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def on_create(self, payload):\n pass", "def lambda_handler(event, context):\n #print(\"event.session.application.applicationId=\" + event['session']['application']['applicationId'])\n\n #if event['session']['new']:\n # on_session_started({'requestId': event['request']['requestId']},event['session'])\n \n intent = None\n try:\n intent = Intent(**event)\n return handle_intent(intent)\n except Exception as ex:\n err = traceback.format_exc()\n print(err)\n return error_handler_func(intent,msg=str(err))", "def _wrap_handler(self, handler, body):\n try:\n decoded_body = json.loads(body)\n result = yield handler(decoded_body)\n return result\n except Exception as e:\n return {\"error\": str(e)}", "def append_json(self, obj: Any, headers: Optional[MultiMapping[str]] = ...) 
-> Payload:\n ...", "def lambda_handler(event=None, context=None):\n logger.info('Lambda function invoked index()')\n\n # Get configuration from environment\n file_name_default = os.environ.get('FILE_NAME') or \"bamboo_employees\"\n api_key = os.environ.get('BAMBOO_TOKEN')\n url = os.environ.get('BAMBOO_API')\n\n # Parameters, take a file name if exists and remove it from the dict\n file_name = request.args.get('file_name') or file_name_default\n\n try:\n # Request data from Bamboo API\n headers = {'Accept': 'application/json'}\n auth = HTTPBasicAuth(api_key, 'x')\n response = requests.get(url=url, headers=headers, auth=auth)\n\n if response.status_code != requests.codes.ok:\n raise Exception('Error making the request to Bamboo\\r\\n')\n\n # Check the result\n result = json.loads(response.text)\n if 'employees' in result:\n # Generate the GeoJSON from API response\n employees = []\n for employee in result['employees']:\n # Bamboo does not provide explicit locations\n point = Point(None, None)\n employees.append(Feature(geometry=point, properties=employee))\n\n # Produce a GeoJSON Feature collection\n body = json.dumps(FeatureCollection(employees))\n attachment = 'attachment; filename={0}.json'.format(file_name)\n\n # Create a response with the proper headers\n # CARTO will use the file name property as the table name\n response = make_response(body)\n response.headers['Content-Type'] = 'application/json'\n response.headers['Content-Disposition'] = attachment\n else:\n raise Exception('No photos on your request')\n\n return response\n\n except Exception as e:\n response = make_response(e.message + \"\\r\\n\")\n response.headers['Content-Type'] = 'text/plain'\n response.status_code = 500\n return response", "def _build_payload(self, body: Dict) -> Dict[str, Any]:\n return {'jsonrpc': '2.0',\n 'id': self._id_count,\n **body}", "def jwt_encode_handler(payload):\n\n return jwt.encode(\n payload,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_ALGORITHM\n ).decode('utf-8')", "def lambda_handler(event, context):\n params = parse_qs(event['body'])\n token = params['token'][0] if 'token' in params else ''\n\n if not verify_slack_token(token):\n logger.error(\"Request token (%s) does not match expected token\", token)\n return lambda_response(Exception('Invalid request token'))\n\n return gauges_app(params)", "def serialize(self, content):\r\n content = super(JSONPTemplateEmitter, self).serialize(content)\r\n callback = self.request.GET.get('callback', 'callback')\r\n return '%s(%s)' % (callback, content)", "def dumps_json(function):\n def f(*args, **kwargs):\n return json.dumps(function(*args, **kwargs))\n return f", "def _send(self, payload):\n return payload", "def lambda_handler(event, context):\n\n # Get details from the event.\n job = event[\"CodePipeline.job\"]\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params[\"AssumeRoleArn\"]\n image_parameter_name = user_params[\"ImageParameterName\"]\n stack_name = user_params[\"StackName\"]\n template_filename = user_params[\"TemplateFilename\"]\n\n # Create client in the pipeline account.\n pipeline_s3_client = get_artifact_s3_client(job)\n\n # Create clients in the target account.\n target_session = get_session(\n role_arn=assume_role_arn, session_name=\"prepare-ami-deployment\"\n )\n target_cfn_client = target_session.client(\"cloudformation\")\n target_ssm_client = target_session.client(\"ssm\")\n\n # 
Download the input artifact zip file, read manifest.json from it,\n # and get the AMI it references. Also look up the associated image name.\n with download_zip_file(\n s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key\n ) as zip_file:\n image_detail_string = zip_file.read(\"imageDetail.json\").decode(\"utf-8\")\n log(\"IMAGE_DETAIL_STRING\", image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail[\"ImageURI\"]\n log(\"IMAGE\", image)\n\n # Update the SSM parameters with the image,\n # to be used by the CloudFormation deployment stage of the pipeline.\n target_ssm_client.put_parameter(\n Name=image_parameter_name, Value=image, Type=\"String\", Overwrite=True\n )\n\n # Write the CloudFormation stack's template to the output artifact location,\n # to be used by the CloudFormation deployment stage of the pipeline.\n template = get_cloudformation_template(\n cfn_client=target_cfn_client, stack_name=stack_name\n )\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)" ]
[ "0.6538712", "0.64331055", "0.64326906", "0.6389101", "0.6364047", "0.63546777", "0.62912625", "0.62705666", "0.6229331", "0.6210945", "0.6210182", "0.6204859", "0.6172121", "0.6156562", "0.6149879", "0.6147023", "0.6147023", "0.6141123", "0.6130984", "0.6108313", "0.6090652", "0.6064748", "0.6038574", "0.6003723", "0.59843844", "0.59818316", "0.59769946", "0.59596807", "0.5929668", "0.5912442", "0.58879906", "0.5871038", "0.58506626", "0.58377266", "0.58232015", "0.5806266", "0.5748555", "0.57440877", "0.5732604", "0.57119614", "0.5706657", "0.57034713", "0.5695121", "0.5682591", "0.56708115", "0.5664693", "0.5653896", "0.56506246", "0.5645223", "0.5633401", "0.56306815", "0.5583362", "0.5571285", "0.5536226", "0.5530973", "0.55234146", "0.5504199", "0.54995376", "0.54979753", "0.54963726", "0.5490188", "0.54893327", "0.54789966", "0.5475242", "0.54732877", "0.54641974", "0.54394126", "0.5414304", "0.5411501", "0.5406281", "0.54036015", "0.5400629", "0.53998554", "0.53974855", "0.53965735", "0.53912675", "0.5381195", "0.53736955", "0.5367219", "0.5363841", "0.536026", "0.5351406", "0.53444856", "0.5333664", "0.5326042", "0.53237593", "0.53227466", "0.52914846", "0.52794814", "0.52758247", "0.5275711", "0.5272727", "0.52710366", "0.52706444", "0.52671283", "0.52663046", "0.5256966", "0.52432907", "0.5242611", "0.52404547", "0.5228146" ]
0.0
-1
Qualifier (i.e., version) of the lambda function. Defaults to `$LATEST`.
def qualifier(self) -> pulumi.Output[Optional[str]]:
    return pulumi.get(self, "qualifier")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_version(self) -> str:\n return pulumi.get(self, \"function_version\")", "def get_lambda_latest_version_num(fn_arn: str, region: str) -> int:\n\n client = boto3.client('lambda', region_name=region)\n response = client.list_versions_by_function(FunctionName=fn_arn)\n\n for v in response['Versions']:\n if v['Version'] == '$LATEST':\n latest_hash = v['CodeSha256']\n break\n\n for v in response['Versions']:\n if v['Version'] != '$LATEST' and v['CodeSha256'] == latest_hash:\n return v['Version']", "def get_boto3_version() -> str:\n return boto3_version", "def get_latest_tf_version(include_prerelease: bool = False) -> str:\n return get_available_tf_versions(include_prerelease)[0]", "def get_bucket_versioning(Bucket=None):\n pass", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def get_botocore_version() -> str:\n return botocore_version", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)", "def schema_version(self) -> str:\n return self._pipeline_definition.get(\"version\")", "def version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"version\")", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema_transformation_version\")", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def get_trigger_version(uuid: UUID) -> Optional[float]:\n scenario = store.get_scenario(uuid)\n if scenario:\n return scenario.sections['Triggers'].trigger_version\n return None", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version\")", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def version():\n\n pass", "def resource_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_version\")", "def function_region(self) -> str:\n return pulumi.get(self, \"function_region\")", "def qualifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"qualifier\")", "def qualifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"qualifier\")", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def runtime_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"runtime_version\")", "def version(self):\n pass", "def version(self):\n pass", "def version(self):\n pass", "def signature(function: model.Function) -> str:\n return str(function.signature)", "def signature_version(self) -> str:\n return self[\"Sns\"][\"SignatureVersion\"]", "def get_version_tag(self, version: str) -> str:\n return version", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def get_version() -> str:\n return __version__", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_trigger(event):\n\n if \"Records\" in event and event[\"Records\"][0][\"eventSource\"] == \"aws:s3\":\n return \"S3\"\n elif \"queryStringParameters\" in event:\n return \"APIGateway\"\n else:\n return \"eval\"", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def version(self):\n raise NotImplementedError", "def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)", "def pyzmq_version():\n if __revision__:\n return '@'.join([__version__,__revision__[:6]])\n else:\n return __version__", "def pyzmq_version():\n if __revision__:\n return '@'.join([__version__,__revision__[:6]])\n else:\n return __version__", "def version(self) -> str:\n return '0.1'", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def _provided_or_most_recent(self) -> str:\n if self._operator == \">=\" and parse(self._version) <= parse(\n self._reserved_latest_version\n ):\n return f\"{self._reserved_name}=={self._reserved_latest_version}\"\n elif (\n self._operator == \"\"\n and self._version == \"\"\n and self._reserved_latest_version != \"\"\n ):\n return f\"{self._reserved_name}=={self._reserved_latest_version}\"\n return self._raw", "def version(self) -> Optional[pulumi.Input['FhirStoreVersion']]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def 
version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def get_version():\n return 1", "def get_version(self):\n pass", "def probe_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"probe_version\")", "def probe_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"probe_version\")", "def version():\n return __VERSION__", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def version_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_code\")", "def python_branch():\n\n return _sys_version()[2]", "def version(self):\n return self.get_current_version()", "def get_version():\n return \"4.{}\".format(__version__)", "def key_version(self) -> Optional[str]:\n return pulumi.get(self, \"key_version\")", "def get_current_component_version():\n from resource_management.core.exceptions import Fail\n from resource_management.libraries.functions.default import default\n from resource_management.libraries.functions.stack_select import get_role_component_current_stack_version\n from resource_management.libraries.functions.repository_util import CommandRepository\n\n version = default(\"/commandParams/version\", None)\n if not version:\n repository = CommandRepository(default(\"/repositoryFile\", {}))\n if not repository.resolved:\n try:\n version = get_role_component_current_stack_version()\n except (Fail, TypeError):\n pass\n else:\n version = repository.version_string\n\n return version", "def get_version(self):\n return self.cur_config['version']['name']", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def version(self):", "def getVersion(self, *args):\n return _libsbml.QualExtension_getVersion(self, *args)", "def QualExtension_getDefaultVersion():\n return _libsbml.QualExtension_getDefaultVersion()", "def azure_function_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionAzureFunctionEndpointArgs']]:\n return pulumi.get(self, \"azure_function_endpoint\")", "def azure_function_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionAzureFunctionEndpointArgs']]:\n return pulumi.get(self, \"azure_function_endpoint\")", "def version(self):\n raise NotImplementedError('version')", "def type_version_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"type_version_arn\")", "def _get_function_path_from_list_functions_endpoint(self, function):\n if 'function_scope' in function and function['function_scope']['bucket'] != '*':\n return f\"{function['function_scope']['bucket']}/{function['function_scope']['scope']}/{function['appname']}\"\n return function['appname']", "def python_revision():\n return _sys_version()[3]", "def set_version(self, version=None, version_fun: Callable[[], str] = None):\n def version_compute():\n fun = version_fun\n if fun is None:\n fun = default_version_hash\n\n if version is None:\n return fun()\n else:\n return version\n\n self.version = version_compute\n return self", "def version(self, newVersion=None):\n pass", "def runtime_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")", "def runtime_version(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")", "def runtime_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runtime_version\")" ]
[ "0.66423076", "0.64009196", "0.52357906", "0.5183368", "0.51745695", "0.51704615", "0.514472", "0.50531745", "0.50192595", "0.5005698", "0.49799076", "0.49550298", "0.49529997", "0.4908176", "0.49046764", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.4904551", "0.49000004", "0.48911935", "0.4886574", "0.48737833", "0.485743", "0.485743", "0.48541826", "0.48526996", "0.4850375", "0.4850375", "0.4850375", "0.48416638", "0.4824116", "0.48229957", "0.48160335", "0.48160335", "0.4813814", "0.48099604", "0.48099604", "0.48087656", "0.48039195", "0.48029765", "0.48029765", "0.48029765", "0.48029765", "0.47979885", "0.47954914", "0.47954914", "0.4788902", "0.47786736", "0.4769681", "0.47578427", "0.47524658", "0.47524658", "0.47524658", "0.47524658", "0.47524658", "0.4741852", "0.4741852", "0.4741852", "0.4740374", "0.47339633", "0.47314695", "0.47314695", "0.472889", "0.4721709", "0.47165388", "0.4707841", "0.47001642", "0.4687905", "0.4680425", "0.46800914", "0.46746874", "0.46682763", "0.46682763", "0.46682763", "0.46680975", "0.46679652", "0.46631727", "0.4660976", "0.4660976", "0.465198", "0.46498847", "0.46473876", "0.464529", "0.46415856", "0.4641112", "0.46390697", "0.46390697", "0.46390697" ]
0.47418374
70
String result of the lambda function invocation.
def result(self) -> pulumi.Output[str]: return pulumi.get(self, "result")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def get_lambda_value(lambda_node):\n return get_call_value(lambda_node.body)", "def function(self) -> str:\n return pulumi.get(self, \"function\")", "def __str__(self):\n header = [\n ' ObjectiveFunction:']\n header += [('Function: {}').format(self.func.__name__)]\n header += [('Objective: {}').format(self.objective)]\n return ('\\n').join(header) + '\\n'", "def function(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"function\")", "def __call__(self, the_instance, *args, **kwargs):\n return str(self._function(the_instance, *args, **kwargs))", "def fn(self):\n return \"Hello\"", "def task_6_insert_function_result_into_string(func: Callable):\n return f'start {func()} finish'", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def get_call_string(self) -> Optional[str]: # noqa\n call_repr = get_call_string(self.func_name, self.args, self.kwargs, max_length=75)\n return call_repr", "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def lambda_func_doc(self, label):\n latex = (\n r'0 = \\lambda - '\n r'\\frac{\\dot{m}_\\mathrm{air}}{\\dot{m}_\\mathrm{air,min}}'\n )\n return generate_latex_eq(self, latex, label)", "def to_lambda_output(self):\n resp = {\n 'statusCode': self.status_code,\n 'body': self.body,\n 'headers': self.headers\n }\n\n return resp", "def callback( context ):\n return '<tag>{}</tag>'.format( function( context ) )", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def __str__(self):\n return str(self._event)", "def __call__( self, *args, **kwargs ):\n\n return self.__str__( )", "def fn(self):\n return self._fn", "def lambda_eval(v):\n return v() if hasattr(v, '__call__') else v", "def __str__(self):\n return self.function_representation", "def __repr__(self):\n return ('ObjectiveFunction({}, {})').format(self.func.__name__, self.objective)", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def function(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function\")", "def __repr__(self):\n result = [\n self.__class__.__name__,\n '(func = ',\n repr(self.func),\n ', timeout = ',\n repr(self.timeout),\n ]\n \n cached = self.cached\n if (cached is not ...):\n result.append(' cached = ')\n result.append(repr(cached))\n \n result.append(')')\n \n return ''.join(result)", "def to_string(self):\r\n return self.command()", "def _(self, node: Call):\n\n args = []\n for n in node.arguments:\n args.append(self.visit(n))\n\n func_args = \" \".join(args)\n\n 
return f\"( call {node.func.name} {func_args} )\"", "def __str__(self):\n names = [self.name]\n names += [_callable_name(transform) for transform in self.transforms]\n return ' | '.join(names) + f' -> {self.shape} {self.dtype}'", "def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.", "def __repr__(self) -> str:\n return f\"<Function[{self.name}](line:{self.line})>\"", "def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def process_target(self):\n assert isinstance(self.target, str)\n return f\"%{super().process_target()}%\"", "def debug():\n def _debug(x):\n return e.String(x.as_source())\n yield (\"(λ any . str)\", _debug)", "def __str__(self):\r\n return self.__call__()", "def __repr__(self):\n return \"{0}(callable_obj = {1})\".format(self.__class__.__name__, repr(self._callable))", "def lambda_output(self) -> Optional[pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationOutputLambdaOutputArgs']]:\n return pulumi.get(self, \"lambda_output\")", "def get_function_name(self):\n return self.__function", "def invoke_url(self) -> pulumi.Output[str]:\n return self.stage.invoke_url # type: ignore[no-any-return]", "def command_string(func, targets, sources, kwds):\n args= [repr(targets[0])] if len(targets) == 1 \\\n else [] if not targets else [repr(targets)]\n if sources:\n args.append(repr(sources[0]) if len(sources) == 1\n else repr(sources))\n if kwds:\n args.append(', '.join(['{}={}'.format(k, repr(v))\n for k, v in kwds.items()]))\n return '{}({})'.format(func.__name__, ', '.join(args))", "def eval(self, expression: str) -> str:\n ret = self.exec_(\"print({})\".format(expression))\n ret = ret.strip()\n return ret", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(UmbrellaSampling.key, self.x0, self.kf, self.n_upd)\n\n return strme", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(TransitionMatrix.key, self.nout, self.n_upd, self.mode)\n\n return strme", "def a_function_of_mine():\n return 'result'", "def event(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"event\")", "def f_val_to_str(self):\n old_locked = self._locked\n try:\n return repr(self.f_get())\n except Exception:\n return \"No Evaluation possible (yet)!\"\n finally:\n self._locked = old_locked", "def string(self):\n return f'y = {self.a.item()}'", "def get_trace_string(self):\n return (\"%s -> %s(0x%s) addr:0x%s\" %\n (self.instr_str, self.rd, self.rd_val, self.addr))", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def result_stdout(result):\n return result[1][0]", "def value(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def as_string (self) :\n\n if self.is_machinetag() :\n return \"%s:%s=%s\" % (self.namespace(), self.predicate(), self.value())", "def _function_name(func):\n return \"Calling the function: def 
{}()\".format(func.__name__)", "def __str__(self) -> str:\n st = \"<Output> \"\n if self.inst_out:\n st += f'instance:{self.inst_out};'\n st += f'''{self.output} -> {self.target or '\"\"'} -> '''\n if self.inst_in:\n st += f\"instance:{self.inst_in};\"\n st += self.input\n\n if self.params and not self.inst_in:\n st += f\" ({self.params})\"\n if self.delay != 0:\n st += f\" after {self.delay} seconds\"\n if self.times != -1:\n st += \" (once only)\" if self.times == 1 else f\" ({self.times!s} times only)\"\n return st", "def getCallable():", "def __str__(self):\n return self.result", "def func_hash(self) -> str:\n\n return self.call_data[:10]", "def log_function_code(func_to_log: Callable) -> str:\n if not callable(func_to_log):\n TypeError(f\"Parameter 'func_to_log' is not function. Actual value: {func_to_log}.\")\n function_definition = inspect.getsource(func_to_log)\n if function_definition.startswith(\"return \"):\n function_definition = function_definition[7:]\n return repr(function_definition.strip())", "def actionString(self,action):\n return str(self._mdp.A[action])", "def get_result(self, obj):\n return str(obj)", "def get_result(self, obj):\n return str(obj)", "def __repr__(self):\n\t\treturn self.func.__doc__", "def __str__(self) -> str:\n # The default str() for Function includes the arity, which is redundant\n # here. Just use the symbol's name.\n root_str = self.root.name\n children_str = ', '.join(str(child) for child in self.children)\n return f'{root_str}({children_str})'", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __call__(self, token_received, **kwargs) -> str:\n print(token_received, flush=True, end=\"\")\n return token_received", "def log_stdout(self, function):\n return function()", "def get_current_value(self) -> str:\n if isinstance(self._function_args, dict):\n # noinspection PyCallingNonCallable\n value = self._value_function(**self._function_args)\n else:\n # noinspection PyCallingNonCallable\n value = self._value_function(*self._function_args)\n\n if callable(self._formatter):\n formatted_value = self._formatter(value)\n elif isinstance(self._formatter, str):\n formatted_value = self._formatter.format(value = value)\n else:\n formatted_value = str(value)\n\n return formatted_value", "def __str__(self):\n return ' '.join([self.source, self.name, str(self.outputs)])", "def __str__(self):\n return str((self.instruction_pointer, self.program,))", "def lambda_rad(self):\n InputFile = self('Meta','InputFile').decode(\"utf-8\")\n d_InputFile = dict([item.replace(' ','').split('=') for item in InputFile.splitlines() if '=' in item])\n if 'lambda' in d_InputFile:\n return float(d_InputFile['lambda'])\n else:\n return self.lambdaref", "def lambda_handler(event, context):\n\n # Log the values received in the event argument\n logger.info(f'Request event: {event}')\n\n # Define default hard-coded return values\n response = {\n 'uid': 'Example function ID',\n 'return_val01': 'Return value #1',\n 'return_val02': 'Return Value #2',\n }\n\n # Retrieve type of invocation (GET, PUT, etc.)\n if 'http_verb' in event:\n operation = event['http_verb'].upper()\n if operation == 'PUT':\n # Return the values passed to the function\n response = {\n 'uid': event['functionID'],\n 'return_val01': event['parameters']['parm01'],\n 'return_val02': event['parameters']['parm02'],\n }\n\n logger.info(f'Response={response}')\n return response", "def __str__(self):\n s = 
self.prev_error.failures + '\\n' if self.prev_error else ''\n\n s += '%s' % self.message\n if self.args[1:]:\n s += ' %s' % str(self.args[1:])\n\n for task in self.tasktrace:\n s += '\\n in %s %s' % (task.task.__name__, task.name)\n return s", "def task_calc():\n return 'What is the result of the expression?'", "def code(self) -> str:\n input_names = {id: f\"arg_{i + 1}\" for i, id in enumerate(self.role.input_placeholder_ids)}\n output_names = {id: f\"out_{i + 1}\" for i, id in enumerate(self.role.output_placeholder_ids)}\n state_names = {\n ph.id.value: f\"state_{i + 1}\" for i, ph in enumerate(self.role.state.state_placeholders)\n }\n var_names = {**input_names, **output_names, **state_names}\n\n out = f\"def {self.name}(\"\n out += \", \".join([var_names[id] for id in self.role.input_placeholder_ids])\n out += \"):\\n\"\n for action in self.role.actions:\n out += f\" {action.code(var_names)}\\n\"\n\n out += \" return \"\n out += \", \".join([var_names[id] for id in self.role.output_placeholder_ids])\n\n return out", "def write(self):\n return self.expr.lhs.base.function", "def _log_str(self):\n return (\n \"[name: {}, id: {}]\"\n .format(self._raw['Name'] if self._raw else \"<not retrieved>\", self._id)\n )", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __str__(self):\n if not six.PY3:\n return unicode(self.args[0]).encode('utf-8')\n\n return self.args[0]", "def __str__(self):\n iso_time = str(datetime.datetime.fromtimestamp(self.next_time))\n return \"<Job(%s, %ss, %s)>\" % (iso_time, self.interval, self.func)", "def view_function(self,v):\n return v", "def name(self):\n\t\treturn self._func_name", "def source_code(self):\n return str(self.source)", "def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)" ]
[ "0.63775957", "0.6243465", "0.6243465", "0.6057504", "0.6049874", "0.604393", "0.59944534", "0.59546214", "0.5952952", "0.59346074", "0.5891595", "0.5888873", "0.5779383", "0.5732003", "0.563572", "0.56272775", "0.5584745", "0.55607206", "0.5560158", "0.55510116", "0.5549994", "0.554588", "0.5536301", "0.5517513", "0.5495927", "0.5495927", "0.5495927", "0.547467", "0.54703134", "0.545463", "0.5447386", "0.5434786", "0.5428345", "0.5421283", "0.54074574", "0.54074574", "0.53970337", "0.5392653", "0.5379697", "0.53709316", "0.5361247", "0.53574085", "0.5352179", "0.53476197", "0.5345017", "0.5334717", "0.53333974", "0.5325462", "0.5315944", "0.5300802", "0.5289821", "0.52765554", "0.52740294", "0.52740294", "0.52740294", "0.52740294", "0.52740294", "0.52740294", "0.52740294", "0.52740294", "0.52611953", "0.52568096", "0.525493", "0.5247376", "0.52357566", "0.52357554", "0.5229213", "0.52246875", "0.52206266", "0.5215743", "0.5214811", "0.5214811", "0.52145785", "0.5210813", "0.5210378", "0.5210378", "0.5210378", "0.5206057", "0.52047384", "0.52004075", "0.51986367", "0.5191432", "0.5185326", "0.51667845", "0.51612663", "0.51588947", "0.5147937", "0.5147778", "0.5147386", "0.514329", "0.514329", "0.514329", "0.514329", "0.514329", "0.5137944", "0.51361287", "0.51360583", "0.51297003", "0.51291186", "0.5128368" ]
0.5227699
67
Map of arbitrary keys and values that, when changed, will trigger a reinvocation.
def triggers(self) -> pulumi.Output[Optional[Mapping[str, str]]]: return pulumi.get(self, "triggers")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_dict(new,old):", "def test_dictionary_inplace_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value[4] = 5\r\n assert vm.changed", "def test_dict(self):\n event_cache = []\n\n class A(HasTraits):\n x = EventfulDict({c: c for c in 'abc'})\n a = A()\n a.x.on_events(lambda k, v: event_cache.append('add'), \\\n lambda k, v: event_cache.append('set'), \\\n lambda k: event_cache.append('del'))\n\n del a.x['c']\n # ab\n a.x['z'] = 1\n # abz\n a.x['z'] = 'z'\n # abz\n a.x.pop('a')\n # bz \n\n # Were the correct events captured?\n self.assertEqual(event_cache, ['del', 'add', 'set', 'del'])\n\n # Is the output correct?\n self.assertEqual(a.x, {c: c for c in 'bz'})", "def test_dictionary_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value = {4:5}\r\n assert vm.changed", "def _set_toChange(x):\n for key in list(x.keys()):\n self.toChange[key] = True", "def keys(self, value: Dict[str, ValidKVs]) -> None:\n warnings.warn('This is private, call .clear_keys() and update().', DeprecationWarning, stacklevel=2)\n self.clear_keys()\n self.update(value)", "def __setitem__(self, key, value):\n dict.__setitem__(self, key, value)\n\n self.changed()", "def handle_dict(self, object, name, old, new):\n if old is not Uninitialized:\n unregister = self.next.unregister\n for obj in old.values():\n unregister(obj)\n\n register = self.next.register\n for obj in new.values():\n register(obj)", "def renamed_dict(event):\n\n new_dict = thaw(event.data())\n\n for old, new in list(rename_map.items()):\n new_dict[new] = new_dict.pop(old)\n\n return new_dict", "def on_change(key):\n pass", "def dict_change(binary_dict:dict):\r\n dict_change= {}\r\n for key, value in binary_dict.items():\r\n dict_change[value] = key\r\n return dict_change", "def changed_keys(self):\n return self._changed_keys", "def __setitem__(self, key, value):\n super(ReadOnlyDict, self).__setitem__(key, value)", "def handle_dict_items(self, object, name, old, new):\n self.handle_dict(object, name, new.removed, new.added)\n\n if len(new.changed) > 0:\n # If 'name' refers to the '_items' trait, then remove the '_items'\n # suffix to get the actual dictionary trait.\n #\n # fixme: Is there ever a case where 'name' *won't* refer to the\n # '_items' trait?\n if name.endswith(\"_items\"):\n name = name[: -len(\"_items\")]\n\n dict = getattr(object, name)\n unregister = self.next.unregister\n register = self.next.register\n for key, obj in new.changed.items():\n unregister(obj)\n register(dict[key])", "def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover", "def update(self, key, value):\n if key in self.map:\n self.map[key] = value", "def __setitem__(self,key,value):\n if key in self.deleted: self.deleted.remove(key)\n if key not in self.changed: self.changed.append(key)\n self.data[key] = value", "def _default_observation_map(self) -> Dict[str, ObservationMapValue]:\n pass", "def handle_dict_items(self, object, name, old, new):\n raise NotImplementedError", "def _metrics_update(orig, new):\n revsd = orig\n for k, v in orig.items():\n if not v:\n revsd[k] = new[k]\n elif new[k]:\n if new[k] != v:\n # LOG ME, do something\n print(orig)\n print(new)\n elif not new[k] or v:\n 
pass\n else:\n raise Exception(\"_metrics_update error\")\n return revsd", "def __getstate__(self):\n return dict(self.items())", "def __getstate__(self):\n return dict(self.items())", "def __setitem__(key, value):", "def data_dict_update(self, change):\n self.data_dict = change['value']", "def rename_dictkey(self, kwargs, old, new):\n x = kwargs.copy()\n x[new] = x.pop(old)\n return x", "def rekey(self, map_or_fn=None, inplace=False, **kw):\n if not inplace:\n return self.copy().rekey(map_or_fn, inplace=True, **kw)\n if map_or_fn is None:\n map_or_fn = kw\n if isinstance(map_or_fn, dict):\n if map_or_fn is not kw:\n map_or_fn.update(kw)\n func = lambda k: map_or_fn.get(k, k)\n else:\n func = map_or_fn\n if not callable(func):\n raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'\n % (str(type(map_or_fn)), str(map_or_fn)))\n keys = self.peys()\n new_keys = keys.apply(func).puniq()\n if len(keys) != len(new_keys):\n raise ValueError('rekey map must return the same number of unique keys as the original pdict. '\n 'Only found %d of %d expected keys.' % (len(new_keys), len(keys)))\n vals = self.palues().uproot()\n self.clear()\n self[new_keys] = vals\n return self", "def rekey(self, map_or_fn=None, inplace=False, **kw):\n if not inplace:\n return self.copy().rekey(map_or_fn, inplace=True, **kw)\n if map_or_fn is None:\n map_or_fn = kw\n if isinstance(map_or_fn, dict):\n if map_or_fn is not kw:\n map_or_fn.update(kw)\n func = lambda k: map_or_fn.get(k, k)\n else:\n func = map_or_fn\n if not callable(func):\n raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'\n % (str(type(map_or_fn)), str(map_or_fn)))\n keys = self.peys()\n new_keys = keys.apply(func).puniq()\n if len(keys) != len(new_keys):\n raise ValueError('rekey map must return the same number of unique keys as the original pdict. '\n 'Only found %d of %d expected keys.' 
% (len(new_keys), len(keys)))\n vals = self.palues().uproot()\n self.clear()\n self[new_keys] = vals\n return self", "def substitute_keys_in_functions(functions, new_keys):\n for _, func in functions.items():\n func['ret_type'] = new_keys[func['ret_type']]\n substitute_params_keys(func['params'], new_keys)", "def __setitem__(self, key, val):\n for k,v in list(self.__dict__.items()):\n if k == key:\n self.__dict__[key] = val\n return \n print((\"Item %s could not be updated...\" %key))", "def _observe_simple(self):\n return {}", "def __setitem__(self, key, value):\n self._maps[0][key] = value", "def setChanged(self,key):\n if key not in self.data:\n raise ArgumentError(\"No settings data for \"+key)\n if key not in self.changed:\n self.changed.append(key)", "def __setitem__(self, key, val):\n dict.__setitem__(self, key, val)", "def __setitem__(self, key, value):", "def mutate_dict_in_place(func, mapping):\n for key, value in mapping.items():\n if isinstance(value, dict):\n mutate_dict_in_place(func, value)\n else:\n mapping[key] = func(value)", "def dict(self, keys) -> dict:\n return {k: self(k) for k in keys}", "def remap_keys(ds, new_keys):\n logger.info(\"Remapping keys of every element using config:\\n %s\", _dict_to_logstring(new_keys))\n\n def remap_keys(x):\n return {new_keys.get(k, k): v for k, v in x.items() if new_keys.get(k, k) is not None}\n return ds.map(remap_keys, num_parallel_calls=TF_AUTOTUNE)", "def __getstate__(self):\n return {k: v for k, v in self.__dict__.iteritems() if k not in ['x', 'y', '_x', '_y']}", "def update(self, new_values):\n values_copy = new_values.copy()\n for key in self.SET_KEYS:\n if key in values_copy:\n values_copy[key] = set(values_copy[key])\n super(ConfigDict, self).update(values_copy)", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def handle_dict(self, object, name, old, new):\n raise NotImplementedError", "def f(map, key):\n def decorator(function):\n map[key] = function\n return function\n return decorator", "def _modkeys(self, dict, mod):\n newdict = {}\n for (k, v) in dict.items():\n newk = 
k + mod\n newdict[newk] = v\n return newdict", "def __setitem__(self, key, value):\n if key in self.define:\n warnings.warn('Key {} is being overwritten to {}. It had a value of {}. Hope you know what you are doing.'.format(key, value, self.define[key]))\n self.define[key] = value", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __setitem__(self, key, value):\n pass", "def __methodDict(cls, _dict):\n baseList = list(cls.__bases__)\n baseList.reverse()\n for _super in baseList:\n __methodDict(_super, _dict)\n for key, value in cls.__dict__.items():\n if type(value) == types.FunctionType:\n _dict[key] = value", "def _reload_values(self):\r\n raise NotImplementedError", "def __setitem__(self, key, val):\n self()[key] = val", "def __setitem__(self, key, value):\n if key not in self.ordered_keys:\n self.ordered_keys.append(key)\n super().__setitem__(key, value)", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def __setitem__(self, key, value):\n self.__dict__[key] = value", "def update_config(original, new):\n for k, v in new.items():\n if isinstance(v, abc.Mapping):\n original[k] = update_config(original.get(k, {}), v)\n else:\n original[k] = v\n return original", "def __setitem__(self,key,value):\n self._register[key] = value\n self._register.sync()", "def handle_sc_event(store, changed_keys, info):\n\n for key in changed_keys:\n SC_HANDLERS[key](key=key, info=info)", "def _reference(self):\r\n return {1:2, \"key1\":\"value1\", \"key2\":(1,2,3)}", "def test_load_updates_dict(self):\n new_dict = {\n 'test_new_key': 'test_new_value',\n 'test_key1': 'new_value',\n }\n self.extension.registration.settings = new_dict\n self.settings.load()\n\n # Should have added test_new_key, and modified test_key1\n self.assertEqual(new_dict['test_new_key'],\n self.settings['test_new_key'])\n self.assertEqual(new_dict['test_key1'], self.settings['test_key1'])\n\n # Should have left test_key2 alone\n self.assertEqual(self.test_dict['test_key2'],\n self.settings['test_key2'])", "def update_keymap(self, new_keymap):\n self.keymap.update(new_keymap)", "def append_event_to_params_dict(self, new_name_and_parameters):\n\n params_dict.update(new_name_and_parameters)", "def testMapUpdate(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap({'step': data_types.BuildStats()})\n with self.assertRaises(AssertionError):\n m.update({1: 2})\n with self.assertRaises(AssertionError):\n m.update(step2=1)\n m.update(step=data_types.BuildStats())\n self.assertEqual(m, {'step': data_types.BuildStats()})", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def set(self, key, value):", "def set(self, key, value):", "def ENFORCEMENT_FUNCTIONS_AS_MODIFIERS(EnforcerDict):\n\n class 
Other:\n def __init__(self):\n self.keystring = ''\n self.valuesum = 0\n def add_function(self, items):\n for key, value in items:\n if value % 2 == 0 and key.islower():\n self.valuesum += value\n self.keystring += key\n yield key, value\n # noinspection PyUnreachableCode\n def remove_function(self, items):\n return\n yield\n\n global other\n other = Other()\n\n enforcer_dict = EnforcerDict(\n dict(A=1, b=2, c=3, D=4),\n add_function=other.add_function,\n remove_function=other.remove_function\n )\n\n assert other.keystring == 'b'\n assert other.valuesum == 2\n\n del enforcer_dict['b']\n\n assert other.keystring == 'b'\n assert other.valuesum == 2\n\n return enforcer_dict", "def __setitem__(self, key, value):\n self.other[key] = value", "def updated_with(orig_dict, *new_values):\n newdict = dict(orig_dict)\n for vals in new_values:\n if vals:\n newdict.update(vals)\n return newdict", "def update_key(self):\n self.__prev_key = self.__new_key", "def _map___iter__(self):\n return self.iterkeys()", "def change_config(self, changesDict):\n for key in sorted(changesDict.keys()):\n self.configDict.update({key: changesDict.get(key)})", "def update(self,dict):\n for key in list(dict.keys()):\n print(\"Hey, I'm updating something\")\n self.d[key]=dict[key]", "def changed(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, key, value):\r\n self.setdefault(key, []).append(value)", "def _reset_changes(self):\r\n self._original = {}\r\n if self.last_updated is not None:\r\n self._original['last_updated'] = self.last_updated", "def tsc_change(self) -> Dict[str, str]:\n return {\n \"type\": self._action.value,\n \"name\": \", \".join(self._names),\n \"link\": self._link,\n }", "def __update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass", "def pre_update(self, **values):\r\n pass", "def modify_on(class_reference, from_dict, to_dict, all=False, custom_condition='', custom_function=False):\n _entries = select_from(class_reference, all, custom_condition, **from_dict)\n _modify = 0\n if custom_function:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key](_entry.__dict__['_'+_key])\n _entry.put()\n _modify += 1\n else:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key]\n _entry.put()\n _modify += 1\n return _modify", "def _update(self, *keys_and_val):\n if len(xxx) < 2:\n raise NotEnoughInfo\n value, *location = xxx[::-1]\n location.reverse()\n final_key = location.pop()\n ptr__target_dct = get_target_dct(location)\n ptr__target_dct[final_key] = value\n return", "def __setstate__(self, dict):\n self.__dict__.update(dict)\n self.start_callback = None\n self.finalize_callback = None", "def __setitem__(self, key, value) -> None:\n # Allows value modification only in __init__.\n caller_method = inspect.getouterframes(inspect.currentframe(), 2)[1][3]\n if caller_method != \"__init__\":\n raise AttributeError\n\n self.__stash[key] = value", "def inverse_update(self, data):\n if not isinstance(data, dict) or not isinstance(self, transforms.MapTransform):\n return data\n d = dict(data)\n for k in self.key_iterator(data):\n transform_key = transforms.TraceableTransform.trace_key(k)\n if 
transform_key not in data or not data[transform_key]:\n continue\n d = transforms.sync_meta_info(k, data, t=False)\n return d", "def restart_function_map():\n rfunc_map = {}\n if run_in_apache():\n rfunc_map['apache2'] = restart_pid_check\n return rfunc_map", "def rename_state_dict_keys(source, key_transformation, target=None):\n if target is None:\n target = source\n\n state_dict = torch.load(source)\n # state_dict = state_dict.state_dict() \n new_state_dict = OrderedDict()\n\n for key, value in state_dict.items():\n new_key = key_transformation(key)\n new_state_dict[new_key] = value\n\n torch.save(new_state_dict, target)", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def observation(self):\n return {k: observer(self._state)\n for k, observer in self.observers.items()}", "def replace_cfs(old_key, new_key):\n altered_methods = []\n for name in methods:\n changed = False\n data = Method(name).load()\n for line in data:\n if line[0] == old_key:\n line[0], changed = new_key, True\n if changed:\n Method(name).write(data)\n altered_methods.append(name)\n return altered_methods", "def _modified(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in self.__modified__:\n yield (key, getattr(self, key))", "def update(self, key, new_value):\n raise NotImplementedError", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def update(self, changes, pipe=None):\n if not changes:\n return\n\n if self.key_name in changes:\n raise InvalidOperation('cannot update the redis key')\n\n deletes = {k for k, v in changes.items() if IS(v, None)}\n updates = {k: v for k, v in changes.items() if k not in deletes}\n\n with self._pipe(pipe) as pipe:\n\n core = self.core(pipe=pipe)\n\n def build(k, v):\n core.hset(self.key, k, v)\n\n def cb():\n self._data[k] = v\n\n pipe.on_execute(cb)\n\n for k, v in updates.items():\n build(k, v)\n\n self.remove(deletes, pipe=pipe)", "def updateSimState(self):\n self.sim_state = {k: v for k,v in self.state.iteritems()}", "def edit_work(self, new_values):\n self.eisenhower_priority()\n self.work_refresh()\n\n for attr, new_val in new_values.items():\n self.__dict__[attr] = new_val\n return self.__dict__", "def change(self, key, old_value, new_value):\n try:\n parts = self.list(key)\n try: parts[parts.index(old_value)] = new_value\n except ValueError:\n self[key] = new_value\n else:\n self[key] = \"\\n\".join(parts)\n except KeyError: self[key] = new_value", "def fill_cache(cache, values_dict):\n cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)", "def _update_key(cls, spec):\n if cls.KEY is not None:\n cls._set_key(spec, spec[\"keys\"].popleft())\n elif cls.REF is not None:\n spec[\"ref\"] = cls.REF", "def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(MapMarker, self)._update_proxy(change)", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')" ]
[ "0.645274", "0.6258521", "0.616605", "0.6099212", "0.60495317", "0.59831303", "0.5854459", "0.585022", "0.5841429", "0.57649016", "0.57519615", "0.5711017", "0.55702174", "0.5550343", "0.5543891", "0.5535223", "0.54757035", "0.54222584", "0.54219085", "0.54190826", "0.5399805", "0.5397984", "0.5397984", "0.5392318", "0.5380758", "0.53718084", "0.53602445", "0.53602445", "0.5358211", "0.53489554", "0.53425384", "0.533187", "0.53269076", "0.53049445", "0.527872", "0.5267525", "0.5252011", "0.52495915", "0.523931", "0.52362126", "0.5227129", "0.5220679", "0.51730657", "0.5155813", "0.5153452", "0.5149735", "0.51445395", "0.51445395", "0.51445395", "0.51345426", "0.5114005", "0.5113844", "0.5104606", "0.5102856", "0.51013637", "0.50867224", "0.50776255", "0.50570095", "0.50466377", "0.5039575", "0.50324285", "0.5024085", "0.5022563", "0.5019064", "0.5018644", "0.5018644", "0.5016088", "0.50151145", "0.50148565", "0.5013379", "0.50117147", "0.50059354", "0.5005628", "0.50049275", "0.5001771", "0.49976677", "0.49959445", "0.49852628", "0.49832192", "0.49821043", "0.49765837", "0.4972342", "0.49681988", "0.49680173", "0.49677268", "0.49545053", "0.49525982", "0.49516788", "0.49489188", "0.49432385", "0.49414957", "0.49369472", "0.49369472", "0.49361652", "0.49333268", "0.49332115", "0.4930842", "0.4927699", "0.49262625", "0.49261916", "0.4924727" ]
0.0
-1
Returns a sorted list of query ids
def getQueries(self): return sorted( self.qryDocs.keys() )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ids(self):\n for object_query in self.query:\n objects = self._get_objects(object_query)\n objects = self._apply_order_by_and_limit(\n objects,\n order_by=object_query.get(\"order_by\"),\n limit=object_query.get(\"limit\"),\n )\n object_query[\"ids\"] = [o.id for o in objects]\n return self.query", "def get_ids(self) -> List[str]:", "def sort_id(self):\n return self.sort('id')", "def get_rls_ids(self, table: \"BaseDatasource\") -> List[int]:\n ids = [f.id for f in self.get_rls_filters(table)]\n ids.sort() # Combinations rather than permutations\n return ids", "def get_ids(self, start_id, num):\n \n rows = self.get_rows()\n # Sort by restaurant ID\n all_ids = sorted(\n list(\n set(\n map(lambda k: int(k['id']), \n filter(lambda k: int(k['id']) >= start_id, rows)))))\n return all_ids[:num]", "def getIDs():", "def get_ids(self):\n return [item.id for item in self.items]", "def ids(self):\n return list(self._id_generator())", "def ids(self):\n return list(self._id_generator())", "def _filter_for_submission_ids(query: DocumentNode) -> [int]:\n acc = Config.accumulation_size + 1\n response = DB.client.execute(query)\n # Count occurrences of every ID\n elements = Counter([submission['photo_id'] for submission in response['results']])\n # Filter for acc size\n elements = filter(lambda x: x[1] < acc,\n [(submissions_by_count, elements[submissions_by_count]) for submissions_by_count in elements])\n # return ID of filtered elements\n return [submission[0] for submission in elements]", "def get_ordered_ids(tree):\n ordered_ids = []\n ordered_ids.extend(id(node) for node in tree.gen_tips())\n ordered_ids.extend(id(node) for node in tree.gen_internal_nodes())\n return ordered_ids", "def id_lst(self):\n id_set = set()\n for key in self.forward:\n for val in self.forward[key]:\n id_set.add(val)\n return list(id_set)", "def optimize_query_order(queries):\n pass #TODO later when/as needed, now returns queries as-is\n return queries", "def _id_seq(self):\n return list(self.keys())", "def list_ids(self, start: int = None, end: int = None) -> List:\n return [i.unique_id for i in self.data[start:end]]", "def get_ids(self):\n return self._ids", "def ids(self):\n\n return self._d.keys()", "def video_ids(self):\n return self._sorted_ids", "def ids(self):\n return self._ids", "def get_es_ids(self):\n search = self.search.source(['uri']).sort(['uri'])\n es_ids = [item.meta.id for item in search.scan()]\n return es_ids", "def get_ids(self):\n return self.redis.hkeys(self.feed_items)", "def getMatchIds(self):\n return sorted(self._matches.keys())", "def load_all_search_ids(self) -> List[Hashable]:\n search_ids = self._redis.lrange(\"search_id_list\", 0, -1)\n return search_ids", "def module_ids(self, rev=False):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tids = sorted(list(self.shutit_map.keys()),key=lambda module_id: self.shutit_map[module_id].run_order)\n\t\tif rev:\n\t\t\treturn list(reversed(ids))\n\t\treturn ids", "def id_priority_list(self):\n return self._id_priority_list", "def all_id() -> list:\n return [str(i[\"id\"]) for i in Blogs_Manager.TablePost.all_query()]", "def dict_list(query_id, qrels):\n rel_list = []\n for query_dict in qrels:\n if int(query_dict['query_num']) == query_id:\n rel_list.append(query_dict)\n\n return rel_list", "def SampleIds(self):\r\n return sorted(self._metadata.keys())", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return 
results", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results", "def ids(self):\n return frozenset([seq.id for seq in self])", "def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]", "def ids(self):\n return (x[\"_id\"] for x in self.document._meta.collection.find(self.spec, fields = (\"_id\",)))", "def get_seq_ids(self, acl, howmany):\n ids = []\n it = 0\n for seq_id in moves.range(self._min_search_seqid,\n self._max_search_seqid):\n ids.append(seq_id)\n it += 1\n if(it >= howmany):\n break\n return ids", "def sort_ids(ids, mapping):\r\n\r\n def _lookup(id):\r\n try:\r\n return len(mapping[id])\r\n except (KeyError):\r\n return 0\r\n\r\n deco = [(_lookup(id), id) for id in ids]\r\n deco.sort(reverse=True)\r\n return [id for _, id in deco]", "def getEventIds(self):\n eventIdsLst = []\n for event in self.eventsLst:\n eventIdsLst.append(event['id'])\n return eventIdsLst", "def ids(self):\n return self.obj_to_id.values()", "def IDLIST(cls, tablename, **kwargs):\n limit = \"LIMIT \" + kwargs.get ( \"limit\", \"ALL\" ) \n offset = \"OFFSET \" + kwargs.get ( \"offset\", \"0\" )\n \n order = kwargs.get ( \"order\", ['objectid ASC'] )\n order = \",\".join (order)\n where = kwargs.get ( \"where\", ['TRUE'] )\n where = \" AND \".join (where)\n query = \"SELECT objectid FROM {0}.{1} WHERE {2} ORDER BY {3} {4} {5}\".format (\n CFG.DB.SCHEMA, tablename, where, order, limit, offset )\n rowset = map(lambda x: x[0], CFG.CX.query ( query ).getresult() )\n return rowset", "def test_sort_ids(self):\r\n\r\n mapping = {\"1\": [\"0\", \"2\", \"5\", \"6\"],\r\n \"3\": [],\r\n \"4\": [],\r\n \"11\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\r\n \"8\": [\"7\"]}\r\n\r\n self.assertEqual(sort_ids([\"1\", \"3\", \"4\", \"8\", \"11\"], mapping),\r\n [\"11\", \"1\", \"8\", \"4\", \"3\"])", "def test_sort_ids(self):\r\n\r\n mapping = {\"1\": [\"0\", \"2\", \"5\", \"6\"],\r\n \"3\": [],\r\n \"4\": [],\r\n \"11\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\r\n \"8\": [\"7\"]}\r\n\r\n self.assertEqual(sort_ids([\"1\", \"3\", \"4\", \"8\", \"11\"], mapping),\r\n [\"11\", \"1\", \"8\", \"4\", \"3\"])", "def ids(self):\n\n if not hasattr(self, \"_ids\"):\n query = db.Query(\"pub_proc_cg c\", \"c.id\").unique().order(\"c.id\")\n query.join(\"query_term t\", \"t.doc_id = c.id\")\n query.join(\"query_term s\", \"s.doc_id = t.int_val\")\n query.where(\"t.path = '/Term/SemanticType/@cdr:ref'\")\n query.where(\"s.path = '/Term/PreferredName'\")\n query.where(\"s.value = 'Drug/agent'\")\n rows = query.execute(self.cdr_cursor).fetchall()\n self._ids = [row.id for row in rows]\n self.logger.info(\"found %d drug terms\", len(self._ids))\n return self._ids", "def get_objects(self,ids):\n return Order.objects.filter(pk__in=ids).order_by('number')", "def getIds(self) -> List[int]:\n return list(self.users.keys())", "def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")", "def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")", "def _params(self, qs):\n return [str_id for str_id in qs.split(',')]", "def get_recordIds(self):\n record_ids = []\n for item in self.order_items:\n record_ids.append(item.get_recordId())\n \n return record_ids", "def dmc_order(self):\n return sorted(self.lookup_table, key=lambda clr: int(clr.id) if clr.id.isdigit() else 0)", "def 
_get_all_app_ids(config, client):\n rv = set()\n total_pages = client.get_published_apps(config.username, 0).json()[\"total_pages\"]\n for current_page in range(total_pages):\n current_page_results = client.get_published_apps(config.username, current_page).json()['results']\n for result in current_page_results:\n rv.add(result['id'])\n return rv", "def get_ids(corpus):\n corpusids = []\n for chunk in corpus:\n for _id in chunk.keys():\n corpusids.append(_id)\n return corpusids", "def get(self):\n return [\n [a, v]\n for a, v in sorted(self.__dict__.items())\n if a != \"query_name\" and len(v)\n ]", "def get_ids(self):\n return self._graphs.keys()", "def get_obs_ids(self):\n return sorted(self.obsinfos.keys())", "def get_objects(self,ids):\n return model.objects.filter(pk__in=ids).order_by(search_field)", "def object_ids(self):\n return self._extract_set('id')", "def get_queries(self, query_ids):\n return [\n self.resource_loader.query_cache.get(query_id) for query_id in query_ids\n ]", "def identer(self) -> List[str]:\n self._populer_identer()\n if self._identer:\n return [str(ident) for ident in self._identer if ident]\n return []", "def getLocationsIds():\n with driver.session() as s:\n ids = s.write_transaction(getLocationsId)\n\n lIds = []\n for idEl in ids:\n lIds.append(idEl[\"ID(l)\"])\n\n return lIds", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def _params_to_ints(self, qs):\n return [int(str_id) for str_id in qs.split(',')]", "def getIDs(self):\n return self.multiengine.getIDs()", "def print_ids(self):\n ids = [self.data[x][DATA_ID_INDEX] for x in self.index_list]\n print(ids)", "def get_ids_strings(self, ids):\n #Split ids by list no longer than 1000 units,\n #because vk api can only gets 1000 ids per one call \n splitted_ids = list(self.chunks(ids, 1000))\n ids_in_list = []\n \n #crate list of strings with ids\n for split_ids in splitted_ids:\n user_ids = ''\n #make string ids list. 
Do it because of api requirement\n for id in split_ids:\n user_ids += str(id) + \",\"\n #remove last \",\"\n user_ids = user_ids[:-1]\n ids_in_list.append(user_ids)\n\n return ids_in_list", "def get_refresh_ids(self):\n ids = []\n for bucket in self.router.lonely_buckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_doc_id_titles(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id, title FROM documents\")\n results = [(r[0], r[1]) for r in cursor.fetchall()]\n cursor.close()\n return results", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def querySet_to_list(qs):\n return [dict(q) for q in qs]", "def get_all_qids(self):\n return self._entity_symbols.get_all_qids()", "def get_constraint_ids():\n return sorted([p.getConstraintUniqueId(i) for i in range(p.getNumConstraints())])", "def get_all_supplier_id() -> List[int]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select id from supplier order by id\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def get_talk_ids(self):\r\n return QtSql.QSqlQuery('''SELECT Id FROM presentations''')", "def get_ids(**metafilter):\n\n metafilter = _clean(metafilter)\n search = _build(metafilter).source(False)\n\n for hit in search.scan():\n yield hit.meta.id", "def compress_doc_ids(doc_ids: list):\r\n int_doc_ids = [int(x) for x in doc_ids]\r\n sorted_doc_ids = sorted(int_doc_ids)\r\n return sorted_doc_ids", "def get_id_set(self):\n s = set()\n for player in Player.select(Player.player_id):\n s.add(player.player_id)\n return s", "def get_all_ids(cls):\n subscriptions = db.session.query(Subscription.id).all()\n return [s.id for s in subscriptions]", "def ids(self):\n\n if len(self._id_ranges) % 2 != 0:\n raise AutoIDException(message=ExceptionsMessage.AutoIDIllegalRanges)\n\n ids = []\n for i in range(int(len(self._id_ranges) / 2)):\n begin = self._id_ranges[i * 2]\n end = self._id_ranges[i * 2 + 1]\n for j in range(begin, end):\n ids.append(j)\n\n return ids", "def get_ids(self, criteria: Dict = None):\n\n if criteria is None:\n criteria = {}\n\n ids = self.m.query(\n criteria=criteria,\n properties=[\"material_id\"])\n print(\"number %s\" % len(ids))\n ids = pd.DataFrame(ids).values.ravel().tolist()\n self.ids = ids\n return ids", "def qids_to_splits(qids):\n qidmap = {}\n i = 0\n for qid in qids:\n if not qid in qidmap:\n qidmap[qid] = i\n i+=1\n new_qids = []\n for qid in qids:\n new_qids.append(qidmap[qid])\n qidcount = np.max(new_qids)+1\n splits = [[] for i in range(qidcount)]\n for i, qid in enumerate(new_qids):\n splits[qid].append(i) \n return splits", "def getRefreshIDs(self):\n ids = []\n for bucket in self.router.getLonelyBuckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids", "def keys(self):\n return list(self.token2id.values())", "def job_ids(self):\n return self.connection.lrange(self.key, 0, -1)", "def get_all_ids(self, index_name, doc_type, query_body):\n\n print 
'getting all ids...'\n\n # query scroll\n id_list = []\n\n scroll = self.es.search(\n index=index_name,\n doc_type=doc_type,\n scroll='10m',\n size=10000,\n fields=['_id'],\n body=query_body)\n\n scroll_size = scroll['hits']['total']\n size = 0\n # retrieve results\n while scroll_size > 0:\n # scrolled data is in scroll['hits']['hits']\n hits_list = scroll['hits']['hits']\n for hit in hits_list:\n doc_id = hit['_id']\n id_list.append(doc_id)\n # update scroll size\n scroll_size = len(scroll['hits']['hits'])\n size += scroll_size\n print \"scrolled - \", str(size)\n # prepare next scroll\n scroll_id = scroll['_scroll_id']\n # perform next scroll\n scroll = self.es.scroll(scroll_id=scroll_id, scroll='10m')\n return id_list", "def ordering(self):\n value = []\n for i in self:\n if isinstance(i, PQ):\n value.extend(i.ordering())\n else:\n value.append(i)\n\n return value", "def get_list(self):\n return sorted(self.__entries.keys())", "def get_translated_ids(id):", "def cloud_ids(self):\n if self.stage == 'trainval':\n ids = self.all_cloud_ids['train'] + self.all_cloud_ids['val']\n else:\n ids = self.all_cloud_ids[self.stage]\n return sorted(list(set(ids)))", "def vertex_ids(self):\n return self.get_ids()", "def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]", "def make_order_by_combination(self):\n order_by_list = []\n order_by = self.request.GET.get(\"order_by\", None)\n\n if order_by:\n order_by_list = [SORT_BY_REFERENCE_DICT[i.strip()]\n for i in order_by.split(\",\")]\n\n return order_by_list", "def id_ranges(self):\n return self._id_ranges", "def logbook_get_ids_names():\n return IMPL.logbook_get_ids_names()", "def get_all_ids(self):\r\n return self.__person_repository.get_all_ids()", "def build_run_id_query(data):\n data_query_set = [Q(metadata__runId=value) for value in set(data)]\n query = data_query_set.pop()\n for item in data_query_set:\n query |= item\n return query", "def _sorted_seat_ids(seats: list):\n seat_ids = [_seat_id(**seat) for seat in seats]\n return sorted(seat_ids)", "def get_all(self, queries):\n return [self._unpack(x.ids) for x in self.server.select2([\n ttypes.SelectQuery(\n [ttypes.SelectOperation(\n operation_type=\\\n ttypes.SelectOperationType.SimpleQuery,\n term=self._query_term(*x)\n )\n ],\n ttypes.Page(self.page_length, -1)\n )\n for x in queries])]", "def list_pnums(self):\n return sorted([key for key in self.catalog])" ]
[ "0.69242555", "0.6751612", "0.6483866", "0.6429385", "0.6352032", "0.62008715", "0.6198006", "0.6170076", "0.6170076", "0.61510664", "0.6146125", "0.6133298", "0.611966", "0.6042621", "0.603371", "0.5974146", "0.59307647", "0.5919666", "0.590026", "0.58894217", "0.5859048", "0.5839993", "0.58137715", "0.58083814", "0.579837", "0.57792974", "0.5758356", "0.57561976", "0.5748821", "0.5748821", "0.57474923", "0.5721008", "0.5711596", "0.5681493", "0.56786996", "0.5671927", "0.5669716", "0.5662339", "0.56578857", "0.56578857", "0.56557065", "0.5646557", "0.5627181", "0.5623585", "0.5623585", "0.5621224", "0.56104136", "0.557846", "0.5559397", "0.5549164", "0.554756", "0.5541902", "0.5512631", "0.5511504", "0.5506021", "0.5488366", "0.54580534", "0.5452822", "0.5440928", "0.5440928", "0.5440928", "0.5440928", "0.5440928", "0.5438736", "0.54377645", "0.54359406", "0.54323465", "0.54255", "0.5422264", "0.54208916", "0.54176766", "0.5414684", "0.5406189", "0.54010993", "0.53972524", "0.5395538", "0.53943056", "0.53924525", "0.5383156", "0.5382449", "0.5378681", "0.5378248", "0.5374031", "0.53688526", "0.53631574", "0.5354016", "0.535148", "0.53512686", "0.53490895", "0.5348765", "0.53475285", "0.5344779", "0.53441685", "0.5329264", "0.5324206", "0.5303236", "0.52947074", "0.52926666", "0.5289736", "0.5276282" ]
0.658589
2
Returns a list of the retrieved documents for the query
def getDocs(self, qryid, counter = None):
        if (counter is None) or (counter>=len(self.qryDocs[qryid])):
            return self.qryDocs[qryid]
        else:
            return self.qryDocs[qryid][:counter]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]", "def query(self):\n query_url = self.get_query_url()\n logging.info('Querying: ' + query_url)\n json_data = request.urlopen(query_url).read().decode()\n logging.debug('Retrieved the following ' + json_data)\n response = json.loads(json_data)\n\n return self.get_docs_from_response(response)", "def list_documents(\n self, index: str, query: Dict[str, Any] = None\n ) -> Iterable[Dict[str, Any]]:\n return es_scan(self.__client__, query=query or {}, index=index)", "def getDocumentAll(self, query = {}, keys = []):\n query = query or {}\n if \"include_docs\" not in query:\n query[\"include_docs\"] = True\n\n if not keys:\n return self.client.get(self.name +\"/_all_docs\", query)\n else:\n return self.client.post(self.name +\"/_all_docs\", query,\n {\"keys\": keys}).getBodyData()", "def list(self, request, *args, **kwargs):\n self._process_settings(request)\n query_name = None\n if len(args) == 1:\n query_name = args[0]\n # get query DSL from query container\n query_dsl = {}\n es_response_raw = req_session.get(\n '{}/{}/_{document_type}/_search'.format(\n settings.ELASTIC_SEARCH_HOST,\n '{site}__{app}'.format(site=settings.SITE, app=self.app),\n document_type=self.document_type),\n data=json.dumps(query_dsl))\n else:\n es_response_raw = req_session.get(\n '{}/{}/_{document_type}/_search'.format(\n settings.ELASTIC_SEARCH_HOST,\n '{site}__{app}'.format(site=settings.SITE, app=self.app),\n document_type=self.document_type))\n if es_response_raw.status_code != 200:\n exceptions.XimpiaAPIException(_(u'Could not search collection'))\n es_response = es_response_raw.json()\n logger.info(u'DocumentViewSet.list :: Performed search \"{query_name}\" '\n u'for document {document_type}'.format(\n query_name=query_name,\n document_type=self.document_type))\n # make output of logical documents from physical ones\n return Response(es_response['hits']['hits'])", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def get_documents(self, index, **kwargs):\n return self._build_search(index, **kwargs).params(request_timeout=2000).scan()", "def bulk_get_documents():\n ids = flask.request.json\n if not ids:\n raise UserError(\"No ids provided\")\n if not isinstance(ids, list):\n raise UserError(\"ids is not a list\")\n\n with blueprint.index_driver.session as session:\n # Comment it out to compare against the eager loading option.\n # query = session.query(IndexRecord)\n # query = query.filter(IndexRecord.did.in_(ids)\n\n # Use eager loading.\n query = session.query(IndexRecord)\n query = query.options(\n joinedload(IndexRecord.urls).joinedload(IndexRecordUrl.url_metadata)\n )\n query = query.options(joinedload(IndexRecord.acl))\n query = query.options(joinedload(IndexRecord.authz))\n query = query.options(joinedload(IndexRecord.hashes))\n query = query.options(joinedload(IndexRecord.index_metadata))\n query = query.options(joinedload(IndexRecord.aliases))\n query = query.filter(IndexRecord.did.in_(ids))\n\n docs = [q.to_document_dict() for q in query]\n return flask.Response(json.dumps(docs), 200, mimetype=\"application/json\")", "def obj_get_list(self, request=None, **kwargs):\n filter_object = self.get_filter_object(request)\n list = self.get_collection(request).find(filter_object)\n order_field, direction = self.get_order_field_and_direction(request)\n \n if (order_field is not None):\n list.sort(order_field, direction)\n \n return 
map(Document, list)", "def getDocsList(self):\n return self.docs_list", "def documents(self):\r\n return doc.Documents(self)", "def retrieve(self, query: str, filters: dict = None, top_k: Optional[int] = None, index: str = None) -> List[Document]:\n if top_k is None:\n top_k = self.top_k\n if index is None:\n index = self.document_store.index\n documents = self.document_store.query(query=None, filters=filters, top_k=top_k,\n custom_query=self.custom_query, index=index)\n return documents", "def do_query(documents, config_file=None, logger=None, context=None):\n num_documents = documents.count()\n return {\"num_documents\": num_documents}", "def all_documents(self):\n return [item[0] for item in\n self.sql_session.query(Document).values(Document.id)]", "def retrieve(self, query: str, filters: dict = None, top_k: Optional[int] = None, index: str = None) -> List[Document]:\n if top_k is None:\n top_k = self.top_k\n if index is None:\n index = self.document_store.index\n\n documents = self.document_store.query(query, filters, top_k, self.custom_query, index)\n return documents", "def get_documents() -> list[Document]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Document)]", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return res", "def search_documents():\n req_body = request.get_json(force=True)\n search_query = req_body.get('query')\n\n results = app.search_flow.search(\n inputs=Document(text=search_query),\n return_results=True\n )\n\n res = {\n 'matches': [match.id for match in results[0].docs[0].matches]\n }\n return res", "def getMyDocuments( self, REQUEST=None ):\n membership = getToolByName( self, 'portal_membership', None )\n if membership is None:\n return\n\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n\n total_objects, documents = self.searchResults( type='documents', with_limit=1, REQUEST=REQUEST, \\\n creator=uname, implements='isHTMLDocument', sort_on='created', sort_order='reverse', \\\n sort_limit=50 )\n\n return ( total_objects, documents, )", "def _run_async_query(self, context):\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n results = result.get(\"results\", [])\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def _run_async_query(self, context):\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n results = result.get(\"results\", [])\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def get_documents(doc_type):\n doc_type = 1 if doc_type == 'registration' else 2\n return Documents.query.filter_by(type=doc_type).all()", "def documents(self, schema=None, wrapper=None, **params):\n return ViewResults(self.raw_view, '_all_docs',\n wrapper=wrapper, schema=schema, params=params)", "def get_docs_sources(self):\n docs = [doc for doc,_,_ in self.doc_to_get]\n \n retry_until_ok(self.docman.elastic.indices.refresh, index=\"\")\n documents = self.docman.elastic.mget(body={'docs': docs})\n return documents", "def documents(self) -> list[str]:\n return self._documents", "def get_document_list(\n self,\n project_id: int,\n url_parameters: dict = {}\n ) -> 
requests.models.Response:\n return self.get(\n 'v1/projects/{project_id}/docs{url_parameters}'.format(\n project_id=project_id,\n url_parameters=self.build_url_parameter(url_parameters)\n )\n )", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "async def list_documents(self, app_id, namespace, index_name, start_doc_id,\n include_start_doc, limit, keys_only,\n max_doc_id=None, include_max_doc=True):\n collection = get_collection_name(app_id, namespace, index_name)\n\n if start_doc_id or max_doc_id:\n # Apply range filter to ID\n start_doc_id = '*' if start_doc_id is None else start_doc_id\n max_doc_id = '*' if max_doc_id is None else max_doc_id\n left_bracket = '[' if include_start_doc else '{'\n right_bracket = ']' if include_max_doc else '}'\n solr_filter_query = 'id:{}{} TO {}{}'.format(\n left_bracket, start_doc_id, max_doc_id, right_bracket\n )\n else:\n solr_filter_query = None\n # Order by ID\n solr_sort_fields = ['id asc']\n\n solr_projection_fields = None\n if keys_only:\n # Skip everything but ID\n solr_projection_fields = ['id']\n\n # Use *:* to match any document\n solr_search_result = await self.solr.query_documents(\n collection=collection, query='*:*', filter_=solr_filter_query,\n limit=limit, fields=solr_projection_fields, sort=solr_sort_fields\n )\n docs = [_from_solr_document(solr_doc)\n for solr_doc in solr_search_result.documents]\n return docs", "def GetDocumentListFeed(self):\n q = gdata.docs.service.DocumentQuery();\n return self.QueryDocumentListFeed(q.ToUri())", "def get_many(collection, query: dict, selection=None) -> List[dict]:\n data = []\n if selection is None:\n for item in collection.find(query):\n data.append(item)\n else:\n for item in collection.find(query, selection):\n data.append(item)\n return data", "def list_all(mongo_collection):\n return [doc for doc in mongo_collection.find()]", "def get_all_documents(self, collection):\n\n return self.client[self.db][collection].find()", "def get_all(cls, **filters) -> List[dict]:\n limit = filters.pop(\"limit\", 0) or 0\n offset = filters.pop(\"offset\", 0) or 0\n errors = cls.validate_query(filters)\n if errors:\n raise ValidationFailed(filters, errors)\n\n cls.deserialize_query(filters)\n\n if cls.logger.isEnabledFor(logging.DEBUG):\n if filters:\n cls.logger.debug(f\"Query documents matching {filters}...\")\n else:\n cls.logger.debug(f\"Query all documents...\")\n documents = cls.__collection__.find(filters, skip=offset, limit=limit)\n if cls.logger.isEnabledFor(logging.DEBUG):\n nb_documents = (\n cls.__collection__.count_documents(filters, skip=offset, limit=limit)\n if limit\n else cls.__collection__.count_documents(filters, skip=offset)\n )\n cls.logger.debug(\n f'{nb_documents if nb_documents else \"No corresponding\"} documents retrieved.'\n )\n return [cls.serialize(document) for document in documents]", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results", "def get_all_documents(bibliography: Param, session: CondorSession) -> Response:\n if not bibliography:\n return Response(\n {'message': 'The especified eid is not found on database'},\n status=400,\n )\n documents = [\n sc.Document(doc) for doc in Document.list(session, bibliography)\n ]\n return 
Response(documents)", "def retrieve_all_documents(\n self,\n collection_name: str,\n sort: List = [],\n asc: bool = True,\n include_vector: bool = True,\n include_fields: List = [],\n retrieve_chunk_size: int=1000,\n **kwargs\n ):\n num_of_docs = self.collection_stats(collection_name)['number_of_documents']\n with self.progress_bar(list(range(int(num_of_docs/ retrieve_chunk_size)))) as pbar:\n d = self.retrieve_documents(\n collection_name=collection_name, page_size=retrieve_chunk_size, sort=sort, asc=asc, include_vector=include_vector,\n include_fields=include_fields, **kwargs\n )\n all_docs = d[\"documents\"]\n pbar.update(1)\n while len(d[\"documents\"]) > 0:\n d = self.retrieve_documents(\n collection_name=collection_name,\n page_size=retrieve_chunk_size,\n cursor=d[\"cursor\"],\n sort=sort,\n asc=asc,\n include_vector=include_vector,\n include_fields=include_fields\n )\n all_docs += d[\"documents\"]\n pbar.update(1)\n return all_docs", "def documents_search(request):\n\tif request.method == 'GET':\n\t\tparams = request.GET\n\telif request.method == 'POST':\n\t\tparams = request.POST\n\telse:\n\t\treturn HttpResponse(status=405)\n\n\t# grab params directly to implement defaults as\n\t# opposed to panicy django forms behavior.\n\tquery = params.get('q', '')\n\ttry:\n\t\tstart = int(params.get('start', '0'))\n\texcept:\n\t\tstart = 0\n\ttry:\n\t\tlimit = min(int(params.get('limit', DEFAULT_MAPS_SEARCH_BATCH_SIZE)),\n\t\t\t\t\tMAX_MAPS_SEARCH_BATCH_SIZE)\n\texcept: \n\t\tlimit = DEFAULT_MAPS_SEARCH_BATCH_SIZE\n\n\ttry:\n\t\trelated_id = int(params.get('related_id', None))\n\texcept: \n\t\trelated_id = None\n\n\trelated_type = params.get('related_type', None)\n\n\tsort_field = params.get('sort', u'')\n\tsort_field = unicodedata.normalize('NFKD', sort_field).encode('ascii','ignore')\t \n\tsort_dir = params.get('dir', 'ASC')\n\tresult = _documents_search(query, start, limit, sort_field, sort_dir, related_id, related_type, request.user)\n\n\tresult['success'] = True\n\treturn HttpResponse(json.dumps(result), mimetype=\"application/json\")", "def test_get_documents_populated(index_with_documents):\n response = index_with_documents().get_documents()\n assert isinstance(response.results, list)\n assert len(response.results) == 20", "def get_documents_with_q(self, index, query=Q(), source=None, add_index_name = False):\n \n s = Search(using=self.es, index=index)\n if source:\n s = s.source(source)\n # Dotted fields, replace . 
by __\n q = s.query(query)\n #print(str(q.to_dict()).replace(\"'\",'\"'))\n results = s.query(query).scan()\n \n if add_index_name:\n all_dicts = []\n for hit in results:\n result_dict = hit.to_dict()\n result_dict['_index'] = hit.meta.index\n all_dicts.append(result_dict)\n \n fa = pd.DataFrame.from_dict(all_dicts)\n else:\n fa = pd.DataFrame([hit.to_dict() for hit in results])\n \n return fa", "def get_documents(self):\n documents = self.tree.execute(\"$.documents\")\n for doc in documents:\n sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}\n self.document_dict[doc['@id']] = {'sentences': sentences,\n 'location': doc['location']}\n return", "def search():\n results = []\n for row in db.session.query(DBcorpus):\n serialized = fix_corpus_format(CorpusSchema().dump(row).data)\n results.append(serialized)\n return results, 200", "def _run_async_query(self, context):\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n self._total_results = len(results)\n self._count_valid = True\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def getQueries(self):\n return sorted( self.qryDocs.keys() )", "def get_documents(self, project_id, param=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' \n response = zoho_http_client.get(url, self.details, param)\n return parser.get_documents(response)", "def get_collection():\n\n args = request.args.to_dict()\n\n # Convert size to int, for Python.\n if 'size' in args:\n args['size'] = int(args['size'])\n\n results = collection.search(request.args.get('q', '*'), **args)\n\n return jsonify(records=[r.dict for r in results])", "def fetch_all_from_db(collec):\n db = client.get_database(\"tweetstorm\")\n collection = db.get_collection(collec)\n ret = list(collection.find())\n logger.info(str(len(ret)) + ' documents read from the db')\n return ret", "def do_search(queries):\n global documents, list_document\n results = {}\n query = tokenize(queries)\n if query == []:\n sys.exit()\n # find document ids containing all query terms. 
Works by\n # intersecting the posting lists for all query terms.\n relevant_document_ids = intersection(\n [set(postings[term].keys()) for term in query])\n if not relevant_document_ids:\n documents.clear()\n list_document[:] = []\n flash('empty')\n else:\n scores = sorted([(id,similarity(query,id))\n for id in relevant_document_ids],\n key=lambda x: x[1],\n reverse=True)\n print \"Score: filename\"\n global total_document_found\n total_document_found = 0\n for (id,score) in scores:\n print str(score)+\": \"+document_filenames[id]\n results[document_filenames[id]] = score\n total_document_found += 1\n flash(\"Total document found : \" + str(total_document_found) + \" of \" + str(N))\n return results", "def show_documents():\n\n document = Document(connection=connection, cursor=cursor)\n\n all_documents = document.get_all_documents()\n\n context = {\n 'all_documents': all_documents\n }\n\n return render_template('pages/tables/documents.html', **context)", "def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]", "def docs(self, searcher, exclude_docs=None):\r\n\r\n try:\r\n return self.matcher(searcher, exclude_docs=exclude_docs).all_ids()\r\n except TermNotFound:\r\n return []", "def getAccessibleDocuments( self, REQUEST=None ):\n membership = getToolByName( self, 'portal_membership', None )\n if membership is None:\n return\n\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n IsManager = user.IsManager()\n IsAdmin = user.IsAdmin()\n\n membership.updateLoginTime( uname )\n\n total_objects, documents = self.searchResults( type='documents', with_limit=1, REQUEST=REQUEST, \\\n implements='isHTMLDocument', sort_on='created', sort_order='reverse', \\\n sort_limit=50 )\n\n if not IsManager and documents:\n res = []\n system_objects = CustomSystemObjects()\n for x in documents:\n try: path = x.getPath()\n except: continue\n IsSystem = 0\n for key in system_objects:\n if path.find( key ) > -1:\n IsSystem = 1\n break\n if not IsSystem:\n res.append( x )\n return ( total_objects, res, )\n\n return ( total_objects, documents, )", "def list_all(mongo_collection):\n docs = []\n list_all = mongo_collection.find()\n for i in list_all:\n docs.append(i)\n return docs", "def read_queryset(self, using=None):\n return self.index_queryset(using=using)", "def list_documents(self, engine_name, current=1, size=20):\n data = { 'page': { 'current': current, 'size': size } }\n return self.swiftype_session.request('get', \"engines/{}/documents/list\".format(engine_name), json=data)", "def List(cls):\n return WordList.query().fetch_async()", "def get_all_documents(root_name):\n # Using our db connection, get all documents with the root, **root_name**\n documents = db.child(root_name).get()\n \n # The **doc_list** will eventually be a list of document objects\n # formatted in a more workable manner than the OrderedDicts that we get. \\* <br> \n doc_list = []\n\n for doc in documents.each():\n \"\"\"\n Each **doc** within **documents** has a key and a val. The key is the document id, which\n we'll use to reference the document for db manipulation later, and the val\n contains the document object itself. 
That'll be things like the 'title', \n 'username', 'chapter_list'\n \"\"\"\n doc_object = doc.val()\n doc_object['id'] = doc.key()\n doc_list.append(doc_object)\n\n return doc_list", "def search():\n args = request.args.to_dict()\n query = QueryModel(args)\n result = repository.search_text(COLLECTION_NAME, query.value)\n return {\"texts\": result}", "def get_documents(self, engine_name, document_ids):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(document_ids)\n return self.swiftype_session.request('get', endpoint, data=data)", "def list_documents(self, database, collection, **kwargs):\n kwargs = validators.check_list_documents_params(**kwargs)\n r = self.__get_response(settings.LST_DOCS,\n {\"db\": database, \"col\": collection}, **kwargs)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])", "def fetch_document_contents(query, doc_list):\n\n output_str = 'Query [{0}] fetched {1} results:\\n'.format(query, len(doc_list))\n flag = False\n doc_list.sort(key=lambda x: x[1], reverse=True)\n contents = {}\n\n with open(DATA_FILE, \"r\") as file:\n for line in file:\n if re.match(\"<DOC .*\", line): # New Document starts\n doc_id = int(re.search(r'\\d+', line).group())\n contents[doc_id] = ''\n flag = True\n\n if flag:\n contents[doc_id] += line\n if re.match(\"</DOC.*\", line):\n flag = False\n\n file.close()\n\n for item in doc_list:\n output_str += '--------------------------------------------------------------\\n'\n output_str += 'Document= {0} (Score= {1})\\n'.format(item[0], item[1])\n output_str += contents[item[0]]\n\n return output_str", "def get_documents(self, subtypes=None, refresh=False):\n search = ScopusSearch('au-id({})'.format(self.identifier), refresh)\n if subtypes:\n return [p for p in search.results if p.subtype in subtypes]\n else:\n return search.results", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def all_docs(self, by_seq=False, **params):\n if by_seq:\n try:\n return self.view('_all_docs_by_seq', **params)\n except ResourceNotFound:\n # CouchDB 0.11 or sup\n raise AttributeError(\"_all_docs_by_seq isn't supported on Couchdb %s\" % self.server.info()[1])\n\n return self.view('_all_docs', **params)", "def get(self, *args):\n return self.docs.get(*args)", "def __iter__(self):\n for document in self.query:\n yield self._to_document(document)", "def getDocuments(self):\n documents = []\n\n for document in self.metaData.jsonObj['documents']:\n d = HyperLinkResource(document)\n documents.append(Document(self._client, d.selfLink))\n\n return documents", "def items(self):\n return self.docs.items()", "def search():\n # response = request.json['search']\n jsonObj = request.get_json()\n query = str(jsonObj['query'])\n regex = re.compile(query, re.IGNORECASE)\n results = []\n # Build a pymongo command to search the document by query term. 
Only executes if active is set to True.\n # Only matches _id\n active = True\n client = MongoClient(db_config)\n if active == True:\n # Search Publications\n db = client['Publications']\n publications = db['Publications']\n pcount = publications.find({\"_id\": regex}).count()\n p = publications.find({\"_id\": regex})\n # Search Corpus\n db = client['Corpus']\n corpus = db['Corpus']\n ccount = corpus.find({\"_id\": regex}).count()\n c = corpus.find({\"_id\": regex})\n\n htmlResult = \"\"\n if pcount == 0:\n htmlResult = \"<h4>No publications found.</h4>\"\n else:\n htmlResult = \"<h4>Publications: \" + str(pcount) + \"</h4>\"\n htmlResult += \"<ul>\"\n for item in p:\n args = '?_id=' + item[\"_id\"] + '&amp;path=' + item[\"path\"]\n htmlResult += '<li><a href=\"/publications/edit' + args + '\">' + item[\"_id\"] + '</a></li>'\n htmlResult += \"</ul>\"\n\n htmlResult += \"<hr>\"\n\n if ccount == 0:\n htmlResult += \"<h4>No corpus items found.</h4>\"\n else:\n htmlResult += \"<h4>Corpus: \" + str(ccount) + \"</h4>\"\n htmlResult += \"<ul>\"\n for item in c:\n args = '?_id=' + item[\"_id\"] + '&amp;path=' + item[\"path\"]\n htmlResult += '<li><a href=\"/corpus/collection/edit' + args + '\">' + item[\"_id\"] + '</a></li>'\n htmlResult += \"</ul>\"\n\n # Return the Ajax response\n return htmlResult", "def get_all_running(self) -> List[DocumentReference]:\n return self.get_all_documents(Type._RUNNING)", "def get_results(self, nb_results=1000):\n\t\tdocs = self.searcher.search(self.constrained_query.build(), nb_results).scoreDocs\n\t\tself.constrained_query = BooleanQuery.Builder()\n\n\t\thits = []\n\t\tfor i in range(len(docs)):\n\t\t\thits.append({})\n\t\t\tfor field in self.reader.document(docs[i].doc).getFields():\n\t\t\t\thits[i][field.name()] = field.stringValue()\n\n\t\thits = self.remove_duplicates(hits)\n\t\treturn hits", "def get_all_documents(self, type: Type) -> List[DocumentReference]:\n runners = []\n collection = self.client.collection(type.value).list_documents()\n for document in collection:\n runners.append(document)\n\n return runners", "def get_all(collection):\n data = []\n for item in collection.find({}):\n data.append(item)\n return data", "def list_documents(self, report_type: Type, key: str = None) -> List[str]:\n documents = []\n collection = self.client.collection(f'{report_type}').list_documents()\n for document in collection:\n if key:\n if document.id == key:\n for _document in document.get().to_dict():\n documents.append(_document)\n else:\n documents.append(document.id)\n\n return documents", "def get_list(self, *fields, **query_data):\n from utils import ElasticSearchBase\n kwargs = {}\n if query_data.get('size'):\n kwargs['size'] = query_data.pop('size')\n # mapping the data. 
creating the data which can parsed by query builder.\n data = dict()\n # creating fields list which we expect elastic search should return\n fields = [self.get_es_key(self.index_name, field) for field in fields] \\\n if fields else FIELDS_MAP[self.index_name].keys()\n for k, v in query_data.iteritems():\n # mapping the keys with the one in elastic search\n data[self.get_es_key(self.index_name, k)] = v\n\n esb = ElasticSearchBase(query=data, fields=fields, **kwargs)\n search_result = esb.fetch_es_response(index_name=self.index_name,\n lucene_query=True)\n # translating the result readable to client (keys expected by api)\n if search_result.get('error'):\n return []\n results = []\n try:\n for res in search_result['hits']['hits']:\n # now preparing the list of dict with the keys which user passes\n # initially\n results.append({FIELDS_MAP[self.index_name][field]:\n res['fields'][field][0] for field in fields})\n except KeyError as e:\n LOGGER.exception(e)\n # the required keys are not there.Issue could be configuration error\n raise EnvironmentError('Issue in fetch or parsing the result. '\n 'Please report this to admin!')\n return results", "def documents_dslquery(dsl_dict, **kwargs):\n return _dslquery('documents', dsl_dict, **kwargs)", "def solr_query(config, solr_host, fq, solr_collection_name):\n # solr_collection_name = config['solr_collection_name']\n\n getVars = {'q': '*:*',\n 'fq': fq,\n 'rows': 300000}\n\n url = f'{solr_host}{solr_collection_name}/select?'\n response = requests.get(url, params=getVars)\n return response.json()['response']['docs']", "def boolean_retrieve(self, query):\n # ------------------------------------------------------------------\n # TODO: Implement Boolean retrieval. You will want to use your\n # inverted index that you created in index().\n # Right now this just returns all the possible documents!\n docs = set(self.get_posting(query[0])) # returns empty list if first word of query not found\n \n if docs: # no need to loop through rest of query if first word not found\n for word in query[1:]:\n docs = docs.intersection(self.get_posting(word)) # intersect set\n\n docs = list(docs) # change back to list after intersection()\n # ------------------------------------------------------------------\n return sorted(docs) # sorted doesn't actually matter", "def iter_documents(self):\n raise NotImplementedError", "def get_all(self, queries):\n return [self._unpack(x.ids) for x in self.server.select2([\n ttypes.SelectQuery(\n [ttypes.SelectOperation(\n operation_type=\\\n ttypes.SelectOperationType.SimpleQuery,\n term=self._query_term(*x)\n )\n ],\n ttypes.Page(self.page_length, -1)\n )\n for x in queries])]", "def prefiltered_docs(self):\n relevant_docs = set()\n for term in self.query_terms:\n try:\n for entry in self.inv_idx.idx[term]:\n relevant_docs.add(entry.doc_id)\n except KeyError:\n pass\n return relevant_docs", "def index(cls, db):\n return (Todo(**doc) for doc in cls.collection(db).find())", "async def get_many(self, **query):\n\n return [await self._expand(obj) for obj in await self.db.get_many(**query)]", "def phrase_retrieve(self, query):\n # ------------------------------------------------------------------\n # TODO: Implement Phrase Query retrieval (ie. return the documents \n # that don't just contain the words, but contain them in the \n # correct order) You will want to use the inverted index \n # that you created in index(), and may also consider using\n # boolean_retrieve. 
\n # NOTE that you no longer have access to the original documents\n # in self.docs because it is now a map from doc IDs to set\n # of unique words in the original document.\n # Right now this just returns all possible documents!\n docs = []\n first_hash = self.boolean_retrieve(query) # narrows down possible documents\n\n for doc in first_hash:\n title = self.titles[doc]\n word_list = []\n \n for word in query:\n word_list.append(self.inv_index[word][title]) # list for each query word from inverted index\n\n if len(word_list) == 1:\n docs.append(doc) # only one word in query\n break\n\n is_match = bool # undefined boolean value for match or not\n\n for i in word_list[0]: # first word occurrence positions\n for j in range(1, len(query)): # next words in query\n if (i + j) in word_list[j]: # check if words in positional order for document\n is_match = True # stays true throughout range(1, len(query)) if match\n else:\n is_match = False # update match status \n break\n if is_match:\n docs.append(doc)\n break\n \n # ------------------------------------------------------------------\n return sorted(docs) # sorted doesn't actually matter", "async def find_all(self, lazy=None, alias=None):\n to_list_arguments = {}\n if self._limit is not None:\n to_list_arguments[\"length\"] = self._limit\n else:\n to_list_arguments[\"length\"] = DEFAULT_LIMIT\n\n cursor = self._get_find_cursor(alias=alias)\n\n self._filters = {}\n\n docs = await cursor.to_list(**to_list_arguments)\n\n # if _loaded_fields is not empty then documents are partly loaded\n is_partly_loaded = bool(self._loaded_fields)\n\n result = []\n for doc in docs:\n obj = self.__klass__.from_son(\n doc,\n # set projections for references (if any)\n _reference_loaded_fields=self._reference_loaded_fields,\n _is_partly_loaded=is_partly_loaded,\n )\n\n if (lazy is not None and not lazy) or not obj.is_lazy:\n await obj.load_references(obj._fields)\n\n result.append(obj)\n\n return result", "def get_all():\n return SavedQuery.get_all()", "def fetch_all_url_records():\r\n db_cursor = db_collection.find({'deleted': False})\r\n result = list(db_cursor)\r\n db_cursor.close()\r\n\r\n return result", "def get(self, *args, **kwargs):\n self.before_get(args, kwargs)\n\n qs = QSManager(request.args, self.schema)\n objects_count, objects = self._data_layer.get_collection(qs, kwargs)\n\n schema_kwargs = getattr(self, 'get_schema_kwargs', dict())\n schema_kwargs.update({'many': True})\n\n schema = compute_schema(self.schema,\n schema_kwargs,\n qs,\n qs.include)\n\n result = schema.dump(objects).data\n\n view_kwargs = request.view_args if getattr(self, 'view_kwargs', None) is True else dict()\n add_pagination_links(result,\n objects_count,\n qs,\n url_for(self.view, **view_kwargs))\n\n result.update({'meta': {'count': objects_count}})\n\n self.after_get(result)\n return result", "def read(self):\n return self.client.get(\n index=self.index,\n id=self.document_id,\n ignore=[404],\n )['_source']", "def stats_docs(self, host):\n\n s = self.get_stats(host, 'docs')\n\n data = {\n 'count': s['count'],\n 'deleted': s['deleted']\n }\n\n return data", "def query(self, queries):\n final_result = []\n results = self.__return_query('query', queries)\n if results == None:\n return None\n else:\n if len(results) > 1:\n for result in results:\n final_result.append(result['data'])\n else:\n final_result = results\n return final_result", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = 
merge_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def get_filtered_documents(self, numberOfMessages: int):\n sqs_messages = []\n results = app.Results()\n try:\n self.Logger.info(f'Get {numberOfMessages} of messages from sqs')\n self.Logger.debug(f'Getting {numberOfMessages} of messages from sqs {self.sqs_queue_url}')\n get_documents = self.sqs_client.receive_message(\n QueueUrl=self.sqs_queue_url,\n MaxNumberOfMessages=numberOfMessages\n )\n [sqs_messages.append(json.loads(message['Body'])) for message in get_documents['Messages']]\n results.Results = sqs_messages\n results.ActionStatus = 0\n except Exception as e:\n results.ActionStatus = -1\n self.Logger.info(f\"An issue occured during the process of getting messages from sqs.\")\n self.Logger.debug(f'An issue occured during the process of getting messages from sqs. Message {e}')\n return results", "def retrieve_documents(s, db):\n terms = ['\"%s\"' %stem(term) for term in tokenize(s)]\n \n conn = sqlite3.connect(db)\n c = conn.cursor()\n c.execute('''SELECT docs, tfs FROM inv_index \n WHERE term IN (%s)''' %(','.join(terms)))\n res = c.fetchall()\n\n if not res:\n return None\n \n # if only one result, get the doc(s) with highest tf\n if len(res) == 1:\n doc_ids = str_to_int_vect(res[0][0])\n tfs = str_to_int_vect(res[0][1])\n doc_ids = [doc_ids[i] for i in get_max_indexes(tfs)]\n else:\n # multiple results, get the intersection of doc ids\n sets = [set(str_to_int_vect(d)) for d, _ in res]\n doc_ids = list(set.intersection(*sets))\n\n # if no intersection, then return the documents with highest tf-idf\n if len(doc_ids) == 0:\n c.execute('SELECT id FROM docs')\n n = len(c.fetchall())\n for d, t in res:\n tf_idf = tfidf(n, len(str_to_int_vect(d)), str_to_int_vect(t))\n doc_ids += get_max_indexes(tf_idf)\n \n doc_ids = [str(i) for i in doc_ids]\n c.execute('''SELECT doc FROM docs WHERE id IN (%s)''' %(','.join(doc_ids)))\n return [res[0] for res in c.fetchall()]", "def test_get_documents_default(empty_index):\n response = empty_index().get_documents()\n assert isinstance(response.results, list)\n assert response.results == []", "def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")", "def retrieveDocuments(self):\n documents = {}\n for reuter in self.REUTERS_FILES:\n print(reuter)\n reuter_stream = open(self.BASEPATH + reuter, encoding=\"latin-1\")\n reuter_content = reuter_stream.read()\n soup = BeautifulSoup(reuter_content, \"html.parser\")\n articles = soup.find_all('reuters')\n for article in articles:\n body = \"\"\n title = \"\"\n words = \"\"\n newid = article['newid']\n if not article.title is None:\n title = article.title.string\n if not article.body is None:\n body = article.body.string\n words = title + \" \" + body\n documents[newid] = words\n print(f\"Retrieval Complete! 
- Total Documents: {len(documents)}\")\n return documents", "def results(self, query=None, batch=True, b_size=10, b_start=0):\n # Disable theming for ajax requests\n if 'ajax' in self.request.form:\n del self.request.form['ajax']\n self.request.response.setHeader('X-Theme-Disabled', 'True')\n\n if query is None:\n query = {}\n\n query['b_start'] = b_start = int(b_start)\n query['b_size'] = b_size\n query = self.filter_query(query)\n\n if query is None:\n results = []\n else:\n query.update({'qt': 'hlsearch'});\n catalog = getToolByName(self.context, 'portal_catalog')\n try:\n results = catalog(**query)\n except ParseError:\n logger.exception('Exception while searching')\n return []\n except SolrException:\n logger.exception('Exception while searching')\n return []\n\n self.solr_response = results\n results = IContentListing(results)\n if batch:\n results = Batch(results, b_size, b_start)\n return results" ]
[ "0.77354777", "0.7667796", "0.76610154", "0.7576085", "0.7447628", "0.74134624", "0.71889716", "0.7155771", "0.71408826", "0.69945997", "0.6970507", "0.6968448", "0.6950308", "0.69352466", "0.6923409", "0.69165117", "0.6892293", "0.6890513", "0.68716574", "0.68527853", "0.68527853", "0.683987", "0.6811962", "0.6802982", "0.67937815", "0.678251", "0.6772294", "0.67550147", "0.6734504", "0.67054296", "0.6702857", "0.67003906", "0.6688217", "0.66870385", "0.66870385", "0.66824186", "0.6676947", "0.6673432", "0.6671015", "0.66676533", "0.6665504", "0.665714", "0.6580672", "0.65716547", "0.65581363", "0.65532035", "0.6549333", "0.65140325", "0.6500855", "0.64840615", "0.64710015", "0.64692545", "0.6461747", "0.6447787", "0.6440105", "0.6433203", "0.6423235", "0.64060754", "0.63964915", "0.6378621", "0.6378423", "0.637492", "0.63569516", "0.63569516", "0.63569516", "0.63468134", "0.6342892", "0.63327086", "0.63296837", "0.63084424", "0.62957025", "0.62849164", "0.62847805", "0.62820804", "0.6271221", "0.6269519", "0.6264304", "0.62548184", "0.62251675", "0.62053484", "0.62040365", "0.6196151", "0.61959183", "0.619473", "0.6194363", "0.61869454", "0.61844265", "0.61775744", "0.6175663", "0.61752707", "0.61716807", "0.61711997", "0.6158303", "0.61582744", "0.61538637", "0.6140803", "0.6128194", "0.612811", "0.6127652", "0.6119746" ]
0.64337456
55
Returns a list of the retrieved documents' scores
def getScores(self, qryid, counter = None):
        if counter is None:
            return self.qryScores[qryid]
        else:
            return self.qryScores[qryid][:counter]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scores(self):\n return self.score", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def scores_(self):\n return self.predictor.scores_", "def scores(self) -> List[float]:\n if not self.prediction:\n return []\n return [sentence.score for sentence in self.prediction.sentences]", "def getScores(self,query):\n pass", "def getScore(self):\n\t\tself.scoreList = [submissionsss.score for submissionsss in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.scoreList", "def get_score_list(self, query: list, top_k: int = 0, e: int = 0.5):\n scores = list()\n for i in range(self.document_count):\n node = (i, self.get_score(query=query, index=i, e=e))\n scores.append(node)\n scores.sort(key=lambda x: x[1], reverse=True)\n\n return scores if top_k == 0 else scores[:top_k]", "def get_scores(self):\n return pd.DataFrame(self._scores)", "def find_all(self):\n cursor = self._connection.cursor()\n cursor.execute('SELECT * FROM scores ORDER BY level')\n all_scores = cursor.fetchall()\n return all_scores", "def get_score(self):\n files_flare = self.generate_flare_set()\n files_non_flare = self.generate_non_flare_set()\n timeseries = []\n y = []\n scores = {}\n column_mapping = self.__get_column_mapping()\n for col in tqdm(range(1, 25)):\n for file in tqdm(files_flare):\n s = Sample(\"FL\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n\n for file in tqdm(files_non_flare):\n s = Sample(\"NF\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n embed = self.get_embed_vector(timeseries)\n\n embed_y = KMeans(n_clusters=5).fit_predict(embed)\n y = np.array(y).flatten()\n scores[column_mapping[col]] = self.relevance_score(embed_y, y)\n timeseries = []\n y = []\n scores_data = pd.DataFrame.from_dict(scores, orient='index', columns=['Relevance Score']).sort_values(\n by='Relevance Score', ascending=False)\n return scores_data", "def get_scores(self, query):\n score = np.zeros(self.data['corpus_size'])\n doc_len = np.array(self.data['doc_len'])\n for q in query:\n q_freq = np.array([(doc.get(q) or 0) for doc in self.data['doc_freqs']])\n score += (self.data['idf'].get(q) or 0) * (q_freq * (self.data['k1'] + 1) /\n (q_freq + self.data['k1'] * (\n 1 - self.data['b'] + self.data['b'] * doc_len /\n self.data['average_docs_len'])))\n return score", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def get_score_book(self) -> List[Tuple[str, float]]:\n returned = []\n\n for item, size in self.score_book.items():\n my_tuple = item, size\n returned.append(my_tuple)\n\n return returned", "def score_sentences(self, document, texts):\n sent_scores = []\n # call word_frequency to get a word frequency table (or rather list of words) from the respective article\n scorable_words = self.word_frequency(texts[self.sent_pos])\n # split the summaries by @highlight token\n summary_split = document.split(\"@ highlight\")\n sentenceValue = 0\n sent_len = 0\n # for each summary calculate the sentence value\n for summary in summary_split:\n words = nltk.word_tokenize(summary)\n sent_len = len(words)\n for word in words:\n if word in scorable_words:\n sentenceValue =+ 1\n # normalise sentence value based on sentence length so that longer sentences do not get an automatic advantage over shorter ones\n # as null rows havent been dropped yet there may be scores of 0\n if (sentenceValue !=0 and sent_len !=0):\n sentenceValue = sentenceValue / 
sent_len\n sent_scores.append((summary, sentenceValue))\n return sent_scores", "def get_scores(self, query):\n self.init()\n tokens = self.tokenizer.tokenize(query)\n return self.bm25_instance.get_scores(query=tokens)", "def getScore(data):\n return score", "def test_score_across_multiple_documents(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Syria reacts to Erdogan's threats: Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor(tokenizer=tokenizer)\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=False)\n self.assertEqual(3 * math.log(10 / 1, 10), scores.get('erdogan'))\n self.assertEqual(3 * math.log(10 / 2, 10), scores.get('threats'))", "def getScores():\r\n results = \"\"\r\n with sqlite3.connect(database_file) as conn:\r\n cursor = conn.cursor()\r\n team_scores = cursor.execute(\"\"\" SELECT * FROM scores;\"\"\")\r\n\r\n for row in team_scores.fetchall():\r\n teamname, auto, rc, spirit, video = row\r\n results += result_string.format(teamname, auto, rc, spirit, video) + \"\\n\"\r\n return results", "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score", "def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores", "def retrieve_all_scores(database_connection: mysql.connector.connect\n ) -> List[int]:\n cursor = database_connection.cursor()\n query = (\"SELECT pm.panelistscore FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"AND pm.panelistscore IS NOT NULL \"\n \"ORDER BY pm.panelistscore ASC;\")\n cursor.execute(query)\n result = cursor.fetchall()\n\n if not result:\n return None\n\n scores = []\n for row in result:\n scores.append(row[0])\n\n return scores", "def get_scores(self, params):\n ep = ENDPOINTS.GET_SCORES\n self._check_parameters(ep, params)\n url = self.base_url.format(ep.EXTENSION)\n url = self._extend_url(url, params)\n return self._process_url(url)", "def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)", "def coherence_scores(topic_words):\n\tscores = {}\n\tfor score_type in score_types:\n\t\turl = _palmetto_url.format(score=score_type)\n\t\tr = requests.get(url, {'words': ' '.join(topic_words)})\n\t\tscores[score_type] = float(r.text)\n\n\treturn scores", "def updateScores(rankedLists):\n docToRank = {}\n for rankedList in rankedLists:\n\n f = open(rankedList, 'r')\n for line in f:\n documentID = line.split()[2]\n docno = documentID\n score = float(line.split()[4])\n position = int(line.split()[3])\n docToRank[docno] = (position,score)\n f.close()\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n 
documents = db.documents.find({})\n for document in documents:\n key = document[\"query_id\"]+\"-\"+document[\"username\"]\n document['position'] = docToRank[key][0]\n document['score'] = docToRank[key][1]\n document['posted_document'] = document['current_document']\n db.documents.save(document)", "def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]", "def evaluate(self):\n scores = []\n scores.append(self.word_analogy())\n print(\"Word Analogy (acc): \", scores[0])\n scores.append(self.word_similarity())\n print(\"Word Similarity (MSE): \", scores[1])\n scores.append(self.concept_categorization())\n print(\"Concept Categorization (purity): \", scores[2])\n scores.append(self.sentiment_analysis())\n print(\"Sentiment Analysis (acc): \", scores[3])\n return scores", "def print_scores(self):\n print(\"scores: \", self.get_scores())", "def retrieve_scores(self, filenames, batches = []):\n \n def get_batch_id(filename):\n return int(filename[filename.rfind(\"_\") + 1:])\n \n data = []\n \n # Filenames have to be sorted to ensure correct batch is extracted\n filenames = sorted(filenames, key = get_batch_id)\n \n if not batches: \n for filename in filenames:\n data.append(self.load_batch_scores(filename))\n else:\n for entry in batches:\n try:\n data.append(self.load_batch_scores(filenames[entry]))\n except IndexError:\n print (\"Attempted to access filename of index\", entry)\n return data", "def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score", "def score_doc_list(docList):\n return [(vectorize_doc(doc), doc) for doc in docList]", "def get_sent_scores(tfidf_scores, text_sents, doc_info):\n sent_info = []\n\n for doc in doc_info:\n sent_score = 0\n for i in range(0, len(tfidf_scores)):\n temp_dict = tfidf_scores[i]\n if doc['doc_id'] == temp_dict['doc_id']:\n sent_score += temp_dict['TFIDF_Score']\n\n temp = {\n 'doc_id': doc['doc_id'],\n 'sent_score': sent_score,\n 'sentence': text_sents[doc['doc_id'] - 1]\n }\n sent_info.append(temp)\n\n return sent_info", "def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tself.scores = self.tf_score()\r\n\t\treturn self.scores", "def get_student_scores(student_information):\n return [\n student_information[1]\n , student_information[2]\n , student_information[3]\n , student_information[4]\n , student_information[5]\n ]", "def getDocScore(self, qryid, docid):\n retrievedDocs = self.getDocs(qryid)\n for pos in range(len(retrievedDocs)):\n if docid == retrievedDocs[pos]:\n return self.qryScores[qryid][pos]\n return 0", "def test_search_with_scoring(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1})\n add_document(\"foo\", {\"bar\": 2})\n add_document(\"foo\", {\"bar\": 3})\n\n # And I add scoring\n score = ScriptScore(\"s = 0 + doc['bar'].value\")\n t.score(score)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(3)\n results[0][\"_source\"][\"bar\"].should.equal(3)\n results[1][\"_source\"][\"bar\"].should.equal(2)\n results[2][\"_source\"][\"bar\"].should.equal(1)", "def retrieve_score_spread(database_connection: mysql.connector.connect\n ) -> List[Dict]:\n\n cursor = database_connection.cursor()\n query = (\"SELECT pm.panelistscore, COUNT(pm.panelistscore) \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE 
pm.panelistscore IS NOT NULL \"\n \"AND s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"GROUP BY pm.panelistscore \"\n \"ORDER BY pm.panelistscore ASC;\")\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n\n if not result:\n return None\n\n scores = []\n counts = []\n for row in result:\n scores.append(row[0])\n counts.append(row[1])\n\n return {\"scores\": scores, \"counts\": counts}", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def _get_scores(target, predicted):\n recall = scoring(target, predicted, metric=\"recall\")\n precision = scoring(target, predicted, metric=\"precision\")\n accuracy = scoring(target, predicted, metric=\"accuracy\")\n f_score = scoring(target, predicted, metric=\"f1\")\n\n return [recall, precision, accuracy, f_score]", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def count_score(self, docsInfo, avgdl, k1, b):\n\t\tdocScore = []\n\t\tfor doc in docsInfo:\n\t\t\tcurDocScore = 0\n\t\t\tfor queryWord in range(len(doc['meetCnt'])):\n\t\t\t\tTF = float(doc['meetCnt'][queryWord])\n\t\t\t\tfreaq = sum(1 for x in docsInfo if x['meetCnt'][queryWord])\n\t\t\t\tcurDocScore += self.count_IDF(len(docsInfo), freaq) * self.count_main_fraction(TF, k1, b, avgdl, doc['len'])\n\t\t\tdocScore.append(curDocScore)\n\t\treturn docScore", "def get_score(self):\n return tuple(self.score)", "def scores(datestring):\n return render_score_page(\"scores.html\", datestring, datestring)", "def find(self, words):\n for result in self.index.simple_search(self.name, words, field='content'):\n title = result['title']\n score = int(result.score)\n yield score, title", "def childScores(self):\n return [x.score for x in self.children]", "def get_scored_list(self, rows, wordids):\n totalscores = dict([(row[0], 0) for row in rows])\n # This is where we'll put our scoring functions\n weights = [(1.0, location_score(rows)),\n (1.0, frequency_score(rows)),\n (1.0, distance_score(rows)),\n (1.0, self.page_rank_score(rows)),\n (1.0, self.link_text_score(rows, wordids)),\n # (5.0, self.nnscore(rows, wordids))\n ]\n # Sum up weighted scores\n for (weight, scores) in weights:\n for url in totalscores:\n totalscores[url] += weight * scores[url]\n return totalscores", "def __call__(self, json_res):\r\n id2hyps = {\r\n res['clip_id']: [_remove_nonascii(res['descs'][0]['desc'].strip())]\r\n for res in json_res\r\n }\r\n id2hyps = self.tokenizer.tokenize(id2hyps)\r\n assert len(id2hyps) == len(self.id2refs)\r\n\r\n ret_scores = {}\r\n for scorer, method in self.scorers:\r\n print(f\"Computing {method} score...\")\r\n score, scores = scorer.compute_score(self.id2refs, id2hyps)\r\n if isinstance(method, list):\r\n for sc, scs, m in zip(score, scores, method):\r\n ret_scores[m] = sc * 100\r\n else:\r\n ret_scores[method] = score * 100\r\n\r\n return ret_scores", "def _section_scores(self, chapter_index, section_index):\r\n # This is CSS selector means:\r\n # Get the scores for the chapter at `chapter_index` and the section at `section_index`\r\n # Example text of the retrieved elements: \"0/1\"\r\n score_css = \"div.chapters>section:nth-of-type({0}) div.sections>div:nth-of-type({1}) div.scores>ol>li\".format(\r\n chapter_index, section_index\r\n )\r\n\r\n text_scores = self.q(css=score_css).text\r\n\r\n # Convert text scores to tuples of (points, max_points)\r\n return [tuple(map(int, score.split('/'))) for score in text_scores]", "def get_score(self, query: list, 
index: int, e: int = 0.5) -> float:\n score = 0.0\n total = sum(self.counts[index].values())\n for token in query:\n if token not in self.counts[index]:\n continue\n idf = math.log((self.document_count + e) / (self.token_docs[token] + e))\n score += (self.counts[index][token] / total) * idf\n\n return score", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def get_score(self):\r\n score = self.latest_score()\r\n return {'score': score if score is not None else 0,\r\n 'total': self._max_score}", "def compute_doc_scores(self, query_terms, inverted_indexes,\n doc_lengths, parameters):\n \n doc_scores = dict() # This is to contain each document's score\n for term in query_terms: # For each query term ...\n \n # Retrieve information regarding the current term\n term_info = inverted_indexes[term]\n n_docs_containing_term = len(term_info)\n \n # For each document that contains the term ...\n for cord_uid in term_info.keys():\n tf = term_info[cord_uid] # Retrieve the term frequency\n doc_length = doc_lengths[cord_uid] # Retrieve the document length\n \n # Compute document's score for this term\n score = self.compute_term_BM25(term, tf, n_docs_containing_term,\n Constants.doc_count,\n Constants.avg_doc_length, doc_length,\n parameters.k, parameters.b)\n \n # Store or increment the score\n if cord_uid in doc_scores:\n doc_scores[cord_uid] += score\n else:\n doc_scores[cord_uid] = score\n \n return doc_scores", "def all_scores(self):\r\n if not self.child_history:\r\n return None\r\n return [self.child_history[i].get('score') for i in xrange(0, len(self.child_history))]", "def view_scores(jenni, input):\n scores.view_scores(jenni, input)", "def rank_retrieve(self, query):\n k = 10 # notation used in videos\n scores = [0.0 for xx in range(len(self.titles))]\n # ------------------------------------------------------------------\n # TODO: Implement cosine similarity between a document and a list of\n # query words.\n\n # Right now, this code simply gets the score by taking the Jaccard\n # similarity between the query and every document.\n self.d_length = defaultdict(float)\n words_in_query = set()\n \n for word in query:\n words_in_query.add(word)\n\n query_words = Counter(words_in_query) # eliminate 'set' object not subscriptable for q_word_weight calculations\n \n for word in query:\n q_word_weight = 1 + math.log10(query_words[word])\n posting_list = self.get_posting(word)\n for doc in posting_list:\n scores[doc] += self.tfidf[doc][word] * q_word_weight # calculate numerator for scores\n\n for word in self.vocab:\n for doc in range(len(self.docs)):\n self.d_length[doc] += self.tfidf[doc][word] ** 2 # get d^2 for each document\n\n for doc in range(len(self.docs)):\n scores[doc] /= math.sqrt(self.d_length[doc]) # divide by denominator (normalizer sqrt(d^2) for each doc)\n # ------------------------------------------------------------------\n\n ranking = [idx for idx, sim in sorted(enumerate(scores),\n key = lambda xx : xx[1], reverse = True)]\n results = []\n for i in range(k):\n results.append((ranking[i], scores[ranking[i]]))\n return results", "def get_score(self):\n return self.score", "def get_scores(self):\n precision = self.right / self.count\n APs = self.right_labels / self.count\n mAP = np.mean(APs)\n distance = self.distance / self.count\n\n return {'precision': precision,\n 'APs': APs,\n 'mAP': mAP,\n 'distance': distance\n }", "def _get_scores_list(self):\n self.scores = dict()\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n score = 
open('{0}/{1}/{2}/extract_all.sort.uniq.txt'.format(self.path, subdir, DOCKING_RUN_FILES),'r').read().split()[-1]\n self.scores[subdir] = float(score.strip())\n except:\n pass", "def getScore(self):\r\n return self._score", "def get_data(self):\n\n self.cur.execute('SELECT year, sex, education, score from vocabulary_scores;')\n scores = dict()\n education = dict()\n count = dict()\n\n for row in self.cur :\n if row[0] in scores:\n if row[1] in scores[row[0]]:\n scores[row[0]][row[1]] += int(row[3])\n education[row[0]][row[1]] += int(row[2])\n count[row[0]][row[1]] += 1\n else:\n scores[row[0]][row[1]] = int(row[3])\n education[row[0]][row[1]] = int(row[2])\n count[row[0]][row[1]] = 1\n else:\n # scores[year] = {gender: score}\n scores[row[0]] = {row[1]: int(row[3])}\n education[row[0]] = {row[1]: int(row[2])}\n count[row[0]] = {row[1]: 1}\n\n scores, education = self.average_scores(scores, education, count)\n\n return scores, education", "def observe(self, scores, **fields):\n [self._scores.append({'value': s, 'index': i, **fields}) for s, i in zip(scores, self.index_scores)]\n for s in self.statistics:\n v = self.statistics[s](scores)\n self._results.append({'statistics': s, 'value': v, **fields})", "def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def get_score(self, student_answers):\r\n pass", "def __call__(self, query, texts, multilabel=True, workers=0):\n\n scores = []\n for q in [query] if isinstance(query, str) else query:\n # Pass (query, text) pairs to model\n result = self.pipeline([{\"text\": q, \"text_pair\": t} for t in texts], top_k=None, function_to_apply=\"none\", num_workers=workers)\n\n # Apply score transform function\n scores.append(self.function([r[0][\"score\"] for r in result], multilabel))\n\n # Build list of (id, score) per query sorted by highest score\n scores = [sorted(enumerate(row), key=lambda x: x[1], reverse=True) for row in scores]\n\n return scores[0] if isinstance(query, str) else scores", "def top_students(mongo_collection):\n students = mongo_collection.find()\n best_students = []\n for student in students:\n topics = student[\"topics\"]\n score = 0\n for topic in topics:\n score = score + topic[\"score\"]\n avg = score / len(topics)\n student[\"averageScore\"] = avg\n best_students.append(student)\n return sorted(best_students, key=lambda i: i[\"averageScore\"], reverse=True)", "def test_query_score(self):\n id = get_rand_string()\n\n # Same data and user_id\n user_id = data = get_rand_string()\n\n self.conn.add(id=id, user_id=user_id, data=data)\n self.conn.commit()\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents fetched, expected id:%s\" % (id))\n\n doc = results[0]\n\n self.assertTrue(\"score\" in doc, \"No score returned, doc:%s\" % repr(doc))\n self.assertTrue(isinstance(doc[\"score\"], float),\n \"Score should be a float instance, doc:%s\" % repr(doc))", "def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tfor urlid in self.urlids:\r\n\t\t \tsql = \"select pagerank from pagelink where urlid=%d\" % urlid\r\n\t\t\tpr = self.cur.execute(sql).fetchone()[0]\r\n\t\t\tself.scores[urlid] = 
pr\r\n\t\treturn self.scores", "def get_scores(self, X_val):\n \n if not self.clf_fit:\n raise RuntimeError('Call clf.fit before clf.predict.')\n \n # Create predictions from learners\n preds = list()\n for i in range(self.num_base_learners):\n pred = self.clfs[i].predict(X_val)\n preds.append(pred)\n \n # Average results\n preds = np.vstack(preds)\n preds = preds.T\n \n scores = list()\n for pred in preds:\n scores.append(float(sum(pred))/float(preds.shape[1]))\n \n return scores", "def parse_scores(option, table_num, language, min_score, max_score):\n html = query_by_lang(BASE_URL + '/' + option, language)\n table = html.find_all(\"table\")[table_num]\n freq_list = table.find_all(\"tr\")[2:]\n\n # Create the dictionary\n list_min = find_min(freq_list)\n list_max = find_max(freq_list)\n return create_dict(freq_list, list_min, list_max, min_score, max_score)", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def build_retrieved_list(self, scores):\n\n res = self.index.rank(scores)\n tmp_res = []\n # keep scores too\n tmp_scores = []\n\n # build the list\n tmp_res = []\n #print rank, \"<--\"\n for i, k in res:\n tmp_res.append( self.indices[i] )\n tmp_scores.append( k )\n\n\n # compute the difference with the difference\n diff = list(set(self.indices.values())-set(tmp_res))\n\n # shuffle to fill the rest of the list\n np.random.shuffle(diff)\n\n scores_diff = np.zeros( (len(diff,)) )\n\n final = []\n final_scores = []\n\n final.extend(tmp_res)\n final.extend(diff)\n\n final_scores.extend(tmp_scores)\n final_scores.extend(scores_diff)\n\n # remove extension for evaluation\n f = lambda x: x.split('.')[0]\n final = map(f, final)\n\n return final, final_scores", "def print_scores(self):\n ### FILL IN ###", "def get_results(self, nb_results=1000):\n\t\tdocs = self.searcher.search(self.constrained_query.build(), nb_results).scoreDocs\n\t\tself.constrained_query = BooleanQuery.Builder()\n\n\t\thits = []\n\t\tfor i in range(len(docs)):\n\t\t\thits.append({})\n\t\t\tfor field in self.reader.document(docs[i].doc).getFields():\n\t\t\t\thits[i][field.name()] = field.stringValue()\n\n\t\thits = self.remove_duplicates(hits)\n\t\treturn hits", "def get_user_scores(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist!')\n scores = Score.query(Score.user == user.key)\n return ScoreForms(items=[score.to_form() for score in scores])", "def get_user_scores(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist!')\n scores = Score.query(Score.user == user.key)\n return ScoreForms(items=[score.to_form() for score in scores])", "def score(self):", "def tf_score(self):\r\n\t\tscores = {}\r\n\t\tfor urlid in self.urlids:\r\n\t\t\turl_score = 0.0\r\n\t\t\tfor wordid in self.wordids:\r\n\t\t\t\tsql = \"select tf from wordinfo where urlid=%d and wordid=%d\" % (urlid,wordid)\r\n\t\t\t\ttf = self.cur.execute(sql).fetchone()[0]\r\n\t\t\t\turl_score += tf\r\n\t\t\tscores[urlid] = url_score\r\n\t\treturn scores", "def getScore(self):\n return self._score", "def get_scores(self):\n return SklearnModel.evaluate_no_ground_truth_classifier_metrics(self.X_test, self.predictions)", "def query(self, query, limit=10):\n try:\n rows, word_ids = self.get_match_rows(query)\n except OperationalError:\n print(\"No documents found for 
\\\"{}\\\"\".format(query))\n return\n\n scores = self.get_scored_list(rows, word_ids)\n\n ranked_scores = sorted([(score, url) for (url, score) in scores.items()], reverse=1)\n for (score, urlid) in ranked_scores[0: limit]:\n print(\"{}\\t{}\".format(score, self.get_url_name(urlid)))\n\n return word_ids, [r[1] for r in ranked_scores]", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_overall_score(self, user):\n\n quizzes = ['iq', 'math', 'english']\n\n prev_scores = []\n new_scores = []\n\n for quiz in quizzes:\n quiz_obj = self.get_object(quiz)\n queryset = self.get_queryset(user, quiz_obj)\n\n try:\n new_scores.append(queryset[0].marks)\n prev_scores.append(queryset[1].marks)\n except:\n new_scores.append(queryset[0].marks)\n prev_scores.append(0)\n\n import statistics\n\n return statistics.mean(prev_scores), statistics.mean(new_scores)", "def retrieve_grouped_scores(database_connection: mysql.connector.connect\n ) -> Dict:\n cursor = database_connection.cursor()\n query = (\"SELECT pm.panelistscore, COUNT(pm.panelistscore) \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE pm.panelistscore IS NOT NULL \"\n \"AND s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"GROUP BY pm.panelistscore \"\n \"ORDER BY pm.panelistscore ASC;\")\n cursor.execute(query)\n result = cursor.fetchall()\n\n if not result:\n return None\n\n scores = []\n for row in result:\n score = OrderedDict()\n score[\"score\"] = row[0]\n score[\"count\"] = row[1]\n scores.append(score)\n\n return scores", "def score(self, index, query, doc_id):\n return 1", "def __repr__(self):\n return self.scores()", "def score_tweets():\n\t\n\ts = -1\n\tstatus = 'Error'\n\treason = \"\"\n\ttid = -1\n\ttjson = request.json['tweetJSON']\n\tbatchResult = []\n\n\tfor tweet in tjson:\n\t\ttry:\t\t\n\t\t\ts = model.score(tweet)\n\t\t\tstatus = 'OK'\n\t\t\ttobj = json.loads(tweet)\n\t\t\ttid = tobj['id']\n\n\t\texcept:\n\t\t\treason = \"Error loading json.\"\n\n\t\tbatchResult.append({ \n\t\t\t\t\t 'status' : status,\n\t\t\t\t\t 'score' : s,\n\t\t\t\t\t 'tid' : tid,\n\t\t\t\t\t 'reason' : reason\n\t\t\t\t\t })\n\n\treturn jsonify({\n\t\t\t'batchResult' : batchResult\n\t\t})", "def scoring(self):\n pass", "def perform_get_scores(responder, options):\n match = options['<match-id>']\n all_scores = scores.get_match_scores(match)\n\n if options.get(yaml_opt, False):\n responder(yaml.dump({'scores': all_scores}))\n else:\n if all_scores is None:\n responder('No scores available for match {0}'.format(match))\n else:\n for tla, score in all_scores.iteritems():\n responder('Team {0} scored {1} in match {2}'.format(tla, score, match))", "def top_students(mongo_collection):\n all_items = mongo_collection.find({})\n for item in all_items:\n count = 0\n new_topics = item\n for sta in item.get(\"topics\"):\n count += sta.get(\"score\")\n averageScore = count/len(item.get(\"topics\"))\n\n myquery = {\"name\": item.get(\"name\")}\n newvalues = {\"$set\": {\"averageScore\": averageScore}}\n mongo_collection.update_many(myquery, newvalues)\n\n order = mongo_collection.find().sort(\"averageScore\", DESCENDING)\n\n return order", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def scores_statistics(self, scores):\r\n aux_scores = np.array(scores)\r\n return np.mean(aux_scores), np.median(aux_scores), np.std(aux_scores)", "def score_tweets(objects):\n scores = {}\n for tweet in objects:\n data = 
tweet._json\n rt = data['retweet_count']\n fave = data['favorite_count']\n fol = data['user']['followers_count']\n weight = 1.5\n score = ((weight * rt + fave) / (fol / 2)) * 1000\n scores[score] = data['id']\n embeds = []\n for item in sorted(scores.items(), reverse=True)[:13]: #sorted returns tuple\n embed = twitter.get_oembed(id=item[1],align='center')\n embeds.append(embed['html'])\n return embeds", "def test_search_with_scoring_and_lang(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1})\n add_document(\"foo\", {\"bar\": 2})\n add_document(\"foo\", {\"bar\": 3})\n\n # And I add scoring with a language\n score = ScriptScore(\"s = 0 + doc['bar'].value\", lang=\"mvel\")\n t.score(score)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(3)\n results[0][\"_source\"][\"bar\"].should.equal(3)\n results[1][\"_source\"][\"bar\"].should.equal(2)\n results[2][\"_source\"][\"bar\"].should.equal(1)", "def values(self):\n return self.docs.values()", "def show_scores(self):\n for text in self.score_text:\n text.draw()" ]
[ "0.724398", "0.70670176", "0.69395506", "0.69351584", "0.6927607", "0.6664462", "0.6543519", "0.6535992", "0.6532264", "0.6494205", "0.64925236", "0.64834094", "0.64812046", "0.6451317", "0.644733", "0.64114875", "0.6407428", "0.6324462", "0.6300784", "0.6266783", "0.6252493", "0.6239873", "0.6233418", "0.622684", "0.61933684", "0.6193087", "0.6188192", "0.61708856", "0.61518276", "0.6134346", "0.61278516", "0.61249036", "0.61005694", "0.6087067", "0.6087042", "0.608402", "0.605959", "0.6057428", "0.60552484", "0.60263085", "0.6021684", "0.6017121", "0.5985737", "0.5967752", "0.5947992", "0.59419554", "0.59359556", "0.5881332", "0.5879152", "0.5852831", "0.585138", "0.58425444", "0.5839578", "0.5822192", "0.582209", "0.5819368", "0.5810432", "0.58022475", "0.5794606", "0.5793412", "0.5793313", "0.5786713", "0.5786337", "0.5783148", "0.577964", "0.5762789", "0.5754577", "0.5754084", "0.573821", "0.5735182", "0.57285273", "0.57285273", "0.57285273", "0.5719674", "0.57194436", "0.57172734", "0.5702812", "0.5702812", "0.57025266", "0.5695243", "0.5693348", "0.5692983", "0.56852245", "0.5684499", "0.5684499", "0.5684499", "0.5666264", "0.5666184", "0.56631976", "0.56603557", "0.5658009", "0.56549543", "0.5653917", "0.5643637", "0.5642714", "0.5640456", "0.5630722", "0.56279343", "0.5621964", "0.56207335" ]
0.5750148
68
Return document retrieval score. If docid doesn't exist in the list, then method returns 0 score.
def getDocScore(self, qryid, docid): retrievedDocs = self.getDocs(qryid) for pos in range(len(retrievedDocs)): if docid == retrievedDocs[pos]: return self.qryScores[qryid][pos] return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, index, query, doc_id):\n return 1", "def score(self, index, query, doc_id):\n query_vec = self._get_query_representation(query, index)\n doc_vec = self._get_document_representation(doc_id, index)\n return self._similarity(query_vec, doc_vec)", "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score", "def _get_relevance(self, doc_score: float) -> int:\n if not isinstance(doc_score, float):\n raise TypeError(\"doc_score must be float.\")\n if doc_score < 0.0 or doc_score > 1.0:\n raise ValueError(\"doc_scores must be between 0.0 and 1.0.\")\n\n relv = -1\n for i in range(len(self.intervals) - 1):\n if self.intervals[i] <= doc_score < self.intervals[i + 1]:\n relv = i\n return len(self.intervals) - 2 if relv == -1 else relv", "def test_score_across_multiple_documents(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Syria reacts to Erdogan's threats: Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor(tokenizer=tokenizer)\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=False)\n self.assertEqual(3 * math.log(10 / 1, 10), scores.get('erdogan'))\n self.assertEqual(3 * math.log(10 / 2, 10), scores.get('threats'))", "def get_score(self, query: list, index: int, e: int = 0.5) -> float:\n score = 0.0\n total = sum(self.counts[index].values())\n for token in query:\n if token not in self.counts[index]:\n continue\n idf = math.log((self.document_count + e) / (self.token_docs[token] + e))\n score += (self.counts[index][token] / total) * idf\n\n return score", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def score(self, index, query, doc_id):\n rho = 1\n doc_length = index.get_document_length(doc_id)\n total_doc_length = index.get_total_document_length()\n for term in query:\n frequency = index.get_term_frequency(term, doc_id)\n total_frequency = index.get_total_term_frequency(term)\n try:\n rho += np.log(self._term_probability(frequency, total_frequency, doc_length, total_doc_length))\n except ValueError:\n rho += 0\n return float(rho)", "def score(self, index, query, doc_id):\n rho = 1\n doc_length = index.get_document_length(doc_id)\n total_doc_length = index.get_total_document_length()\n for term in query:\n frequency = index.get_term_frequency(term, doc_id)\n total_frequency = index.get_total_term_frequency(term)\n try:\n rho += np.log(self._term_probability(frequency, total_frequency, doc_length, total_doc_length))\n except ValueError:\n rho += 0\n return float(rho)", "def score_doc_list(docList):\n return [(vectorize_doc(doc), doc) for doc in docList]", "def score(self, doc, c):\n # >>> YOUR ANSWER HERE\n # the inner loop in the TEST NAIVE BAYES, sum up the logprior of the class and all words' loglikelihood\n sum = self.logprior[c]\n words = doc.split()\n for w in words:\n if w in self.vocabulary:\n sum += 
self.loglikelihood[(w, c)]\n return sum\n # >>> END YOUR ANSWER", "def get_scores(self, query):\n score = np.zeros(self.data['corpus_size'])\n doc_len = np.array(self.data['doc_len'])\n for q in query:\n q_freq = np.array([(doc.get(q) or 0) for doc in self.data['doc_freqs']])\n score += (self.data['idf'].get(q) or 0) * (q_freq * (self.data['k1'] + 1) /\n (q_freq + self.data['k1'] * (\n 1 - self.data['b'] + self.data['b'] * doc_len /\n self.data['average_docs_len'])))\n return score", "def count_score(self, docsInfo, avgdl, k1, b):\n\t\tdocScore = []\n\t\tfor doc in docsInfo:\n\t\t\tcurDocScore = 0\n\t\t\tfor queryWord in range(len(doc['meetCnt'])):\n\t\t\t\tTF = float(doc['meetCnt'][queryWord])\n\t\t\t\tfreaq = sum(1 for x in docsInfo if x['meetCnt'][queryWord])\n\t\t\t\tcurDocScore += self.count_IDF(len(docsInfo), freaq) * self.count_main_fraction(TF, k1, b, avgdl, doc['len'])\n\t\t\tdocScore.append(curDocScore)\n\t\treturn docScore", "def score_doc2vec_model(override=True):\n\tstandard = load_gold_standard()\n\tdocs, files = create_documents(standard)\n\tprint (docs)\n\n\t# if override or not os.path.isfile('doc2vec.model'):\n\t# \tprint(\"hello\")\n\t# \tgenerate_model(docs)\n\n\t# model = models.Doc2Vec.load('doc2vec.model')\n\t\n\t# precision_score = 0.0\n\t# recall_score = 0.0\n\t# for f in files:\n\t# \tcounts, all_events = standard[f]\n\t# \trandom.shuffle(all_events)\n\t# \tselected_types = all_events[:MIN_EVENT_TYPES]\n\n\t# \tcandidate_docs = [c for c, _ in model.docvecs.most_similar([f], topn=TOP_K)]\n\t\t\n\t# \tprecision_score += precision_score_candidate_match(standard, selected_types, candidate_docs)\n\t# \trecall_score += recall_score_candidate_match(standard, selected_types, candidate_docs)\n\n\t# print(\"Doc2Vec precision: \" + str(precision_score/len(files)))\n\t# print(\"Doc2Vec recall: \" + str(recall_score/len(files)))", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def get_positional_score(term, document):\n score = 0\n number_of_words_in_doc = len(document.split())\n\n for position, word in enumerate(document.split()):\n if word.lower() == term.lower():\n score += (number_of_words_in_doc - position) / number_of_words_in_doc\n \n return round(score, 5)", "def rankDocuments(terms, docs, index, idf, tf, rt, likes, score):\n \n # init docvectors and queryvector to dict and array of 0, to be filled later\n docVectors=collections.defaultdict(lambda: [0]*len(terms)) \n queryVector=[0]*len(terms) \n\n if score == \"1\":\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. 
\n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf*idf(normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm * idf[term]\n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex] * idf[term]\n # calculate the score of each doc\n # compute the cosine similarity between queyVector and each docVector:\n docScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n else:\n # as we just want cosine similarity but not use tf-idf, we're using the term frequency as a weight\n # in our custom ranking\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. \n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf (normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm \n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex]\n # calculate the score of each doc\n # compute the cosine similarity and add rt and fav score\n # rt brings to more visibility than a like, hence a higher score\n docScores=[ [np.dot(curDocVec, queryVector) + 1.5*rt[doc] + likes[doc], doc] for doc, curDocVec in docVectors.items() ]\n docScores.sort(reverse=True)\n resultDocs=[x[1] for x in docScores]\n if len(resultDocs) == 0:\n print(\"No results found, try again\")\n return None \n return resultDocs", "def test_query_score(self):\n id = get_rand_string()\n\n # Same data and user_id\n user_id = data = get_rand_string()\n\n self.conn.add(id=id, user_id=user_id, data=data)\n self.conn.commit()\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents fetched, expected id:%s\" % (id))\n\n doc = results[0]\n\n self.assertTrue(\"score\" in doc, \"No score returned, doc:%s\" % repr(doc))\n self.assertTrue(isinstance(doc[\"score\"], float),\n \"Score should be a float instance, doc:%s\" % repr(doc))", "def getScore(data):\n return score", "def score(self, searcher, fieldnum, text, docnum, weight, QTF = 1):\n raise NotImplementedError", "def updateScores(rankedLists):\n docToRank = {}\n for rankedList in rankedLists:\n\n f = open(rankedList, 'r')\n for line in f:\n documentID = line.split()[2]\n docno = documentID\n score = float(line.split()[4])\n position = int(line.split()[3])\n docToRank[docno] = (position,score)\n f.close()\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n documents = db.documents.find({})\n for document in documents:\n key = document[\"query_id\"]+\"-\"+document[\"username\"]\n document['position'] = docToRank[key][0]\n document['score'] = docToRank[key][1]\n document['posted_document'] = document['current_document']\n db.documents.save(document)", "def test_search_with_scoring(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", 
{\"bar\": 1})\n add_document(\"foo\", {\"bar\": 2})\n add_document(\"foo\", {\"bar\": 3})\n\n # And I add scoring\n score = ScriptScore(\"s = 0 + doc['bar'].value\")\n t.score(score)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(3)\n results[0][\"_source\"][\"bar\"].should.equal(3)\n results[1][\"_source\"][\"bar\"].should.equal(2)\n results[2][\"_source\"][\"bar\"].should.equal(1)", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tself.scores = self.tf_score()\r\n\t\treturn self.scores", "def get_score(self):\r\n return None", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def queryFscore(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):\n\n\t\tfscore = -1\n\n\t\t#Fill in code here\n\t\tprecision = self.queryPrecision(query_doc_IDs_ordered, query_id, true_doc_IDs, k)\n\t\trecall = self.queryRecall(query_doc_IDs_ordered, query_id, true_doc_IDs, k)\n\n\t\tif precision == 0 or recall == 0:\n\t\t\tfscore = 0\n\t\telse:\n\t\t\tfscore = 2*precision*recall/(precision + recall)\n\n\t\treturn fscore", "def queryRecall(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):\n\n\t\trecall = -1\n\n\t\t#Fill in code here\n\t\tn_rel = len(true_doc_IDs)\n\t\tret_rel = 0 # no. of relevant documents retrieved\n\t\tfor docID in query_doc_IDs_ordered[:k]:\n\t\t\tif docID in true_doc_IDs:\n\t\t\t\tret_rel += 1\n\n\t\trecall = ret_rel/n_rel\n\n\t\treturn recall", "def get_document(self, docid):\n raise NotImplementedError", "def score(self):\n return None", "def compute_idf(doc_info, freq_dict_all):\n idf_scores = []\n counter = 0\n\n for temp_dict in freq_dict_all:\n counter += 1\n\n for k in temp_dict['freq_dict'].keys():\n count = sum([k in tempdict['freq_dict'] for tempdict in freq_dict_all])\n temp = {\n 'doc_id': counter,\n 'IDF_Score': math.log(len(doc_info) / count),\n 'key': k\n }\n\n idf_scores.append(temp)\n\n return idf_scores", "def current_word_PL(current_word, file_reader_last_read_list, doc_dict, nb_doc):\n word_posting_list = {} # { key = doc , value = score }\n for idx, file_reader_last_read in enumerate(file_reader_last_read_list):\n if file_reader_last_read[\"last_read\"][\"word\"] == current_word:\n docs = file_reader_last_read[\"last_read\"][\"doc_score_list\"]\n add_doc_in_posting_list(word_posting_list=word_posting_list, docs=docs)\n file_reader_last_read_list[idx]=read_line_and_update(file_reader_and_last_read=file_reader_last_read)\n for key, value in word_posting_list.items():\n tf = float(value) / doc_dict[int(key)]\n idf = math.log((float(nb_doc)/len(word_posting_list)),2)\n score = tf*idf\n word_posting_list[key]=score \n word_posting_list = sort_and_cast_doc_in_posting_list(word_posting_list=word_posting_list)\n return word_posting_list", "def score(pid, cid=None, decay=True):\n return cls.decay(cls.papers())[pid]['score']", "def doc_likelihood(self):\n ranked = []\n for id in self.prefiltered_docs:\n res_doc = Result(id, 1)\n for term in self.query_terms:\n res_doc.doc_likelihood *= self.inv_idx.local_unigram(term, id)\n ranked.append(res_doc)\n return sorted(ranked, key=lambda x: x.doc_likelihood, reverse=True)", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def document_number(self, **kw):\r\n for docnum in self.document_numbers(**kw):\r\n return docnum", "def 
get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def score(self, n):\r\n \r\n if self.scores:\r\n return self.scores[n]\r\n else:\r\n return None", "def meanRecall(self, doc_IDs_ordered, query_ids, qrels, k):\n\n\t\tmeanRecall = -1\n\n\t\t#Fill in code here\n\t\tif self.relevances is None:\n\t\t\tself.relevances = get_relevances(qrels)\n\n\t\tsum_Recall = 0\n\t\tfor i, query_id in enumerate(query_ids):\t\n\t\t\ttrue_doc_IDs = list(self.relevances[query_id].keys())\n\t\t\tsum_Recall += self.queryRecall(doc_IDs_ordered[i], query_id, true_doc_IDs, k)\n\n\t\tmeanRecall = sum_Recall/len(query_ids)\n\t\treturn meanRecall", "def get_score(self):\n return self.score", "def getTokenCollectionFrequency(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.word_to_docs_path, 'rb') as bin:\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # skip documents:\n int.from_bytes(bin.read(4 * frequency), 'big')\n if wordid_in_file == wordid:\n return frequency\n return 0", "def get_scores(self):\n return self.score", "def score(item, fd, key):\n return fd.get(key(item), 0)", "def current_word_PL(current_word, file_reader_last_read_list, doc_dict, nb_doc):\n word_posting_list = {} # { key = doc , value = score }\n for idx, file_reader_last_read in enumerate(file_reader_last_read_list):\n if file_reader_last_read[\"last_read\"][\"word\"] == current_word:\n docs = file_reader_last_read[\"last_read\"][\"doc_score_list\"]\n add_doc_in_posting_list(word_posting_list=word_posting_list, docs=docs)\n file_reader_last_read_list[idx]=read_line_and_update(file_reader_and_last_read=file_reader_last_read)\n for key, value in word_posting_list.items():\n tf = float(value) / doc_dict[int(key)]\n idf = math.log((float(nb_doc)/len(word_posting_list)),10)\n score = (tf*idf)\n word_posting_list[key]=score\n word_posting_list = sort_and_cast_doc_in_posting_list(word_posting_list=word_posting_list)\n return word_posting_list", "def mostRelevantDocs(textToCompare, numResults):\n\n from gensim import corpora, models, similarities\n import logging\n from getDocSparseVector import getDocumentCorpus, cleanAndTokenize\n import cPickle as pickle\n\n #reload(getDocSparseVector)\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n #Use heirarchical dirichlet allocation topic modeling from gensim to compute the relevance between documents\n \n \n documentDictionary = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/documentDictionary.p\", \"rb\"))#load document dictionary\n corpus = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/corpus.p\", \"rb\")) #load corpus\n hdp = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/hdp.p\", \"rb\"))#load hdp model\n documents = pickle.load(open(\"/Users/Larry/Code/EpistemicAssistant/relevanceComputations/documents.p\", \"rb\"))#load documents\n \n #Cleans and tokenizes the input text \"cleanAndTokenize\"\n mainDocument = documentDictionary.doc2bow(cleanAndTokenize(textToCompare))\n \n 
corpusHdp = hdp[corpus]\n mainDocumentHdp = hdp[mainDocument]\n num_feat = len(documentDictionary.values()) #To get rid of warning, manually retreive dictionary feature size\n similarityIndex = similarities.MatrixSimilarity(corpusHdp, num_features=num_feat)\n sims = similarityIndex[mainDocumentHdp]\n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n \n topNum=numResults; #The number of documents to use as the top matches\n topSims=sims[0:topNum]\n topDocs = []\n for sims in topSims:\n topDocs.append(documents[sims[0]])\n return topDocs #returns the most relevant documents to the textToCompare", "def get_score(self):\n return self.__score", "def meanFscore(self, doc_IDs_ordered, query_ids, qrels, k):\n\n\t\tmeanFscore = -1\n\n\t\t#Fill in code here\n\t\tif self.relevances is None:\n\t\t\tself.relevances = get_relevances(qrels)\n\n\t\tsum_Fscore = 0\n\t\tfor i, query_id in enumerate(query_ids):\t\t\t\n\t\t\ttrue_doc_IDs = list(self.relevances[query_id].keys())\n\t\t\tsum_Fscore += self.queryFscore(doc_IDs_ordered[i], query_id, true_doc_IDs, k)\n\n\t\tmeanFscore = sum_Fscore/len(query_ids)\n\n\t\treturn meanFscore", "def add_doc_in_posting_list(word_posting_list, docs):\n for doc_score in docs:\n if doc_score[\"doc\"] in word_posting_list.keys():\n word_posting_list[doc_score[\"doc\"]] = int(doc_score[\"score\"]) + int(word_posting_list[doc_score[\"doc\"]])\n else:\n word_posting_list[doc_score[\"doc\"]] = doc_score[\"score\"]", "def getScore(self):\r\n return self._score", "def get_score(self, collation):\n score = 0\n\n if not collation:\n return 0\n key = b'score:' + collation.header.hash\n\n fills = []\n\n while key not in self.db and collation is not None:\n fills.insert(0, collation.header.hash)\n key = b'score:' + collation.header.parent_collation_hash\n collation = self.get_parent(collation)\n\n score = int(self.db.get(key))\n log.debug('int(self.db.get(key)):{}'.format(int(self.db.get(key))))\n\n for h in fills:\n key = b'score:' + h\n score += 1\n self.db.put(key, str(score))\n\n return score", "def document_exists(self, docid):\n raise NotImplementedError", "def get_top_k(weight_query, doc_dict, k):\n \n # find fraction of all inlinks to doc_id\n total_num_inlinks = 0\n frac_inlinks = {}\n with open(num_inlinks_file) as f:\n doc_ids_set = doc_dict.keys()\n for i, line in enumerate(f):\n total_num_inlinks += int(line.strip())\n if i in doc_ids_set:\n frac_inlinks[i] = int(line.strip())\n \n\n for doc_id, frac in frac_inlinks.items():\n frac_inlinks[doc_id] = frac / total_num_inlinks\n\n # calculate score\n # score = alpha * frac_inlinks + (1 - alpha) * cosine similarity\n alpha = 0.5\n score = {}\n for doc_id, weight_doc in doc_dict.items():\n cosine_score = 0\n for term, weight in weight_doc.items():\n cosine_score += weight_doc[term] * weight_query[term]\n score[doc_id] = alpha * frac_inlinks[doc_id] + (1 - alpha) * cosine_score\n \n # sort based on score, high to low\n sorted_score = OrderedDict( sorted(score.items(), key=lambda t: t[1], reverse=True) )\n \n # type(top_k) == {doc_id: [score, \"doc_text\"]}\n # note top_k is not sorted based on score!\n top_k = {}\n num_results = 0\n for doc_id, score in sorted_score.items():\n num_results += 1\n top_k[doc_id] = [score, \"\"]\n if num_results == k:\n break\n return top_k", "def get_score(self):\n\n return self._score", "def similar_docs(self, doc=None, docs=[], count=10):\n #import ipdb; ipdb.set_trace()\n if doc is not None:\n docs = [doc]\n docs = [text_utils.lemmatize_text(doc) for doc in docs]\n vec = 
self.vectorizer.transform(docs)\n tvec = self.transformer.transform(vec)\n sims, docids = self.knn.kneighbors(tvec, return_distance=True)\n #return [self.docs[docid] for docid in docids[0][:count]], [1-sim for sim in sims[0][:count]]\n results = []\n for idx in range(len(docids[0])):\n docid = docids[0][idx]\n results.append({\n \"id\": docid,\n \"text\": self.docs[docid],\n \"score\": 1-sims[0][idx], #distance to similarity\n })\n results = sorted(results, key=lambda x: -x[\"score\"])\n return results[:count]", "def score_candidates(self,\n cand_list: List[Union[CandidateEntry, Tuple[str, float]]],\n query_info_obj_or_dict: Union[DataEntryFields, dict]) -> Dict[str, float]:\n query_text = self.get_query_text(query_info_obj_or_dict)\n\n if self.text_proc_obj_query is not None:\n query_text = self.text_proc_obj_query(query_text)\n\n query_text = self.handle_case(query_text)\n query_toks = query_text.split()\n query_terms_idfs = {w: self.calc_idf(w) for w in set(query_toks)}\n\n res = {}\n\n for doc_id, score in cand_list:\n doc_text = self.fwd_indx.get_doc_text(doc_id)\n if self.text_proc_obj_doc is not None:\n doc_text = self.text_proc_obj_doc(doc_text)\n doc_text = self.handle_case(doc_text)\n doc_toks = doc_text.split()\n doc_len = len(doc_toks)\n counts = Counter(doc_toks)\n score = 0\n for qterm in query_toks:\n tf = counts[qterm]\n if tf > 0:\n qidf = query_terms_idfs[qterm]\n norm_tf = (tf * (self.k1 + 1)) / \\\n (tf + self.k1 * (1 - self.b + self.b * doc_len * self.inv_avg_doc_len))\n score += qidf * norm_tf\n\n res[doc_id] = score\n\n return res", "def compute_doc_scores(self, query_terms, inverted_indexes,\n doc_lengths, parameters):\n \n doc_scores = dict() # This is to contain each document's score\n for term in query_terms: # For each query term ...\n \n # Retrieve information regarding the current term\n term_info = inverted_indexes[term]\n n_docs_containing_term = len(term_info)\n \n # For each document that contains the term ...\n for cord_uid in term_info.keys():\n tf = term_info[cord_uid] # Retrieve the term frequency\n doc_length = doc_lengths[cord_uid] # Retrieve the document length\n \n # Compute document's score for this term\n score = self.compute_term_BM25(term, tf, n_docs_containing_term,\n Constants.doc_count,\n Constants.avg_doc_length, doc_length,\n parameters.k, parameters.b)\n \n # Store or increment the score\n if cord_uid in doc_scores:\n doc_scores[cord_uid] += score\n else:\n doc_scores[cord_uid] = score\n \n return doc_scores", "def test_query_no_score(self):\n id = get_rand_string()\n\n # Same data and user_id\n user_id = data = get_rand_string()\n\n self.conn.add(id=id, user_id=user_id, data=data)\n self.conn.commit()\n\n results = self.conn.query(\"id:\" + id, score=False).results\n\n self.assertEquals(len(results), 1,\n \"No documents fetched, expected id:%s\" % (id))\n\n doc = results[0]\n\n self.assertTrue(\"score\" not in doc,\n \"No score should be returned, doc:%s\" % repr(doc))", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def getScore(self):\n return self._score", "def normalize_doc_scores():\n# doc_res_files_path = base_path+r\"claimLM_docLM_doc_ret_output\"\n claims_file_counters_dict = {} #for each claim numas key, have the val a counter - if not 110 per claim -> problem!\n doc_res_files_path = linux_base_path+\"/claimLM_docLM_doc_ret_output\"\n# doc_res_files_path = base_path +\"\\\\claimLM_docLM_doc_ret_output\"\n 
for filename in os.listdir(doc_res_files_path):\n# filename = r\"C:\\study\\technion\\MSc\\Thesis\\Y!\\support_test\\baseline_clmLMdocLM\\claimLM_docLM_doc_ret_output\\doc_res_alpha_0_beta_0.2_clm_47\"\n print \"filename:\"+filename\n doc_score_dict = {} # key is docno, val is the exp(score)\n curr_claim = filename.split(\"_clm_\")[1]\n curr_alpha = filename.split(\"_alpha_\")[1].split(\"_beta_\")[0]\n curr_beta = filename.split(\"_beta_\")[1].split(\"_clm_\")[0]\n curr_dict_name = \"docs_scores_norm_alpha_\"+curr_alpha+\"_beta_\"+curr_beta+\"_clm_\"+curr_claim+\"_dict\"\n try:\n# if os.path.exists(base_path+\"\\\\docs_norm_scores_dicts\\\\\"+curr_dict_name+\"_sorted\"):\n# print curr_dict_name +\" already there\"\n# continue\n# else:\n# print \"applying on \"+curr_dict_name\n # check if the curr alpha beta dict exists already\n doc_file = open(doc_res_files_path+\"/\"+filename,'r')\n doc = doc_file.read().strip() # score\n scores_sum = 0.0\n if curr_claim in claims_file_counters_dict.keys():\n claims_file_counters_dict[curr_claim] += 1 \n else:\n claims_file_counters_dict[curr_claim] = 1\n for i, line in enumerate(doc.split('\\n')):\n data = line.split(' ')\n query_Id = data[0]\n doc_id = data[2]\n norm_score = math.exp(float(data[4]))\n scores_sum += norm_score\n if os.path.exists(curr_dict_name) == True:\n doc_score_dict = read_pickle(curr_dict_name)\n if doc_id in doc_score_dict:\n raise Exception(\"DOC ID %s already in dict\" % doc_id)\n doc_score_dict[query_Id,doc_id] = norm_score\n # divide by scores_sum\n for ((query_Id,doc_id),score) in doc_score_dict.items():\n new_score = float(float(score)/float(scores_sum))\n doc_score_dict[query_Id,doc_id] = new_score\n #rank according to score\n doc_score_dict_sorted = collections.OrderedDict(sorted(doc_score_dict.items(), key= lambda x: (-int(x[0][0]),x[1]),reverse=True))\n save_pickle(linux_base_path+\"/\"+\"docs_norm_scores_dicts/\"+curr_dict_name+\"_sorted\",doc_score_dict_sorted)\n# save_pickle(base_path+ \"\\\\docs_norm_scores_dicts\"+curr_dict_name+\"_sorted\",doc_score_dict_sorted)\n except Exception as err: \n sys.stderr.write('problem in normalize_doc_scores in file:'+ filename) \n print err.args \n print err \n for (claim_num,counter) in claims_file_counters_dict.items():\n if counter!=110:\n print claim_num+\" not 110 files , but \" +str(counter) +\" files\"", "def get_score(self):\r\n return self.lcp.get_score()", "def queryPrecision(self, query_doc_IDs_ordered, query_id, true_doc_IDs, k):\n\n\t\tprecision = -1\n\n\t\t#Fill in code here\n\t\trel_ret = 0 # no. 
of relevant docs retrieved\n\n\t\tfor docID in query_doc_IDs_ordered[:k]:\n\t\t\tif docID in true_doc_IDs:\n\t\t\t\trel_ret += 1\n\n\t\tprecision = rel_ret/k\n\n\t\treturn precision", "def score(self) -> int:\n return self._score", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def computeDocsSimilarities(model, docs):\n\n nDocs = len(docs)\n vals = [ [0.0 for i in range(nDocs)] for j in range(nDocs)]\n for doc_id in range(nDocs):\n inferred_vector = model.docvecs[doc_id]\n # inferred_vector = model.infer_vector(docs[doc_id].words)\n sims = model.docvecs.most_similar([inferred_vector], topn =\n len(model.docvecs))\n\n # store similarity values in a matrix\n # Note: We are storing DISTANCES, not similarities\n for i,j in sims:\n if vals[doc_id][i] == 0.0:\n vals[doc_id][i] = round(1.0-j,4) # round is needed to symmetry\n vals[i][doc_id] = round(1.0-j,4) # round is needed to symmetry\n\n\n fullpath = path.join(prefix,results_folder, project_folder)\n fullname = fullpath + simDoc2VecMatrixFile\n # save similarity matrix on disk\n f = open(fullname, \"w\")\n for i in range(nDocs):\n for j in range(nDocs):\n f.write(\"{0:4.2f}\\t\".format(vals[i][j]))\n f.write(\"\\n\")\n f.close()\n\n print(\"... similarity written on disk file '\",simDoc2VecMatrixFile,\"' \")\n\n return vals", "def rank_retrieve(self, query):\n k = 10 # notation used in videos\n scores = [0.0 for xx in range(len(self.titles))]\n # ------------------------------------------------------------------\n # TODO: Implement cosine similarity between a document and a list of\n # query words.\n\n # Right now, this code simply gets the score by taking the Jaccard\n # similarity between the query and every document.\n self.d_length = defaultdict(float)\n words_in_query = set()\n \n for word in query:\n words_in_query.add(word)\n\n query_words = Counter(words_in_query) # eliminate 'set' object not subscriptable for q_word_weight calculations\n \n for word in query:\n q_word_weight = 1 + math.log10(query_words[word])\n posting_list = self.get_posting(word)\n for doc in posting_list:\n scores[doc] += self.tfidf[doc][word] * q_word_weight # calculate numerator for scores\n\n for word in self.vocab:\n for doc in range(len(self.docs)):\n self.d_length[doc] += self.tfidf[doc][word] ** 2 # get d^2 for each document\n\n for doc in range(len(self.docs)):\n scores[doc] /= math.sqrt(self.d_length[doc]) # divide by denominator (normalizer sqrt(d^2) for each doc)\n # ------------------------------------------------------------------\n\n ranking = [idx for idx, sim in sorted(enumerate(scores),\n key = lambda xx : xx[1], reverse = True)]\n results = []\n for i in range(k):\n results.append((ranking[i], scores[ranking[i]]))\n return results", "def get_score(self):\n\n sql = \"SELECT score FROM Users WHERE username = '\" + self.username + \"'\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()[0][0]", "def document_count(self):\n return self.client.scard(self.dbprefix + 'docs')", "def retrieve_documents(s, db):\n terms = ['\"%s\"' %stem(term) for term in tokenize(s)]\n \n conn = sqlite3.connect(db)\n c = conn.cursor()\n c.execute('''SELECT docs, tfs FROM inv_index \n WHERE term IN (%s)''' %(','.join(terms)))\n res = c.fetchall()\n\n if not res:\n return None\n \n # if only one result, get the doc(s) with highest tf\n if len(res) == 1:\n doc_ids = str_to_int_vect(res[0][0])\n tfs = str_to_int_vect(res[0][1])\n doc_ids = [doc_ids[i] for i in 
get_max_indexes(tfs)]\n else:\n # multiple results, get the intersection of doc ids\n sets = [set(str_to_int_vect(d)) for d, _ in res]\n doc_ids = list(set.intersection(*sets))\n\n # if no intersection, then return the documents with highest tf-idf\n if len(doc_ids) == 0:\n c.execute('SELECT id FROM docs')\n n = len(c.fetchall())\n for d, t in res:\n tf_idf = tfidf(n, len(str_to_int_vect(d)), str_to_int_vect(t))\n doc_ids += get_max_indexes(tf_idf)\n \n doc_ids = [str(i) for i in doc_ids]\n c.execute('''SELECT doc FROM docs WHERE id IN (%s)''' %(','.join(doc_ids)))\n return [res[0] for res in c.fetchall()]", "def calculate_score(self):\n try:\n self.score = self.__total_comment_score / float(self.num_comments)\n except ZeroDivisionError:\n self.score = float(0)", "def score(self):", "def get_word_frequency(self, word, document):\n return self.dictionary[word]['docs'][document]", "def score(self, word, context=None):\n return self.unmasked_score(\n self.vocab.lookup(word), self.vocab.lookup(context) if context else None\n )", "def score_sentences(self, document, texts):\n sent_scores = []\n # call word_frequency to get a word frequency table (or rather list of words) from the respective article\n scorable_words = self.word_frequency(texts[self.sent_pos])\n # split the summaries by @highlight token\n summary_split = document.split(\"@ highlight\")\n sentenceValue = 0\n sent_len = 0\n # for each summary calculate the sentence value\n for summary in summary_split:\n words = nltk.word_tokenize(summary)\n sent_len = len(words)\n for word in words:\n if word in scorable_words:\n sentenceValue =+ 1\n # normalise sentence value based on sentence length so that longer sentences do not get an automatic advantage over shorter ones\n # as null rows havent been dropped yet there may be scores of 0\n if (sentenceValue !=0 and sent_len !=0):\n sentenceValue = sentenceValue / sent_len\n sent_scores.append((summary, sentenceValue))\n return sent_scores", "def __ranking_function(self, doc, query_tokens):", "def topics_score_per_doc(lda_model, list_lemma):\n #Création d'un dictionnaire gensim\n array_lemma = np.array(list_lemma)\n dictionary = gensim.corpora.Dictionary(array_lemma)\n\n #Création d'un \"bag of words\" avec la fonction doc2bow\n bow_corpus = [dictionary.doc2bow(doc) for doc in array_lemma]\n\n for i in range(len(list_lemma)):\n print(\"\\nFor document {}\".format(i+1))\n for index, score in sorted(lda_model[bow_corpus[0]], key=lambda tup: -1*tup[1]):\n print(\"\\nScore: {}\\t \\nTopic: {}\".format(score, lda_model.print_topic(index, 10)))", "def getDocSumFromId(listId, retmax = None) :\n if retmax is None :\n retmax = len(listId)\n # Epost modified fromt the Biopython cookbook\n mySearch = Entrez.read(Entrez.epost(db = \"nuccore\", id = \",\".join(listId)))\n docSumsXML = _getDocSumXML(searchResult = mySearch,\n retmax = retmax)\n docSums = _parseDocSumXML(xmlContent = docSumsXML)\n return docSums", "def lix(self, doc):\n num_words = _get_num_words(doc)\n num_sentences = _get_num_sentences(doc)\n num_long_words = _get_num_long_words(doc, min_characters=7)\n return num_words / num_sentences + 100 * num_long_words / num_words", "def get_score(self):\n return float(self._score)", "def calc_score(score):\n if not score:\n return 0\n dbot_score = 1\n if score >= 95:\n dbot_score = 3\n elif score >= 75:\n dbot_score = 2\n return dbot_score", "def scoring(self):\n pass", "def predict(self, doc):\n \n prob_positive = self._predict_doc(doc, 1)\n prob_negative = self._predict_doc(doc, 0)\n\n if 
prob_positive > prob_negative:\n return 1\n return 0", "def do_search(queries):\n global documents, list_document\n results = {}\n query = tokenize(queries)\n if query == []:\n sys.exit()\n # find document ids containing all query terms. Works by\n # intersecting the posting lists for all query terms.\n relevant_document_ids = intersection(\n [set(postings[term].keys()) for term in query])\n if not relevant_document_ids:\n documents.clear()\n list_document[:] = []\n flash('empty')\n else:\n scores = sorted([(id,similarity(query,id))\n for id in relevant_document_ids],\n key=lambda x: x[1],\n reverse=True)\n print \"Score: filename\"\n global total_document_found\n total_document_found = 0\n for (id,score) in scores:\n print str(score)+\": \"+document_filenames[id]\n results[document_filenames[id]] = score\n total_document_found += 1\n flash(\"Total document found : \" + str(total_document_found) + \" of \" + str(N))\n return results", "def getScores(self, qryid, counter = None):\n if counter is None:\n return self.qryScores[qryid]\n else:\n return self.qryScores[qryid][:counter]", "def get_fact_score(extracted_scores,\n subj,\n obj,\n freq_dict,\n score_type='FREQ_SCORE'):\n score_types = set('FREQ_SCORE', 'MIN_SCORE')\n # Min of Page Rank scores of both Entities\n # Upweight facts where both have high scores\n min_score = min(\n extracted_scores[subj], extracted_scores[obj]\n )\n\n # Freq Score - If both entities are present - sum of frequencies\n # Upweight facts where both entities are in passage\n if subj in freq_dict and obj in freq_dict:\n freq_score = freq_dict[subj] + freq_dict[obj]\n else:\n freq_score = min(extracted_scores[subj],\n extracted_scores[obj])\n if score_type == 'FREQ_SCORE':\n return freq_score\n elif score_type == 'MIN_SCORE':\n return min_score\n else:\n ValueError(\n 'The score_type should be one of: %s' + ', '.join(list(score_types)))", "def __getitem__(self, doc_label):\n if doc_label not in self.docs:\n raise KeyError('document `%s` not found in corpus' % doc_label)\n return self.docs[doc_label]", "def read_score(self):\n file_path = 'score.txt'\n \n with open(file_path, 'r') as f:\n score = f.read()\n\n if score == '':\n return 0\n else:\n return int(score)", "def getReviewsWithToken(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.doc_to_words_path, 'rb') as bin:\n tup = []\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n docid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # count words:\n count = 0\n for i in range(frequency):\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n if wordid == wordid_in_file:\n count += 1\n tup.append(docid_in_file)\n tup.append(count)\n return tuple(tup)", "def boostScore(self, result: str, words:set ):\n found = 0;\n for word in words:\n if result in self.invertedIndex[word]:\n found += 1\n return found/len(words)", "def imp(term,word_dict,number_of_docs,id):\n\n with open('term_freq.txt', 'rb') as file:\n term_freq = pickle.loads(file.read())\n\n #print(term_freq)\n\n if id+1 in term_freq[term]:\n #print(term_freq[term][id])\n return term_freq[term][id+1]*word_dict[term][1]#idf(term,number_of_docs,index_list)\n else:\n return 0.0", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def fk_grade(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = 
_get_num_words(doc)\n num_syllables = _get_num_syllables(doc)\n if num_sentences == 0 or num_words == 0 or num_syllables == 0:\n return 0\n return (\n (11.8 * num_syllables / num_words)\n + (0.39 * num_words / num_sentences)\n - 15.59\n )", "def test_search_with_scoring_and_lang(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1})\n add_document(\"foo\", {\"bar\": 2})\n add_document(\"foo\", {\"bar\": 3})\n\n # And I add scoring with a language\n score = ScriptScore(\"s = 0 + doc['bar'].value\", lang=\"mvel\")\n t.score(score)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(3)\n results[0][\"_source\"][\"bar\"].should.equal(3)\n results[1][\"_source\"][\"bar\"].should.equal(2)\n results[2][\"_source\"][\"bar\"].should.equal(1)", "def _index_doc(self, db, doc_id):\n doc = db.get(doc_id)\n if doc is None:\n log.warning(\"Unable to find document in database: '%s'\" % doc_id)\n return\n fields = doc.get('solr_fields')\n fields = [\"payload\", \"timesaved\"]\n if not fields:\n log.debug(\"Document '%s' does not define solr_fields\" % doc_id)\n return\n updates = []\n for field in fields:\n if doc.has_key(field):\n self.__normalize(updates, field, doc[field])\n updates.extend([{'type' : 'any'}, {'_id' : doc_id}])\n return updates", "def okapi_best_match(tf_q, idf_q, doc_len, avgdl, k=0.2, b=0.75):\r\n score = (tf_q * idf_q * (k + 1)) / (tf_q + k * (1 - b + b * doc_len / avgdl))\r\n return score" ]
[ "0.74571574", "0.6616278", "0.6386327", "0.6302467", "0.62684375", "0.6095989", "0.60721517", "0.60592145", "0.60592145", "0.60391366", "0.6036707", "0.60315067", "0.59710914", "0.5947047", "0.59403086", "0.59256047", "0.5889441", "0.58889556", "0.58793545", "0.58497417", "0.5806685", "0.57910776", "0.5771377", "0.5750362", "0.5735371", "0.5724735", "0.5721542", "0.56848377", "0.5674099", "0.5649893", "0.5616074", "0.5611754", "0.55969596", "0.5584421", "0.5566184", "0.5565664", "0.55619895", "0.55619895", "0.55619895", "0.5546601", "0.5546601", "0.5546601", "0.5538284", "0.5536742", "0.5528779", "0.5527976", "0.5518083", "0.55166316", "0.5516322", "0.5513131", "0.55125576", "0.549997", "0.54723763", "0.54691386", "0.54494447", "0.54477006", "0.5438552", "0.5429349", "0.5425288", "0.54188937", "0.5409805", "0.54026735", "0.5396347", "0.53939563", "0.5374663", "0.5369971", "0.5363161", "0.53611493", "0.5357414", "0.5351887", "0.53468734", "0.534443", "0.53409845", "0.5340821", "0.5332894", "0.53303146", "0.5321253", "0.5321064", "0.53185385", "0.53030366", "0.52924085", "0.52729654", "0.5268082", "0.5253628", "0.52528447", "0.5252543", "0.5232849", "0.5226277", "0.5223188", "0.52225727", "0.52170134", "0.521679", "0.5206693", "0.52059776", "0.5202676", "0.5182096", "0.5174167", "0.5173976", "0.51713413", "0.5163338" ]
0.7950941
0
Sends a message to the nick.
def send(self, message): self.client.send('PRIVMSG', self, message, force=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_as_nick(self, command, msg):\n self._write(f':{self.ident.nick} {command} {msg}')", "def sendnick(self):\n self._send(\"NICK %s\" % (CONFIG[\"nick\"]))", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send(self, msg):\n self.message('Me', msg)", "def send_irc_message(self, event):\n\n self.log('Transmitting IRC message', lvl=debug)\n\n self.fireEvent(PRIVMSG(event.username, \"[%s] %s : %s\" % (event.msg_type, event.subject, event.body)))", "def send_message(self, ipaddress, nick, message):\n if not message:\n self.log.warning(\"Received empty or None message\")\n return False\n\n if not nick:\n self.log.warning(\"Received empty or None nick\")\n return False\n\n if not ipaddress:\n self.log.warning(\"Received empty or None ipaddress\")\n return False\n\n # Gets the current timestamp\n now = time.time()\n\n # Gets both the raw and the formatted version of the current timestamp.\n raw_timestamp = str(int(round(now * 1000)))\n timestamp = time.strftime(\"%H:%M:%S\", time.localtime())\n self.log.debug(\"%s|New message: %s -> %s -> %s\", timestamp, ipaddress,\n nick, message)\n\n # Fills the events dictionary object.\n update = {\"IP\": ipaddress, \"nick\": nick, \"message\": message,\n \"timestamp\": timestamp,\n \"raw_timestamp\": raw_timestamp}\n\n # Sends the events.\n self.listener.update(self.subscribed, update, False)", "def sendmsg(msg, target=channel):\n msg = bytes('PRIVMSG ' + target + ' :' + msg + '\\n', 'UTF-8')\n sleep(randint(5, 10) / 10) # to avoid throttling due to flooding\n write(msg)\n ircsocket.send(msg)", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send(self, message):\n self.sock.send(message)", "def send(self, msg):\n self.__sock.send(msg)", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send_message(self, message: str):\n self.client.chat_postMessage(\n channel=f\"@{self.username}\", text=message,\n )", "def sendMessage(sock, message):\n messageTemp = \"PRIVMSG \" + channel +\" :\" +message\n sock.send((messageTemp+ \"\\n\").encode())", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def send(self, msg, label=\"\"):\n self.remoter.tx(msg) # send to remote\n log.debug(\"%s sent %s:\\n%s\\n\\n\", self.remoter, label, bytes(msg))", "def sendMessage(self, name, message):\n time.sleep(int(self.getOwnName()) * 0.05)\n self.getSocket(name).send_pyobj(message)", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def send_message(self, message):\n pass", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def sendmessage(user,gameid):\n message = request.form['message']\n channel.send_message(user+gameid,message)", "def send(self, message: str) -> None:\n\n if message and not message.isspace():\n self.__sendToAll(self.username + \" : \" + message)", "def send(self, message):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def 
send(self, msg):\n pass", "def send(self, text):\n log.msg('me %s' % (text))\n self.sendLine(text)", "def _send_message(self, e: Event):\n\n message = self.message_text.get(\"1.0\", 'end-1c').replace('\\n', \"\")\n\n if len(message) > 0:\n self.add_message_to_chat('you: ' + message)\n self._clear_message_text()\n self.connection_socket.send(bytes('them: ' + message, 'utf-8'))", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "def send(self, msg):\n return self._channel_action(msg, 1)", "def send_message(self, message:str):\r\n msg_send = message.encode()\r\n self.server_connection.send(msg_send)", "def send_chat_message(self, channel, message):\r\n self._send(\"PRIVMSG #{0} :{1}\".format(channel, message))", "def send_message(stdscr, username=None):\n # Show the cursor and echo output.\n curses.curs_set(1)\n curses.echo()\n stdscr.clear()\n stdscr.refresh()\n if username is None:\n safe_put(stdscr, \"Recipient username: \", (0, 0))\n username = stdscr.getstr(0, 20)\n stdscr.clear()\n stdscr.refresh()\n tnu = taunet.users.by_name(username)\n if tnu == None:\n print(\"No such user. Known users: \" + \", \".join(sorted([u.name for u in taunet.users.all()])))\n return\n if not is_online(tnu):\n print(\"Couldn't connect to that user's host.\")\n return\n safe_put(stdscr, \"Message:\", (0, 0))\n message = stdscr.getstr(0, 9)\n stdscr.clear()\n stdscr.refresh()\n ship_tnm(tnu, taunet.TauNetMessage().outgoing(tnu.name, message))", "def sendto(self, name, msg):\n self.send(\"send/{}/{}:{}\".format(self.msg_id, name, msg))\n self.msg_id += 1", "def chat(sock, msg):\r\n message = \"PRIVMSG {} :{}\\r\\n\".format(cfg.CHAN, msg)\r\n #print(\"Sending: \"+message)\r\n sock.send(message.encode(\"utf-8\"))", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def send(self, message):\n _check_message_type(message=message)\n response = requests.post(\n self._server_url + _SEND_URL,\n data={\"id\": self._chat_id, \"msg\": message}\n )", "async def send_msg(self, message: str) -> None:\n await self.socket.sendall(message.encode())", "async def message(self, ctx:utils.Context, user:discord.User, *, content:str):\n\n await user.send(content)", "async def _nick(self, nick: str) -> str:\n\n logger.debug(f\"Setting nick to {nick!r}\")\n\n self._target_nick = nick\n\n reply = await self._connection.send(\"nick\", {\"name\": nick})\n data = self._extract_data(reply)\n\n new_nick = data[\"to\"]\n self._target_nick = new_nick\n\n if self._session is not None:\n self._session = self._session.with_nick(new_nick)\n\n logger.debug(f\"Set nick to {new_nick!r}\")\n\n return new_nick", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def send_as_server(self, command, msg):\n self._write(f':{self.server.name} {command} {msg}')", "def send_message(userid):\n\tsc.api_call(\n\t\t\"chat.postMessage\",\n\t\tchannel=userid,\n\t\ttext=\"Hey there, just wanted to remind you to join <#CQCKS8UN6|secret-snowflake-fa19> by Wednesday night, if you want to participate in Secret Santa this year. 
It will be lots of fun!\",\n\t\tusername=\"Reminder\",\n\t\ticon_emoji=\":santa:\"\n\t)", "def write(self, msg):\n self.sock.send(msg.encode())", "def send_message(self, message):\r\n if not self.is_connected():\r\n self.__root.after(self.WAIT_PERIOD, lambda: self.\r\n send_message(message))\r\n return\r\n self.__socket.send(str(message).encode())", "async def send(self, message):", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self):\r\n if self.connection:\r\n self.connection.send(self.getLine())\r\n else:\r\n print \"(0) message without connection could not be sent\"", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send_message(self, chat_id, text):\n self.updater.bot.sendMessage(chat_id=chat_id, text=text)\n log.info(\"Send msg @%s: %s..\", chat_id, text[:20])", "def sendMessage(self, message):\n self.connection.sendMessage(self, message.encode('ascii', 'ignore'))", "def send_message(self, msg):\n if msg is None:\n raise ValueError('message cannot be None!')\n\n if not isinstance(msg, message.Message):\n raise ValueError('message must be a type of Message')\n\n message_json = json.dumps(msg.__dict__)\n message_length = len(message_json)\n message_length_binary = struct.pack('>I', message_length)\n\n logging.info(\"Send: {0}\".format(message_json))\n\n self.sck.send(message_length_binary)\n self.sck.send(message_json)", "def msg(self, chan, msg):\n self._msg(chan, msg)", "def sendMsg(self, msg):\n self.sockUDP.sendto(bytes(msg), self.serverAddress)\n logger.debug(\"sent: %r\", msg)", "def send(self, msg: str):\n message = msg.encode(HttpClient.FORMAT)\n self.client.send(message)\n print(\"[MESSAGE] message sent:\", msg)", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def send(self, message) -> None:\n raise NotImplementedError", "def _send_msg(self, msg):\n self._kernel.comm.send(msg)", "def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)", "def send_message(self, to, message):\n\t\tmessage_dict = {\n\t\t\tACTION: MESSAGE,\n\t\t\tSENDER: self.username,\n\t\t\tDESTINATION: to,\n\t\t\tTIME: time.time(),\n\t\t\tMESSAGE_TEXT: message\n\t\t}\n\t\tclient_log.debug(f'Сформирован словарь сообщения: {message_dict}')\n\t\t# Необходимо дождаться освобождения сокета для отправки сообщения\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, message_dict)\n\t\t\tself.process_server_ans(get_message(self.transport))\n\t\t\tclient_log.info(f'Отправлено сообщение для пользователя {to}')", "def send(self, msg):\n if self.sock is not None:\n try:\n send_msg(self.sock, msg)\n except socket.error, msg:\n self.sock = None\n print 'Send failed. 
Error Code : ' + str(msg[0]) + ' Message ' + msg[1]", "def submit(self, msg):\n if len(msg) == 0:\n return\n self.prompt_win.clear()\n self.prompt_win.addstr(\"> \")\n self.refresh_prompt()\n if not self.client:\n self.add_msg(\"Error: Not Connected to Server\")\n self.refresh_prompt()\n return\n self.add_msg(\"You: \" + msg)\n self.client.send(msg)", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def send(self, send_to, subject):\n self.server.send_message(self, send_to, subject)", "def send_message(username, message):\n add_messages(username, message)\n return redirect(username)", "def send(self, msg):\n\n self.sock.sendto(msg, (self.UDP_IP, self.UDP_PORT))", "def sendmessage(user,roomid):\n message = request.form['message']\n channel.send_message(user+roomid,message)", "def send_message(self, cmd_id, message_type, status, message=None):\n pass", "def send_message(self, message):\n encoded_message = self.encode_message(message)\n self.socket.send(encoded_message)", "def cli_test_irc_send(self, *args):\n\n self.log('Testing IRC message sending')\n self.fireEvent(send_irc_message('riot', 'Testing', 'This is a test message'))\n self.fireEvent(send_irc_message('#hackerfleet', 'Testing', 'Eek, that tickles! Stop it!'))", "def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))", "def send_message(self, to, subject, body):\n self.forum.send_message(self.game, Message(to=to, subject=subject, body=body))", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)", "def send_message(self, message):\n self.send_message_queue.put(message)", "def send_message_to_opponentbot_channel(self, message):\n\n try:\n self.ircMessageBuffer.append(\n (\n \"PRIVMSG \" + str(\"#\" + self.nick).lower() + \" :\" +\n str(message) + \"\\r\\n\"\n )\n )\n except Exception as e:\n logging.error(\"Error in SendMessageToOpponentBotChannelIRC\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def send_message(self, message):\n msg_bytes = (\n f'{self.username}{self.delimiter}{message}'\n ).encode('utf-8')\n self.socket.writeDatagram(\n qtc.QByteArray(msg_bytes),\n qtn.QHostAddress.Broadcast,\n self.port\n )", "def send(self, msg):\n self._mailbox.put(msg)", "def send_message(msg, settings):\n from_jid = xmpp.protocol.JID(settings['xmpp_jid'])\n passwd = settings['xmpp_password']\n\n client = xmpp.Client(from_jid.getDomain(), debug=[])\n if client.connect():\n if client.auth(from_jid.getNode(), passwd):\n client.send(msg)\n client.disconnect()", "def message(self, msg):\n if msg['type'] in ('chat', 'normal'):\n msg.reply(\"Thanks for sending\\n%(body)s\" % msg).send()", 
"async def send_discord(msg, cnl):\n await bot.wait_until_ready()\n await bot.send_message(bot.get_channel(cnl), msg)", "async def send_message(self, channel : str, message : str):\n await self._connection.send_message(channel, message)", "def send(self, irc, msg, args, user, targets, text):\n # Let's get the from user.\n public = irc.isChannel(msg.args[0])\n sent = []\n for target in targets:\n id = self.db.send(user.id, target.id, public, text)\n s = format('note #%i sent to %s', id, target.name)\n sent.append(s)\n irc.reply(format('%L.', sent).capitalize())", "def send_protocol_message(self, msg):\n self.conn.send(msg + \"\\0\")", "def ping(msg):\n msg = msg[0:1] + 'O' + msg[2:]\n ircsocket.send(bytes(msg, 'utf-8'))\n sendmsg('This message should be eaten by irc. QQ.')", "def player_send_msg(self, player_ip, *args):\r\n\t\ttry:\r\n\t\t\tto_ID = args[0] # IndexError\r\n\t\t\tmessage = args[1:len(args)] # IndexError\r\n\t\t\tteam_type = self._teammates[player_ip] # KeyError\r\n\t\texcept IndexError:\t# Invalid arguments\r\n\t\t\tself._comm_server.send_message(player_ip, \"send-to fail\")\r\n\t\texcept KeyError:\t# Invalid player\r\n\t\t\tself._comm_server.send_message(player_ip, \"send-to fail\")\r\n\t\telse:\r\n\t\t\tfrom_ID = self._teams[team_type].get_player_info_by_IP(player_ip).ID\r\n\t\t\tto_info = self._teams[team_type].get_player_info_by_ID(to_ID)\r\n\t\t\tif to_info is not None:\r\n\t\t\t\tmsg_str = \"\"\r\n\t\t\t\tfor msg_block in message:\r\n\t\t\t\t\tmsg_str += \" \" + msg_block\r\n\r\n\t\t\t\tself._comm_server.send_message(to_info.IP, \"send-from {0}{1}\" \\\r\n\t\t\t\t\t.format(from_ID, msg_str))\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"send-to ok\")\r\n\t\t\telse:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"send-to fail\")", "def send_msg(self, msg):\n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(msg)))", "def send(self, msg):\r\n if isinstance(msg, str):\r\n msg = msg.encode()\r\n logger.debug('Sending message: %s ...', repr(msg))\r\n self._socket.sendall(msg)", "def write(self, msg):\n cmd = self.__compose(msg)\n self.sock.send(cmd)", "def send(self, msg: Message, **kwargs):\n\n pass", "def send_message(self, message):\n self.client.queue.put(message)", "def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")", "def send(message, to=\"*\", exclude=()):\n if net:\n if isinstance(net, Server):\n net.send(message, to, exclude)\n else:\n net.send(message)", "def tell(self, irc, msg, args, target, text):\n if target.lower() == 'me':\n target = msg.nick\n if ircutils.isChannel(target):\n irc.error('Dude, just give the command. No need for the tell.')\n return\n if not ircutils.isNick(target):\n irc.errorInvalid('nick', target)\n if ircutils.nickEqual(target, irc.nick):\n irc.error('You just told me, why should I tell myself?',Raise=True)\n if target not in irc.state.nicksToHostmasks and \\\n not ircdb.checkCapability(msg.prefix, 'owner'):\n # We'll let owners do this.\n s = 'I haven\\'t seen %s, I\\'ll let you do the telling.' % target\n irc.error(s, Raise=True)\n if irc.action:\n irc.action = False\n text = '* %s %s' % (irc.nick, text)\n s = '%s wants me to tell you: %s' % (msg.nick, text)\n irc.reply(s, to=target, private=True)" ]
[ "0.78963923", "0.7348465", "0.72309643", "0.71757305", "0.71429473", "0.71214193", "0.71134937", "0.7102403", "0.7016644", "0.7010554", "0.7000045", "0.697002", "0.6954962", "0.68770975", "0.6831442", "0.6811696", "0.68012816", "0.6797111", "0.6793605", "0.6779309", "0.6769687", "0.6743142", "0.6731753", "0.6713634", "0.66844267", "0.66705704", "0.66705704", "0.66705704", "0.6640205", "0.66333467", "0.6621541", "0.6578362", "0.6576147", "0.6564381", "0.65605706", "0.65482193", "0.6542073", "0.6513967", "0.6501057", "0.6489721", "0.6487279", "0.64764166", "0.64725745", "0.64579153", "0.6455599", "0.64409244", "0.64350945", "0.64330786", "0.641909", "0.63727164", "0.6371349", "0.6363751", "0.63581365", "0.6355127", "0.6354952", "0.6354426", "0.63518775", "0.6351086", "0.6349964", "0.6348171", "0.6342418", "0.6316455", "0.63131773", "0.6312794", "0.6306844", "0.63020134", "0.6287462", "0.6279699", "0.62781835", "0.62760204", "0.6275533", "0.62689286", "0.6266875", "0.6261571", "0.62594783", "0.62579674", "0.6255698", "0.6255698", "0.624891", "0.62468195", "0.6236767", "0.6233488", "0.623265", "0.6225283", "0.621102", "0.6210289", "0.6209863", "0.62090933", "0.6208695", "0.6197382", "0.61967254", "0.6171821", "0.6169235", "0.61681414", "0.6165586", "0.6159658", "0.61591524", "0.6157433", "0.6152833", "0.6149297" ]
0.65759915
33
Returns all the Channels that both the nick and the client has joined.
def channels(self): return [channel for channel in self.client.channels if channel.has_nick(self)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_channels_for(self, server, nick):\n channels = []\n for channel in self.serverchans[server.lower()].values():\n if irc.strings.lower(nick) in channel.users:\n channels.append(channel)\n return channels", "def get_channels():\n r = slack.channels.list().body\n return [ c for c in r['channels'] if c['is_member'] ]", "async def listchannels(self, ctx: commands.Context):\n db_session = self.bot.create_db_session()\n channels_query = db_session.query(Channel).filter(Channel.joinable == True).order_by(Channel.name)\n db_session.close()\n\n header_message = \"Here is a list of the joinable channels\"\n channel_list = \"\\n\".join(channel.name for channel in channels_query)\n footer_messge = (\"To join or leave one of these channels, use the !joinchannel and !leavechannel commands.\\n\"\n \"To join multiple channels, separate them with a space.\")\n\n message = discord.Embed()\n message.title = \"Joinable Channels\"\n message.description = channel_list\n message.set_footer(text=footer_messge)\n\n await ctx.send(embed=message)", "def comchans(self, nick):\n comchannels = 0\n for chan in self.chandb:\n if nick in chan:\n comchannels += 1\n return comchannels", "def channels(message):\n load_users(message._client.users)\n for x in message._client.channels:\n chan = message._client.channels[x]\n if 'is_member' in chan:\n if chan['is_member']:\n message.reply(\"{} ({})\".format(chan['name'], chan['id']))\n# message.reply(pretty_json(chan, True))\n elif 'is_im' in chan:\n print(chan)\n friendlyname = chan['user']\n try:\n friendlyname = chan['user'].name\n except KeyError:\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n chan['id']))", "def get_channels(\n self,\n on_channel_open: Callable[[str], None],\n on_catastrophic_disconnect: Callable[[str], None],\n on_message: Callable[[str, \"Packet\"], None],\n ) -> List[\"Channel\"]:\n raise NotImplementedError()", "def get_channels(self):\n return self.channels", "def get_channels(self):\n response = self.client.api_call(\n f'conversations.list?types={cfg.CHANNEL[\"types\"]}&exclude_archived={cfg.CHANNEL[\"exclude_archived\"]}'\n )\n assert response['ok']\n return response['channels']", "def channels(self):\n return self._channels.keys()", "def joined(self, channel):\n # Return user list to Server bot.\n self.get_nicklist()", "def _channels_list(self):\n result = self.slack.api_call(\"channels.list\")\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['channels']", "def channels(self):\n if not self.is_loaded():\n return []\n else:\n return ipmi_channels()", "def get_channels(cls):\n class_members = getmembers(cls)\n\n channels = []\n for name, member in class_members:\n if isinstance(member, CommonBase.BaseChannelCreator):\n channels.append((name, member))\n return channels", "def get_user_channels(self):\n\n request = self.youtube.subscriptions().list(\n part='snippet',\n mine=True,\n order='alphabetical'\n )\n subscriptions = []\n while request:\n response = request.execute()\n subscriptions.append(response)\n request = self.youtube.subscriptions().list_next(request, response)\n\n channels = {}\n for subscription in subscriptions:\n for channel in subscription['items']:\n channel_title = channel['snippet']['title']\n channel_id = channel['snippet']['resourceId']['channelId']\n channels[channel_title] = channel_id\n\n return channels", "def collect_channels(session):\n channel_objects = []\n brew_channels = session.listChannels()\n\n for brew_channel in brew_channels:\n 
channel_objects.append(channel(brew_channel[\"name\"], brew_channel[\"id\"]))\n\n return channel_objects", "async def on_member_join(member: discord.Member):\n for channel in member.server.channels:\n print(channel)\n if channel == \"general\":\n await member.send(f\"\"\"Welcome to the server {member.mention}!\"\"\")", "def get_clients(self, channel):\n if channel not in self.clients.keys():\n return []\n return self.clients[channel]", "def channels(self):\n return self._channels", "def joinedChannel(self, channel, users):\n pass", "def get_users_for(self, server, channame):\n skey = server.lower()\n ckey = irc.strings.lower(channame)\n users = []\n if skey in self.serverchans and ckey in self.serverchans[skey]:\n users = self.serverchans[skey][ckey].users.keys()\n return users", "def channels(message):\n for channel in message._client.channels:\n if 'is_member' in channel:\n message.reply(\"{} ({})\".format(channel['name'], channel['id']))\n elif 'is_im' in channel:\n #print(channel)\n friendlyname = channel['user']\n try:\n friendlyname = channel['user'][\"name\"]\n except (KeyError, AttributeError):\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n channel['id']))", "def get_channels(self, uuid=None, address=None):\n params = self._build_params(uuid=uuid, address=address)\n return self._get_query('channels', params, Channel)", "def foundInChannels(self, cls:\"Client\") -> List[\"Channel\"]:\n\n\t\tret:List[\"Channel\"] = []\n\n\t\tfor channel_name in self.found_in:\n\n\t\t\tCh:\"Channel\" = cls.channels.get(channel_name, None)\n\t\t\tif Ch: ret.append(Ch)\n\n\t\treturn ret", "def iter_channels(self) -> Iterable[\"MChannel\"]:\n for server in self.servers.values():\n yield from server.channels.values()", "def clients(self, r):\r\n return r.clients", "def joined(self):\n return str(self) in holder.bot.conn.channels.keys()", "def get_channels(self):\n bus_name = self.telepathy_conn.requested_bus_name\n connection_path = self.telepathy_conn.object_path\n channels = [self.telepathy_text_chan.object_path,\n self.telepathy_tubes_chan.object_path]\n\n print('%r: bus name is %s, connection is %s, channels are %r',\n self, bus_name, connection_path, channels)\n return bus_name, connection_path, channels", "def db_get_channels(self, guildID: int):\n query = \"SELECT rowid, * FROM wormhole_channel WHERE guildID = ?\"\n channels = self.bot.db_query(query, (guildID,), astuple=True)\n # come as: (rowid, name, channelID, guildID, type, webhookID,\n # webhookTOKEN)\n res: List[WormholeChannel] = []\n for row in channels:\n res.append(WormholeChannel(*row[1:5]))\n res[-1].id = row[0]\n return res if len(res) > 0 else None", "def getChannels(self) -> List:\n\t\tif self._taking_off:\n\t\t\tself.takeOff()\n\n\t\tif self._landing:\n\t\t\tself.land()\n\n\t\treturn self._altHoldController.getChannels() + [2000]", "def get_channels():\n\tchannels = slack.get_channels()\n\treturn jsonify(channels=channels.body['channels'])", "async def list_channel(self, ctx: MyContext):\n channels = self.db_get_channels(ctx.guild.id)\n if not channels: # we can't send an empty list\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.no-channels\", p=ctx.prefix\n )\n )\n return\n txt = \"\\n\".join([c.to_str() for c in channels])\n await ctx.send(txt)", "def join_channel(self, server, username, channel):\n for sock in self.socks:\n if sock.server == server and username == sock.username:\n if sock.channel == channel:\n return sock\n sock.send(\"JOIN {}\\r\\n\".format(channel))\n print 
(\"[!] channel {} joined on {} with username {}\".format(channel, server, username))\n sock = IRC.Socket(self.dispatcher, sock.sock, username, server, channel)\n self.replyer.add_sock(sock)\n return sock\n return self.add_sock(server=server, username=username, channel=channel)", "def get_channels_json(self):\n logging.debug(f\"Getting all Slack channels...\")\n return self.get_list_json(\"conversations\")[\"channels\"]", "def _join_channels(self, conn):\n channels = sorted(self.channels_to_join)\n logger.info('Channels to join: %s', ', '.join(c.name for c in channels))\n\n for channel in channels:\n logger.info('Joining channel %s ...', channel.name)\n conn.join(channel.name, channel.password or '')", "def rooms(self):\n\n channels = self.conn.channels.keys()\n return [IRCMUCRoom(node=channel) for channel in channels]", "def channels(self):\r\n return v3.Channels(self)", "def get_registered_clients(self):\n return self.hub.get_registered_clients(self.get_private_key())", "def channels_list(token):\n auth_u_id = get_id_from_token(token)\n all_channels = channels.query(\"all_members\", \"contains\", auth_u_id)\n channels_list = []\n for channel in all_channels:\n channels_list.append(\n {\"channel_id\": channel[\"channel_id\"], \"name\": channel[\"name\"]}\n )\n return {\"channels\": channels_list}", "def getChannelsByName(self, unit, channels): \n\t\treturn self.selectChannelsByName(unit, channels, dontSelect = 1)", "def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)", "def joined(self, channel):\n log.info(\"Joined %s.\", channel)\n # ask for the current list of users in the channel\n self.dispatch('presence', 'joined', channel)", "async def joinchannel(self, ctx: commands.Context, *channels: str):\n for channel in channels:\n channel_query = self._channel_query(channel)\n\n if channel_query == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n channel = self.bot.get_channel(channel_query.id)\n guild = self.bot.get_guild(SERVER_ID)\n member = guild.get_member(ctx.author.id)\n\n if channel == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n # Don't let a user join the channel again if they are already in it.\n if channel.permissions_for(member).is_superset(JOINED_PERMISSIONS):\n await ctx.send(f\"You're already a member of {channel}.\")\n continue\n\n await channel.set_permissions(member, read_messages=True, reason=\"UQCSbot added.\")\n join_message = await channel.send(f\"{member.display_name} joined {channel.mention}\")\n await join_message.add_reaction(\"👋\")\n await ctx.send(f\"You've joined {channel.mention}.\")", "def list_available_clients(self):\n connected_clients = self.all_clients.keys()\n return connected_clients", "def list_all_channels(_response=Response, _db=Depends(get_db)):\n\n res_status, _data = ChatController(_db).list_channels()\n\n _response.status_code = res_status\n\n return {\"data\": _data}", "def channels():\n channels = db.session.query(Channel).all()\n return render_template(\"admin/channels.html\", channels=channels)", "def get_channels(kwargs, limit=100):\n conn = engine.connect()\n\n fields = [field.name for field in ChannelStatusForm()]\n\n # make sure all the values in kwargs are actual fields\n kwargs = dict(item for item in kwargs.items() if item[0] in fields)\n\n query = \"SELECT * FROM current_channel_status \"\n if len(kwargs):\n query += \"WHERE %s \" % (\" AND \".join([\"%s = %%(%s)s\" % (item[0], item[0]) for item in kwargs.items()]))\n query += 
\"ORDER BY crate, slot, channel LIMIT %i\" % limit\n\n result = conn.execute(query, kwargs)\n\n if result is None:\n return None\n\n keys = result.keys()\n rows = result.fetchall()\n\n return [dict(zip(keys,row)) for row in rows]", "def get_subscribed_clients(self, mtype):\n return self.hub.get_subscribed_clients(self.get_private_key(), mtype)", "def get_connected_user():\n usernames = clients.keys()\n data = json.dumps(usernames)\n emit('on_client_list_received', data)", "async def _list(self, ctx):\n config = await self.config.guild(ctx.guild).channels()\n data = [self.bot.get_channel(x).mention for x in config]\n if ctx.channel.id in config:\n destination = ctx.author\n else:\n destination = ctx\n if not data:\n return await destination.send(\"There are no channels.\")\n await destination.send(\", \".join(data))", "async def fetch_dm_channels(self):\n data = await self.http.get_dm_channels()\n channels = []\n for dm_channel_data in data.get('channels', data):\n dm_channel = self.http.create_channel(data=dm_channel_data)\n channels.append(dm_channel)\n\n return channels", "def channels(self) -> Tuple[chans.Channel]:\n return self.operands", "def getConnectedUsers(self):\n\n\t\treturn self.connectedUsers", "def clients(self):\n return self._clients", "def active_channels(cls, user, notification_type):\n if notification_type not in NOTIFICATION_TYPES:\n raise ValueError(\"You asked for an invalid notification_type\")\n\n try:\n setting = cls.objects.get(user=user, notification_type=notification_type)\n except cls.DoesNotExist:\n # No setting equals all channels\n return CHANNELS\n\n if not setting.enabled:\n # Everything is disabled when the enabled flag is False\n return []\n\n # When enabled is True return all valid channels in the channels field.\n return list(set(setting.channels or []) & set(CHANNELS))", "def database_channels(self):\n return [c for c in self.values() if isinstance(c, DatabaseChannel)]", "def get_sockets(self, users):\n sockets = []\n for user in users:\n for client in self.clients:\n if user == client.get_name():\n sockets.append(client.get_socket())\n return sockets", "def channels(self):\n return [cc for cc in list(self.dataset.data_vars)]", "def list_conns(self):\n\t\tres = []\n\t\tself.AL.acquire()\n\t\tfor ls in self.ls.keys():\n\t\t\tinfo = self.ls[ls]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Relay\", \"LOCAL\", info[\"local\"], info[\"peer\"],\n\t\t\t\t\tinfo[\"port\"], info[\"got\"], None,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tfor s in self.s2i.keys():\n\t\t\tinfo = self.s2i[s]\n\t\t\tif info[\"creator\"] == self.cid:\n\t\t\t\tfai = \"LOCAL\"\n\t\t\t\ttai = info[\"peer\"]\n\t\t\telse:\n\t\t\t\tfai = info[\"creator\"]\n\t\t\t\ttai = info[\"peer\"]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Conn\", fai, info[\"local\"], tai, info[\"port\"],\n\t\t\t\t\tinfo[\"recv\"], info[\"send\"]\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tself.AL.release()\n\t\treturn res", "def clients(self):\n self.update_results()\n return self._clients", "def claim_channels(self, channel_pool: list[ZigbeeChannel]) -> list[ZigbeeChannel]:\n claimed = []\n if isinstance(self.channel_names, frozenset):\n claimed.extend([ch for ch in channel_pool if ch.name in self.channel_names])\n if isinstance(self.generic_ids, frozenset):\n claimed.extend(\n [ch for ch in channel_pool if ch.generic_id in self.generic_ids]\n )\n if isinstance(self.aux_channels, frozenset):\n claimed.extend([ch for ch in channel_pool if ch.name in self.aux_channels])\n return claimed", "def get_list_youtube_channels_check(self):\n return 
self.bot_data_file[\"youtube\"][\"channels\"]", "def get_activechannels(self,):\n\n channels_nibble = self._read('CSR')[0] >> 4\n channels = []\n\n for i in reversed (range (4)):\n if channels_nibble >> i > 0:\n channels.append(i)\n channels_nibble -= 2**i\n\n channels.reverse()\n\n return channels", "def channels(self): # type: (...) -> List[BlendingRangePair]\n return self._channels", "def get_user_channels(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + '/channels'\n options = {}\n return self.make_request(uri, options)", "def list_channels():\n user = getpass.getuser()\n base_path = \"C:\\\\Users\\\\\" + user + \"\\\\Documents\\\\Eve\\\\logs\\\\Chatlogs\\\\\"\n today = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n most_recent = {}\n for filename in os.listdir(base_path):\n filename = filename[:-4]\n full_filename = filename\n time = filename[-6:]\n filename = filename[:-7]\n date = filename[-8:]\n channel_name = filename[:-9]\n if date == today:\n channel = Channel()\n channel.file_name = full_filename\n channel.dir = base_path\n channel.channel_name = channel_name\n channel.date = date\n channel.time = time\n if most_recent.get(channel_name):\n newest_channel = most_recent.get(channel_name)\n if int(time) > int(newest_channel.time):\n most_recent[channel_name] = channel\n else:\n most_recent[channel_name] = channel\n\n return most_recent", "def _joined_all(self):\n if not self.channels:\n return False\n for channel in self:\n if not channel.joined:\n return False\n return True", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")", "def password_lockout_notification_channels(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"password_lockout_notification_channels\")", "def get_active_users(text_channel) -> List[discord.Member]:\n\n active_users = []\n for m in text_channel.members:\n if m.status.name in [\"online\", \"dnd\"] and m.bot == False:\n active_users.append(m)\n\n return active_users", "async def guild(ctx):\n print(ctx.channel)\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"guild: {ctx.guild.name}\"\"\")", "def _read_channels(self, info):\n channels = []\n if info.desc().child(\"channels\").empty():\n return channels\n\n channel = info.desc().child(\"channels\").child(\"channel\")\n for _ in range(info.channel_count()):\n channel_name = channel.child_value(\"label\")\n # If the data stream has a TRG channel, rename it so it doesn't\n # conflict with the marker channel.\n if channel_name == 'TRG' and self._marker_inlets:\n channel_name = \"TRG_device_stream\"\n channels.append(channel_name)\n channel = channel.next_sibling()\n\n for appended_channel in self._appended_channels:\n channels.append(appended_channel)\n\n trg_marker_index = self._trigger_inlet_index()\n for i, inlet in enumerate(self._marker_inlets):\n col = inlet_name(inlet)\n if i == trg_marker_index:\n col = 'TRG'\n channels.append(col)\n\n return channels", "def get_subscribed_clients(self, private_key, mtype):\n return self._samp_hub.getSubscribedClients(private_key, mtype)", "def getOpenChats(self):\n\n # 
Make sure we get refresh chat list every time\n self.cacheChats()\n for chat_id, chat in self.chats.items():\n yield chat_id, chat", "def channels_listall(token):\n channels_results = channels.list()\n channels_list = []\n for channel in channels_results:\n channels_list.append(\n {\"channel_id\": channel[\"channel_id\"], \"name\": channel[\"name\"]}\n )\n return {\"channels\": channels_list}", "def clients_with_team_access(self):\n from lastuser_core.models.client import CLIENT_TEAM_ACCESS\n return [cta.client for cta in self.client_team_access if cta.access_level == CLIENT_TEAM_ACCESS.ALL]", "def _identify_channels(self, name):\n\n channel_list = []\n if self.nuke_node.Class() == \"Cryptomatte\":\n # nuke_node is a keyer gizmo\n channel_list = self.nuke_node.node('Input1').channels()\n else:\n # nuke_node might a read node\n channel_list = self.nuke_node.channels()\n\n relevant_channels = [x for x in channel_list if x.startswith(name)]\n pure_channels = []\n for channel in relevant_channels:\n suffix = \".red\"\n if not channel.endswith(suffix):\n continue\n # to do: validate this somewhere else\n pure_channel = channel[:-len(suffix)]\n pure_channels.append(pure_channel)\n\n return sorted(pure_channels)", "def channels(self) -> int:\n return self._channels", "def list_clients(self):\n\n return self.clients_info", "def join_channels(channels_to_join):\n bot_id = settings.SLACK_BOT_ID\n limit = 200\n cursor = \"\"\n channels_to_join = set(channels_to_join)\n\n while True:\n resp = slack_client.api_call(\"conversations.list\", limit=limit, cursor=cursor)\n\n if \"response_metadata\" in resp:\n cursor = resp[\"response_metadata\"][\"next_cursor\"]\n\n slack_channels = resp[\"channels\"]\n for channel in slack_channels:\n if channel[\"name\"] in channels_to_join:\n channels_to_join.remove(channel[\"name\"])\n channel_id = channel[\"id\"]\n resp = slack_client.api_call(\n \"conversations.invite\", channel=channel_id, users=bot_id\n )\n if resp.get(\"ok\"):\n logger.info(f\"Bot was invited to channel {channel_id}\")\n\n if cursor == \"\":\n break\n\n if not channels_to_join:\n break\n\n if channels_to_join:\n logger.warning(f\"Unable to find slack channels: {channels_to_join}\")\n else:\n logger.info(\"Bot in all required channels.\")", "def on_connect(self, connection, event):\r\n print('[{}] Connected to {}' .format(event.type.upper(), event.source))\r\n print(\"{}\".format(event.arguments))\r\n\r\n res = self.cursor.execute(\"\"\"SELECT * FROM `IRC_servers` WHERE `Registred_users_userID` = %s AND `serverID` = %s;\"\"\", (self.userID, connection.serverID))\r\n if res != 0:\r\n result = self.cursor.fetchall()\r\n serverID_res = int(result[0][0])\r\n #res = self.cursor.execute(\"\"\"UPDATE `IRC_servers` SET `isConnected` = %s WHERE `serverID` = %s;\"\"\", (1, serverID_res))\r\n\r\n if serverID_res == int(connection.serverID): # pokud se získané ID z databáze rovná tomu, které v sobě uchovává connection, redundantní check, ale JTS\r\n res = self.cursor.execute(\"\"\"SELECT * FROM `IRC_channels` WHERE `IRC_servers_serverID` = %s;\"\"\", (serverID_res,))\r\n if res != 0:\r\n result = self.cursor.fetchall()\r\n print(\"For serverID = {}: {}\".format(serverID_res, result))\r\n\r\n channels = list()\r\n for resa in result:\r\n channelID = resa[0]\r\n channelName = resa[1]\r\n channelPassword = resa[2]\r\n lastOpened = resa[3]\r\n channel_serverID = resa[4]\r\n\r\n temp_dict = {\"channelName\": channelName, \"channelPassword\": channelPassword}\r\n channels.append(temp_dict)\r\n\r\n for channel in 
channels:\r\n if client.is_channel(channel[\"channelName\"]):\r\n connection.join(channel[\"channelName\"], key=channel[\"channelPassword\"])\r\n else:\r\n print(\"The channel in database is not a channel.\")\r\n else:\r\n print(\"[WARNING on_connect]: No channels to join on this server (serverID = {})\".format(serverID_res))", "def get_client_list(self):\r\n cursor = self.conn.cursor()\r\n cursor.execute(\"\"\"SELECT * FROM CLIENT\"\"\")\r\n return cursor.fetchall()", "async def managechannels(self, ctx:commands.Context):", "def list(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.list_conns()", "def current_wifi_clients(self) -> list:\n self._parse_clients_info()\n return self._current_wifi_clients", "def get_clients(self, just_names=False, as_dict=False):\n # Should be mod-type agnostic; GetAllClients() replaces\n #\n # flatten_set(net.GetClients() for net in networks)\n #\n clients = self.GetUser().GetAllClients()\n if just_names:\n return tuple(c.GetFullName() for c in clients)\n elif as_dict:\n return {c.GetFullName(): c for c in clients}\n return tuple(clients)", "async def used_channels(request: web.Request) -> web.Response:\n\n session_factory = get_session_factory_from_request(request)\n\n with session_factory() as session:\n channels = await get_channels(session)\n\n response = web.json_response({\"channels\": channels})\n response.enable_compression()\n return response", "async def votechannel_list(self, ctx):\n channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id, voting_type FROM voting_channel WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n )\n if not channels:\n raise exceptions.Info(\"There are no voting channels on this server yet!\")\n\n rows = []\n for channel_id, voting_type in channels:\n rows.append(f\"<#{channel_id}> - `{voting_type}`\")\n\n content = discord.Embed(\n title=f\":1234: Voting channels in {ctx.guild.name}\", color=int(\"3b88c3\", 16)\n )\n await util.send_as_pages(ctx, content, rows)", "def GetChannelNames(vDataSet):\r\n nc = vDataSet.GetSizeC()\r\n ret = []\r\n for i in range(nc):\r\n name = vDataSet.GetChannelName(i)\r\n ret.append(name)\r\n\r\n return ret", "def extract_channels(self, index: int) -> ListLike:\n cmd_pieces = self[index].split()\n channels = []\n for i, piece in enumerate(cmd_pieces):\n if piece in [\"--channel\", \"-c\"]:\n channels.append(cmd_pieces[i + 1])\n return channels", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def _find_channels(note):\r\n id_ch = note.index('ChanNames')\r\n chan_beg = note.index('(', id_ch)\r\n chan_end = note.index(')', chan_beg)\r\n note_with_chan = note[chan_beg + 1:chan_end]\r\n return [x.strip('\" ') for x in note_with_chan.split(',')]", "def get_public_channels(cls, defer_nonmain_trees=False):\n if defer_nonmain_trees:\n c = (Channel.objects\n .filter(public=True)\n .exclude(deleted=True)\n .select_related('main_tree')\n .prefetch_related('editors')\n .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))\n else:\n c = Channel.objects.filter(public=True).exclude(deleted=True)\n\n return c", "async def request_channel_thread_channels(client, guild_id, channel_id, request_function):\n thread_channels = []\n \n data = None\n \n while True:\n data = await request_function(client.http, channel_id, data)\n thread_channel_datas = data['threads']\n \n for 
thread_channel_data in thread_channel_datas:\n thread_channel = Channel.from_data(thread_channel_data, client, guild_id)\n thread_channels.append(thread_channel)\n \n thread_user_datas = data['members']\n for thread_user_data in thread_user_datas:\n thread_channel_id = int(thread_user_data['id'])\n try:\n thread_channel = CHANNELS[thread_channel_id]\n except KeyError:\n continue\n \n user_id = int(thread_user_data['user_id'])\n user = create_partial_user_from_id(user_id)\n \n thread_user_create(thread_channel, user, thread_user_data)\n \n if not data.get('has_more', True):\n break\n \n if thread_channels:\n before = thread_channels[-1].created_at\n else:\n before = datetime.utcnow()\n \n data = {'before': before}\n \n return thread_channels", "def get_chartooms():\n result = dispatch(Chatroom.get_chatroom_with_user_id(session['user_id']))\n rooms = [{'room_id': row[0], 'name': row[1]} for row in result]\n for room in rooms:\n res = dispatch(Chatroom.get_room_members_with_room_id(room['room_id']))\n res = [ row[1] for row in res]\n room['members'] = res\n return jsonify(results = rooms)", "def __redrawChannels(self):\n self.__channelWin.clear()\n all_chans = self._client.getChannels()\n all_chans.sort(key=lambda c: c.getName())\n count = min(len(all_chans), self.__channelWin.getmaxyx()[0])\n show = all_chans[:count]\n for c in show:\n cur = self._client.currentChannel() == c\n if cur:\n attr = curses.A_REVERSE\n elif c in self._client.getJoined():\n attr = curses.A_BOLD\n else:\n attr = curses.A_DIM\n if c != self._client.getNoneChannel():\n self.__channelWin.addstr(\n \"{chan}\\n\".format(chan=c.getName()),\n attr\n )", "def get_channels(self):\n return [self.afos, \"%s...\" % (self.afos[:3], )]", "def get_joined_buddies(self):\n return self._buddies.values()", "async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")", "def get_connected_devices(self, client):\r\n device_list = client.getConnectedDevices()\r\n # to get List split('\\n')\r\n return device_list", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False" ]
[ "0.7689199", "0.69669604", "0.66077626", "0.64817184", "0.6466343", "0.63576937", "0.6349284", "0.63328314", "0.63226914", "0.6310823", "0.62999576", "0.6298013", "0.628262", "0.62560254", "0.61833847", "0.6165296", "0.61439246", "0.6142929", "0.61309534", "0.6128517", "0.6128006", "0.6114568", "0.61017406", "0.6092856", "0.6072213", "0.60475314", "0.60320354", "0.599986", "0.59988284", "0.5992499", "0.5984219", "0.597707", "0.5958599", "0.593684", "0.58694506", "0.58630556", "0.58462226", "0.5809043", "0.5764031", "0.5750067", "0.57149667", "0.5688231", "0.56350017", "0.5629779", "0.56262", "0.56097484", "0.5597245", "0.5563507", "0.5557238", "0.5555677", "0.55218935", "0.55088264", "0.5475616", "0.54751694", "0.5464065", "0.54423434", "0.5426073", "0.54186934", "0.54156274", "0.5398331", "0.53935945", "0.53930664", "0.5391774", "0.53905326", "0.5376166", "0.53723097", "0.53650165", "0.53578067", "0.53447264", "0.5342441", "0.53420174", "0.5304432", "0.5301753", "0.52935046", "0.52934897", "0.5286939", "0.52804404", "0.52708113", "0.5269094", "0.5268456", "0.5266426", "0.52567863", "0.52407146", "0.5235438", "0.5201347", "0.51993996", "0.518878", "0.5169539", "0.5167268", "0.51668966", "0.51610076", "0.51588213", "0.5157702", "0.51479733", "0.5139779", "0.513866", "0.5137556", "0.51246077", "0.5123996", "0.51192045" ]
0.77265155
0
Cost function using binary crossentropy
def cost(self, Y, A): m = Y.shape[1] C = (-1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1.0000001 - A))) return C
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_crossentropy(output, target):\r\n return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))", "def binary_crossentropy(predictions, targets):\n return theano.tensor.nnet.binary_crossentropy(predictions, targets)", "def compute_cost(Z6, Y):\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z6, labels=Y))\n\n return cost", "def nll(y_true, y_pred):\n return K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)", "def binary_cross_entropy(y_true, y_preds):\n return np.sum(y_true * np.log(y_preds) + (1 - y_true) * np.log(1 - y_preds))", "def binary_crossentropy(y_pred, y_true):\n with tf.name_scope(\"BinaryCrossentropy\"):\n return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(y_pred,\n y_true))", "def compute_cost(self,X, y):\r\n num_examples = np.shape(X)[0]\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n \r\n one_hot_y = np.zeros((num_examples,np.max(y)+1))\r\n logloss = np.zeros((num_examples,)) \r\n for i in range(np.shape(X)[0]):\r\n one_hot_y[i,y[i]] = 1\r\n logloss[i] = -np.sum(np.log(softmax_scores[i,:]) * one_hot_y[i,:])\r\n data_loss = np.sum(logloss)\r\n return 1./num_examples * data_loss", "def get_reconstruction_cost(self, pre_sigmoid_nv):\n y = self.input\n a = T.nnet.sigmoid(pre_sigmoid_nv)\n\n cross_entropy = -T.mean(\n T.sum(y * T.log(a) + (1 - y) * T.log(1 - a), axis=1)\n )\n\n return cross_entropy", "def get_reconstruction_cost(self, updates, pre_sigmoid_nv):\r\n\r\n cross_entropy = T.mean(\r\n T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +\r\n (1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),\r\n axis=1))\r\n\r\n return cross_entropy", "def cost(self, logits, labels, weights):\n return tf.multiply(\n tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels),\n weights)", "def binary_cross_entropy(true, pred, epsilon=1e-7):\n pred = ivy.clip(pred, epsilon, 1-epsilon)\n # noinspection PyTypeChecker\n return -(ivy.log(pred) * true + ivy.log(1 - pred) * (1 - true))", "def cross_entropy(y_observed, p):\n\n pass", "def cross_entropy_cost(output_out, target_out):\r\n total = 0\r\n for target_node in range(len(target_out)): # For each target data set\r\n for output_node in range(len(output_out)): # For each output node\r\n total += target_out[target_node][output_node] - target_out[target_node][output_node] * np.log(output_out[output_node]) - \\\r\n (1 - target_out[target_node][output_node]) * np.log(1 - output_out[output_node])\r\n\r\n total = 1 / total\r\n return total", "def get_reconstruction_cost(self, nv):\n cross_entropy = T.mean(\n T.sum(\n self.input * T.log(nv) +\n (1 - self.input) * T.log(1 - nv),\n axis=1\n )\n )\n return cross_entropy", "def binary_crossentropy(predictions, targets):\n predictions, targets = align_targets(predictions, targets)\n return theano.tensor.nnet.binary_crossentropy(predictions, targets)", "def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def cost(self, x, y):\n return self.cross_entropy_error(x,y) + self.l2_regularization_penalty()", "def cost(self, output, labels, weights):\n return tf.multiply(0.5 * tf.square(output - labels), weights)", "def cross_entropy(\n **kwargs\n) -> Callable:\n return categorical_crossentropy", "def binary_cross_entropy(y_true, y_pred, eps=1e-15):\n assert 
y_true.shape == y_pred.shape\n y_pred = np.clip(y_pred, eps, 1 - eps) # Avoid log(0)\n return - np.mean(\n y_true * np.log(y_pred) + \n (1 - y_true) * (np.log(1 - y_pred))\n )", "def binary_cross_entropy(input: Tensor, target: Tensor) -> Tensor:\n assert input.shape == target.shape, 'input and target have different shape!'\n assert len(input.shape) == 2, 'binary cross entropy only used in 2 dim matrix'\n assert input.shape[1] == 1, 'binary shape[1] should be 1'\n loss = target * log(input) + (1 - target) * log(1 - input)\n return -sum(loss, 0) / input.shape[0]", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def cross_entropy(y_pred,y):\n \n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n return sum(-y*np.log(y_pred+epsilon))", "def class_balanced_cross_entropy(this,pred, label, name='cross_entropy_loss'):\n with tf.name_scope('class_balanced_cross_entropy'):\n z = this.batch_flatten(pred)\n y = tf.cast(this.batch_flatten(label), tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / ((count_neg + count_pos)+this.EPS)\n\n eps = 1e-12\n loss_pos = -beta * tf.reduce_mean(y * tf.log(z + eps))\n loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(1. - z + eps))\n cost = tf.subtract(loss_pos, loss_neg, name=name)\n return cost", "def class_balanced_cross_entropy(this,pred, label, name='cross_entropy_loss'):\n with tf.name_scope('class_balanced_cross_entropy'):\n z = this.batch_flatten(pred)\n y = tf.cast(this.batch_flatten(label), tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / ((count_neg + count_pos)+this.EPS)\n\n eps = 1e-12\n loss_pos = -beta * tf.reduce_mean(y * tf.log(z + eps))\n loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(1. - z + eps))\n cost = tf.subtract(loss_pos, loss_neg, name=name)\n return cost", "def binary_cross_entropy(Y, Y_hat, epsilon=1e-8):\n \n m = Y.shape[0]\n \n # make data safe\n Y_hat = np.clip(Y_hat, a_min=epsilon, a_max=(1 - epsilon))\n \n # calc cost\n cost = (1 / m) * np.nansum(-np.log(Y_hat) * Y - np.log(1 - Y_hat) * (1 - Y))\n cost = np.squeeze(cost)\n \n # calc gradient\n dY_hat = -(Y / Y_hat) + (1 - Y) / (1 - Y_hat)\n \n return cost, dY_hat", "def compute_cost(self,X, y):\n \n num_samples = len(X)\n # Do Forward propagation to calculate our predictions\n z1 = X.dot(self.W1) + self.b1\n a1 = np.tanh(z1)\n z2 = a1.dot(self.W2) + self.b2\n exp_z = np.exp(z2)\n a2 = exp_z / np.sum(exp_z, axis=1, keepdims=True)\n softmax_scores = a2\n # Calculate the cross-entropy loss\n cross_ent_err = -np.log(softmax_scores[range(num_samples), y])\n data_loss = np.sum(cross_ent_err)\n return 1./num_samples * data_loss", "def my_loss(y_pred,y_true,n_outputs):\n y_true = tf.one_hot(tf.cast(y_true,tf.int64), n_outputs, dtype=tf.float32)\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_true,y_pred))", "def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass", "def binary_cross_entropy(preds, targets, name=None):\n eps = 1e-12\n with ops.op_scope([preds, targets], name, \"bce_loss\") as name:\n preds = ops.convert_to_tensor(preds, name=\"preds\")\n targets = ops.convert_to_tensor(targets, name=\"targets\")\n return tf.reduce_mean(-(targets * tf.log(preds + eps) +\n (1. - targets) * tf.log(1. 
- preds + eps)))", "def calc_cost(self):\n \n correct_pred = tf.equal(self.predictions, tf.argmax(self.y,1))\n batchaccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return self.cost, batchaccuracy, self.predictions", "def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce", "def grad_softmax_cross_entropy_loss(logit, labels):\n return softmax(logit) - labels", "def cross_entropy(true, pred, axis=-1, epsilon=1e-7):\n pred = ivy.clip(pred, epsilon, 1 - epsilon)\n log_pred = ivy.log(pred)\n # noinspection PyUnresolvedReferences\n return -ivy.reduce_sum(log_pred * true, axis)", "def conditional_entropy_hyper(self) -> float:\n pass", "def add_loss_op(self, pred):\n ### YOUR CODE HERE\n loss = cross_entropy_loss(self.labels_placeholder,pred)\n ### END YOUR CODE\n return loss", "def cross_entropy_cost(m, A, L):\n\tcost = (-1 / m) * np.sum(L * np.log(A) + (1 - L) * (np.ma.log(1 - A))) #Note: Using numpy masked array np.ma for values of log(0)\n\n\n\t# Sanity checks\n\tcost = np.squeeze(cost) \t#squeeze() removes single dimensional elements from the array: e.g. (1, 3, 1) -> (3,)\n\tassert(cost.shape == ()) \t#checks if cost value is a scalar\n\n\treturn cost", "def categorical_crossentropy(predictions, targets):\n return theano.tensor.nnet.categorical_crossentropy(predictions, targets)", "def categorical_crossentropy(predictions, targets):\n return theano.tensor.nnet.categorical_crossentropy(predictions, targets)", "def class_balanced_sigmoid_cross_entropy(this,logits, label, name='cross_entropy_loss'):\n with tf.name_scope('class_balanced_sigmoid_cross_entropy'):\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / ((count_neg + count_pos)+this.EPS)\n\n pos_weight = beta / ((1 - beta)+this.EPS)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n cost = tf.reduce_mean(cost * (1 - beta))\n zero = tf.equal(count_pos, 0.0)\n return tf.where(zero, 0.0, cost, name=name)", "def class_balanced_sigmoid_cross_entropy(this,logits, label, name='cross_entropy_loss'):\n with tf.name_scope('class_balanced_sigmoid_cross_entropy'):\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. 
- y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / ((count_neg + count_pos)+this.EPS)\n\n pos_weight = beta / ((1 - beta)+this.EPS)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n cost = tf.reduce_mean(cost * (1 - beta))\n zero = tf.equal(count_pos, 0.0)\n return tf.where(zero, 0.0, cost, name=name)", "def cross_entropy(self):\n return self._cross_entropy_func", "def loss(self, logits, labels):\r\n return tf.reduce_mean(tf.keras.losses.binary_crossentropy(labels,logits))", "def classification_loss(self, logit, target):\n return F.cross_entropy(logit, target)", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def loss(logits, labels):\n labels = tf.to_int64(labels)\n# labels = tf.to_float(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')\n# y_conv = tf.nn.softmax(logits)\n# cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')", "def generatorLoss(fakeOutput):\n return cross_entropy(tf.ones_like(fakeOutput), fakeOutput)", "def conditional_entropy(self) -> float:\n pass", "def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)", "def test_softmax_cross_entropy(self):\n loss_op = listwise_losses.SoftmaxCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.19868991, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0.0, atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 1.306335, atol=1e-5)", "def my_softmax_cross_entropy(preds, labels):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n # loss = tf.nn.weighted_cross_entropy_with_logits(logits=preds, targets=labels, pos_weight=0.1)\n return tf.reduce_mean(loss)", "def _bce_loss_with_logits(output, labels, **kwargs):\n return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)", "def class_balanced_cross_entropy_no_norm(this,pred, label, name='cross_entropy_loss_no_norm'):\n z = this.batch_flatten(pred)\n y = tf.cast(this.batch_flatten(label), tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / (count_neg + count_pos+this.EPS)\n\n eps = 1e-12\n loss_pos = -beta * tf.reduce_sum(y * tf.log(z + eps))\n loss_neg = (1. - beta) * tf.reduce_sum((1. - y) * tf.log(1. - z + eps))\n cost = tf.subtract(loss_pos, loss_neg, name=name) / (tf.cast(tf.shape(pred)[0], tf.float32)+this.EPS)\n return cost", "def class_balanced_cross_entropy_no_norm(this,pred, label, name='cross_entropy_loss_no_norm'):\n z = this.batch_flatten(pred)\n y = tf.cast(this.batch_flatten(label), tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / (count_neg + count_pos+this.EPS)\n\n eps = 1e-12\n loss_pos = -beta * tf.reduce_sum(y * tf.log(z + eps))\n loss_neg = (1. - beta) * tf.reduce_sum((1. - y) * tf.log(1. 
- z + eps))\n cost = tf.subtract(loss_pos, loss_neg, name=name) / (tf.cast(tf.shape(pred)[0], tf.float32)+this.EPS)\n return cost", "def loss(labels, logits):\n return sparse_categorical_crossentropy(labels, logits, from_logits=True)", "def cross_entropy(U, V):\n return -np.sum(U * np.log(V))", "def ovo_crossentropy_loss(y_true, y_pred):\n # Bei OvO wird als Aktivierungsfunktion 'tanh' verwendet. Diese produziert Werte aus (-1, 1)\n # Auf Wertebereich [0,1] hochskalieren (eigentlich möchte man (0,1) erreichen um später im Logarithmus\n # keine undefinierten Werte zu erhalten, aber wegen numerischen Problemen sind auch 0 und 1 denkbare Werte)\n y_true_scaled = (y_true + 1.0) / 2.0\n y_pred_scaled = (y_pred + 1.0) / 2.0\n\n # Wertebereich von y_pred_scaled von [0,1] auf [0.00001, 0.99999] einschränken wegen Logarithmen. Näherung an (0,1)\n\n zeroes = tf.zeros_like(y_pred_scaled) # Tensor mit gleicher Dimension wie 'y_pred_scaled' bestehend aus nur 0en\n # Alle kleineren Werte als 0.00001 in 'y_pred_scaled' auf 0.00001 setzen (untere Schranke für Wertebereich)\n y_pred_scaled = tf.where(y_pred_scaled < 0.00001, zeroes + 0.00001, y_pred_scaled)\n # Alle größeren Werte als 0.99999 in 'y_pred_scaled' auf 0.99999 setzen (obere Schranke für Wertebereich)\n y_pred_scaled = tf.where(y_pred_scaled > 0.99999, zeroes + 0.99999, y_pred_scaled)\n\n # J_{OvO} aus Pawara et al. anwenden\n log_function = tf.log if tf.__version__ == \"1.13.1\" else tf.math.log # flexibel für neue / alte Version\n loss = - tf.reduce_mean(\n y_true_scaled * log_function(y_pred_scaled) + (1 - y_true_scaled) * log_function(1 - y_pred_scaled))\n return loss", "def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = -tf.reduce_sum(weighted_losses)\n return loss", "def loss_func(output, label):\n\n return tf.losses.softmax_cross_entropy(\n logits=output, onehot_labels=label)", "def cross_entropy(input: Tensor, target: Tensor) -> Tensor:\n norm_log = log_softmax(input, 1)\n\n np_one_hot = np.eye(input.shape[1])[target.data]\n tensor_one_hot = tensor(np_one_hot, 'one-hot', False, True)\n\n mask = -norm_log * tensor_one_hot\n mask_sum = sum(mask, 1)\n loss = sum(mask_sum, 0)\n\n return loss / input.shape[0]", "def rpn_cls_loss(*args):\n y_true, y_pred = args if len(args) == 2 else args[0]\n indices = tf.where(tf.not_equal(y_true, -1))\n target = tf.gather_nd(y_true, indices)\n output = tf.gather_nd(y_pred, indices)\n lf = tf.losses.BinaryCrossentropy()\n return lf(target, output)", "def comp_cost(y_hat, y, activation, epsilon=1e-15):\n if activation == 'sigmoid':\n m = y.shape[1]\n cost = np.dot(y, np.log(y_hat).T) + np.dot((1 - y), np.log(1 - y_hat).T)\n cost = (-1. 
/ m) * cost\n cost = np.squeeze(cost) # turns [[17]] into 17).\n assert (cost.shape == ())\n elif activation == 'softmax':\n \"\"\"\n Computes x-entropy between y (encoded as one-hot vectors) and y_hat.\n \n Arguments:\n y_hat -- predictions, array (n, k), (# of examples, # of categories)\n y -- true 'label' np.array (n, k) (# of examples, # of categories)\n \n Returns:\n cost -- categorical cross entropy cost\n \n Algorithm:\n -1./N * sum_i(sum_j t_ij * log(p_ij)), i=1..len(y), j=1..k\n \n y_hat = np.clip(y_hat, epsilon, 1. - epsilon)\n -np.sum(y * np.log(y_hat + epsilog)) / y_hat.shape[0] \n \"\"\"\n cost = log_loss(y, y_hat)\n else:\n raise AttributeError('Unexpected activation function:', activation)\n return cost", "def cross_entropy(predictions, targets):\n likelihood = targets * np.log(predictions)\n return -np.sum(likelihood) / predictions.shape[0]", "def entropy(self, **kwargs) -> TensorType:", "def entropy(self, **kwargs) -> TensorType:", "def class_balanced_sigmoid_cross_entropy_no_norm(this,logits, label, name='cross_entropy_loss_no_norm'):\n\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. - y) # the number of 0 in y\n count_pos = tf.reduce_sum(y) # the number of 1 in y (less than count_neg)\n beta = count_neg / ((count_neg + count_pos)+this.EPS);\n\n pos_weight = beta / ((1 - beta)+this.EPS)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n\n cost = tf.reduce_sum(cost * (1 - beta), name=name) / (tf.cast(tf.shape(logits)[0], tf.float32)+this.EPS)\n return cost", "def class_balanced_sigmoid_cross_entropy_no_norm(this,logits, label, name='cross_entropy_loss_no_norm'):\n\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. - y) # the number of 0 in y\n count_pos = tf.reduce_sum(y) # the number of 1 in y (less than count_neg)\n beta = count_neg / ((count_neg + count_pos)+this.EPS);\n\n pos_weight = beta / ((1 - beta)+this.EPS)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n\n cost = tf.reduce_sum(cost * (1 - beta), name=name) / (tf.cast(tf.shape(logits)[0], tf.float32)+this.EPS)\n return cost", "def compute_cost(AL, Y):\n\n m = Y.shape[1] # number of example\n\n n = 10 #number of labels\n\n y_binary = get_binary_matrix(Y)\n\n # Compute the cross-entropy cost\n # sum_M = 0\n # for i in range(m):\n # sum_K = 0\n # for k in range(n):\n # cur_y = 0\n # if (Y[0, i] == k):\n # cur_y = 1\n # sum_K += -cur_y * np.log(A2[k, i]) - (1 - cur_y) * np.log(1 - A2[k, i])\n # sum_M += sum_K\n #\n # cost = 1 / m * sum_M\n\n logprobs = np.multiply(-y_binary, np.log(AL)) - np.multiply((1 - y_binary), np.log(1 - AL))\n cost = 1 / m * np.sum(logprobs)\n\n cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect.\n # E.g., turns [[17]] into 17\n\n return cost", "def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent", "def compute_cost(A2, Y, parameters):\n\n\tm = Y.shape[1] # number of example\n\n\t# Compute the cross-entropy cost\n\tlogprobs = np.multiply(np.log(A2), Y)\n\tcost = -(1/m)*(np.sum((logprobs) + np.multiply(np.log(1-A2), 1-Y)))\n\n\tcost = np.squeeze(cost) # makes sure cost is the dimension we expect. 
\n\t \t# E.g., turns [[17]] into 17 \n\tassert(isinstance(cost, float))\n\n\treturn cost", "def loss(y, y_pred):\n # assert_is_binary(y)\n # assert_is_stochastic(y_pred)\n is_binary(y)\n is_stochastic(y_pred)\n\n # prevent taking the log of 0\n eps = np.finfo(float).eps\n\n # each example is associated with a single class; sum the negative log\n # probability of the correct label over all samples in the batch.\n # observe that we are taking advantage of the fact that y is one-hot\n # encoded!\n cross_entropy = -np.sum(y * np.log(y_pred + eps))\n return cross_entropy", "def compute_cost(zn, y, reg, params, n_layers):\n\n logits = tf.transpose(zn)\n labels = tf.transpose(y)\n\n regularization = 0.0\n for i in range(n_layers):\n wn = 'W{}'.format(i)\n regularization += tf.nn.l2_loss(params[wn])\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) + (\n reg * regularization)\n\n return cost", "def loss_function(self, targets, outputs):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=outputs)\n return tf.reduce_mean(cross_entropy)", "def condentropy(truelabels, labels):\n labels=array(labels)\n truelabels=array(truelabels)\n \n condent=0.\n for l in xrange(min(labels),max(labels)+1):\n sublabels = truelabels[ labels==l ]\n condent += len(sublabels)*chl_entropy( sublabels )\n return condent/float(len(labels))", "def loss_calc(pred, label, device):\r\n # out shape batch_size x channels x h x w -> batch_size x channels x h x w\r\n # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w\r\n label = label.long().to(device)\r\n return cross_entropy_2d(pred, label)", "def loss_sce(y_pred, y_true):\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n\n return tf.reduce_mean(loss)", "def swish(x):\n return x * tf.sigmoid(x)", "def setup_loss(logits, labels):\n predictions = tf.nn.softmax(logits)\n cost = tf.losses.softmax_cross_entropy(onehot_labels=labels,\n logits=logits,\n )\n return predictions, cost", "def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)", "def SoftEntropy(nn_last_layer, correct_label, learning_rate): \n \n loss = tf2.math.reduce_sum( tf2.nn.softmax_cross_entropy_with_logits(tf2.stop_gradient(correct_label), nn_last_layer))\n \n #obtain training operation\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate, epsilon = 1e-8) #Note default value of epsilon 1e-8 results in instability after few epochs\n \n #clip the gradients\n gvs = optimizer.compute_gradients(loss)\n #capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n training_operation = optimizer.apply_gradients(gvs)\n\n return training_operation, loss", "def cost(self, X=None, y=None, theta=None):\n X = self.X_train if X is None else X\n y = self.y_train if y is None else y\n m = X.shape[1]\n\n prob_mat = self.softmax_mat(X, theta)\n cost = -(y * np.log(prob_mat)).sum() / m\n return cost", "def softmax_categorical_crossentropy(y_pred, y_true):\n with tf.name_scope(\"SoftmaxCrossentropy\"):\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred,\n y_true))", "def cross_entropy(t,y):\r\n #print(-1*t*np.log(y))\r\n #print(np.shape(np.log(y)))\r\n #print(np.shape(t))\r\n return t*np.log(y)*(-1)", "def sigmoid_cross_entropy(y, label):\r\n losses = - np.log(y + g_epsilon) * label - np.log(1.0 - y + g_epsilon) * (1.0 - label)\r\n return losses", "def obj_mix(Y_true,Y_pred):\n y_true = K.mean(Y_true,axis=0)\n if y_true == 
1:\n y_pred = K.max(Y_pred,axis=0)\n return(K.mean(K.binary_crossentropy(y_pred, y_true)))\n elif y_true == 0:\n return(K.mean(K.binary_crossentropy(Y_pred,Y_true)))\n else:\n print('unexpected value of y_true',y_true)\n return(K.mean(K.binary_crossentropy(Y_pred,Y_true)))", "def sparse_cross_entropy(true, pred, axis=-1, epsilon=1e-7):\n true = ivy.one_hot(true, pred.shape[axis])\n return cross_entropy(true, pred, axis, epsilon)", "def _build_train_op(self):\n\n logits_flatten = tf.reshape(self.logits_up, [-1, self.num_classes])\n pred_flatten = tf.reshape(self.pred, [-1, self.num_classes])\n\n labels_gt = self.labels\n\n if self.ignore_class_bg:\n # ignore background labels: 255\n gt_labels_flatten = tf.reshape(labels_gt, [-1, ])\n indices = tf.squeeze(tf.where(tf.less_equal(gt_labels_flatten, self.num_classes - 1)), 1)\n remain_logits = tf.gather(logits_flatten, indices)\n remain_pred = tf.gather(pred_flatten, indices)\n remain_labels = tf.gather(gt_labels_flatten, indices)\n xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=remain_logits, labels=remain_labels)\n else:\n xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_up, labels=labels_gt)\n\n self.cls_loss = tf.reduce_mean(xent, name='xent') # xent.shape=[nIgnoredBgPixels]\n self.cost = self.cls_loss + self._decay()\n tf.summary.scalar('cost', self.cost)\n\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n self.learning_rate = tf.train.polynomial_decay(self.lrn_rate,\n self.global_step,\n self.lr_decay_step,\n end_learning_rate=self.lrn_rate_end,\n power=0.9)\n tf.summary.scalar('learning rate', self.learning_rate)\n\n tvars = tf.trainable_variables()\n\n if self.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n elif self.optimizer == 'mom':\n optimizer = tf.train.MomentumOptimizer(self.learning_rate, 0.9)\n elif self.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(self.learning_rate)\n else:\n raise NameError(\"Unknown optimizer type %s!\" % self.optimizer)\n\n grads_and_vars = optimizer.compute_gradients(self.cost, var_list=tvars)\n var_lr_mult = {}\n for var in tvars:\n if var.op.name.find(r'fc_final_sketch46') > 0 and var.op.name.find(r'biases') > 0:\n var_lr_mult[var] = 20.\n elif var.op.name.find(r'fc_final_sketch46') > 0:\n var_lr_mult[var] = 10.\n else:\n var_lr_mult[var] = 1.\n grads_and_vars = [((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)\n for g, v in grads_and_vars]\n\n ## summary grads\n # for grad, grad_var in grads_and_vars:\n # print('>>>', grad_var.op.name)\n # if grad is None:\n # print('None grad')\n # # if grad is not None:\n # # tf.summary.histogram(grad_var.op.name + \"/gradient\", grad)\n\n apply_op = optimizer.apply_gradients(grads_and_vars,\n global_step=self.global_step, name='train_step')\n\n train_ops = [apply_op] + self._extra_train_ops\n self.train_step = tf.group(*train_ops)", "def activation(h):\n\n if(h > 0):\n return 1\n\n else:\n return 0", "def cost(self, result: [float], label: int) -> float:\n desired_outputs = Network.digit_to_one_hot(label)\n self._desired_changes = [result[i] - desired_outputs[i] for i in range(num_outputs)]\n return sum((result[i] - desired_outputs[i]) ** 2 for i in range(num_outputs))", "def cross_entropy(y, y_hat):\n return -tf.math.log(\n tf.gather_nd(y_hat, tf.reshape(y, (-1, 1)), batch_dims=1)\n )", "def cross_entropy(Y, Y_hat, epsilon=1e-8):\n \n m = Y.shape[0]\n \n # make data safe\n Y_hat = np.clip(Y_hat, a_min=epsilon, a_max=(1 - 
epsilon))\n \n # calc cost\n cost = (-1 / m) * np.nansum(Y * np.log(Y_hat))\n cost = np.squeeze(cost)\n \n # calc gradient\n dY_hat = -Y / Y_hat\n \n return cost, dY_hat", "def __compute_cost(self, x, y):\n\n predictions = self.__compute_prediction(x)\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\n\n return cost", "def loss(output, y):\n #Computes softmax cross entropy between logits and labels.\n xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)\n loss = tf.reduce_mean(xentropy)\n\n return loss", "def prediction_cost(a, y):\n return np.sum(-(y * np.log(a) + (1 - y) * np.log(1 - a)))", "def loss(y_true, y_pred):\r\n\r\n y_true = (K.flatten(y_true) + 1) / 2 # [-1~1] -> [0~1]\r\n y_pred = (K.flatten(y_pred) + 1) / 2 # [-1~1] -> [0~1]\r\n\r\n return -10. * np.log10(K.mean(K.square(y_pred - y_true)))\r\n\r\n\r\n # max_pixel = 1.0\r\n # y_true = (K.flatten(y_true) + 1) / 2 # [-1~1] -> [0~1]\r\n # y_pred = (K.flatten(y_pred) + 1) / 2 # [-1~1] -> [0~1]\r\n #\r\n # condition = tf.equal(y_true, y_pred)\r\n # psnr = 10.0 * (1.0 / math.log(10)) * K.log((max_pixel ** 2) / (K.mean(K.square(y_pred - y_true))))\r\n #\r\n # return K.switch(condition, K.zeros_like(psnr), 1-psnr/100)\r", "def get_cross_entropy(self):\n assert (self.dataset is not None) and (self.labels is not None), 'Logistic Regression requires a dataset and labels.'\n potential = 0.0\n logits = self.dataset @ self.parameters[:self.dataset.shape[1]]\n max_logits = torch.max(torch.zeros(logits.shape[0]),logits)\n potential = (-logits @ self.labels.t() + torch.sum(max_logits) + torch.sum(\n torch.log(torch.exp(-max_logits)+torch.exp(logits - max_logits))))# * n.reciprocal())\n return potential", "def cross_entropy(X, y, using_onehot=True):\n\tM = y.shape[0]\n\tif using_onehot :\n\t\tlog_likelihood = -np.log(np.max(X * y, -1))\n\telse:\n\t\tlog_likelihood = -np.log(X[range(M), y]) # 找到y对应的那个类别所对应的logit\n\tloss = np.sum(log_likelihood) / M\n\treturn loss", "def nn_cost_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, l):\n Theta_1 = np.reshape(nn_params[0:(hidden_layer_size * (input_layer_size + 1)), ],\n (hidden_layer_size, input_layer_size + 1))\n Theta_2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):, ],\n (num_labels, hidden_layer_size + 1))\n\n m, n = X.shape\n X = np.hstack((np.ones((m, 1)), X))\n\n Z_2 = X.dot(Theta_1.T)\n A_2 = sigmoid(Z_2)\n A_2 = np.hstack((np.ones((m, 1)), A_2))\n\n Z_3 = A_2.dot(Theta_2.T)\n A_3 = sigmoid(Z_3)\n\n Y = np.zeros((m, num_labels))\n for i in range(m):\n Y[i, y[i] - 1] = 1\n\n j = 0.0\n for i in range(m):\n j += np.log(A_3[i, ]).dot(-Y[i, ].T) - np.log(1 - A_3[i, ]).dot(1 - Y[i, ].T)\n j /= m\n\n Theta_1_square = np.square(Theta_1[:, 1:])\n Theta_2_square = np.square(Theta_2[:, 1:])\n reg = 1.0 * l / (2 * m) * (np.sum(Theta_1_square) + np.sum(Theta_2_square))\n j += reg\n\n d_3 = A_3 - Y\n D_2 = d_3.T.dot(A_2)\n\n Z_2 = np.hstack((np.ones((m, 1)), Z_2))\n d_2 = d_3.dot(Theta_2) * sigmoid_gradient(Z_2)\n d_2 = d_2[:, 1:]\n D_1 = d_2.T.dot(X)\n\n Theta_1_grad = 1.0 * D_1 / m\n Theta_1_grad[:, 1:] = Theta_1_grad[:, 1:] + 1.0 * l / m * Theta_1[:, 1:]\n\n Theta_2_grad = 1.0 * D_2 / m\n Theta_2_grad[:, 1:] = Theta_2_grad[:, 1:] + 1.0 * l / m * Theta_2[:, 1:]\n\n grad = np.hstack((Theta_1_grad.ravel(), Theta_2_grad.ravel()))\n\n return j, grad", "def test_sigmoid_cross_entropy(self):\n loss_op = pointwise_losses.SigmoidCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n 
\"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.54905695, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0., atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 0.6905699, atol=1e-5)", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return -torch.log2(probability_fn(args))", "def _ragged_tensor_binary_crossentropy(y_true,\n y_pred,\n from_logits=False,\n label_smoothing=0,\n axis=-1):\n fn = functools.partial(\n binary_crossentropy,\n from_logits=from_logits,\n label_smoothing=label_smoothing,\n axis=axis)\n return _ragged_tensor_apply_loss(fn, y_true, y_pred)" ]
[ "0.77854806", "0.7722801", "0.734179", "0.7316423", "0.7314664", "0.7280549", "0.7246427", "0.72080016", "0.72075146", "0.71928245", "0.71274495", "0.7125283", "0.71109074", "0.710973", "0.7100907", "0.70664674", "0.7050997", "0.7037364", "0.70035136", "0.6952975", "0.6936544", "0.6915349", "0.69133663", "0.6912001", "0.6912001", "0.6845238", "0.68192685", "0.681574", "0.6812151", "0.67867225", "0.6786485", "0.677844", "0.67742574", "0.6770112", "0.67639184", "0.6745749", "0.6735951", "0.67230153", "0.67230153", "0.6707238", "0.6707238", "0.6691799", "0.66855955", "0.6672464", "0.6645651", "0.6643186", "0.664156", "0.66365176", "0.66049165", "0.65993387", "0.6598432", "0.6580958", "0.65780914", "0.65780914", "0.65755343", "0.6567057", "0.65641356", "0.65619165", "0.6555487", "0.6555242", "0.6542394", "0.65380013", "0.653673", "0.6529547", "0.6529547", "0.6505294", "0.6505294", "0.650514", "0.64934623", "0.6488964", "0.6485907", "0.6484038", "0.6474855", "0.6473493", "0.64709324", "0.64672256", "0.6454484", "0.64538467", "0.6439029", "0.6437305", "0.64270526", "0.6419189", "0.641515", "0.6411868", "0.64106613", "0.64104855", "0.63961405", "0.6392943", "0.63868517", "0.6383754", "0.63757074", "0.6374833", "0.63746756", "0.63734573", "0.6373256", "0.6370361", "0.637025", "0.636968", "0.6362219", "0.63588214", "0.6358812" ]
0.0
-1
Calculates the amount of ORE required for a given amount of fuel
def ore_required(recipes, fuel_required=1): stock_required = {'FUEL': fuel_required} is_finished = False while not is_finished: sku, sku_quantity_required = next((sku, sku_quantity_required) for sku, sku_quantity_required in stock_required.items() if sku_quantity_required > 0 and sku != 'ORE') min_batch_size, bom = recipes[sku] batches_required = math.ceil(sku_quantity_required/min_batch_size) for quantity, ingredient in bom: stock_required[ingredient] = stock_required.get( ingredient, 0) + batches_required * quantity stock_required[sku] -= batches_required * min_batch_size is_finished = len([(sku, sku_quantity_required) for sku, sku_quantity_required in stock_required.items() if sku_quantity_required > 0 and sku != 'ORE']) == 0 return stock_required['ORE']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_fuel_simple(self):\n self._fuel_simple = (self.mass // 3) - 2", "def calc_fuel_given_ore(ore, recipes):\n\n upper_bound = None\n lower_bound = 469536 # assume ore needed for 1 fuel is good lower bound\n\n\n while lower_bound + 1 != upper_bound:\n if upper_bound is None:\n guess = lower_bound * 2\n else:\n guess = (upper_bound + lower_bound) // 2\n \n ore_needed = calc_ore_needed(guess, recipes)\n if ore_needed > ore:\n upper_bound = guess\n else:\n lower_bound = guess\n \n return lower_bound", "def _calculate_fuel(self):\n self._fuel = self._calculate_fuel_r(self._mass)", "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def fuel_calc(mass):\n return max((mass / 3) - 2, 0)", "def part2():\n input_list = read_input('input.txt')\n fuel_list = []\n for mass_in in input_list:\n helper = True\n total_fuel = 0\n mass = mass_in\n while helper or mass > 0:\n helper = False\n mass = fuel_required(mass)\n if mass > 0:\n total_fuel += mass\n fuel_list.append(total_fuel)\n return sum(fuel_list)", "def _calculate_fuel_r(self, mass):\n fuel = (mass // 3) - 2\n if fuel <= 0:\n return 0\n\n return (self._calculate_fuel_r(fuel) + fuel)", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def calculate_fuel(mass):\n return math.floor(mass/3) - 2", "def other_opex_remaining(self) -> float:\n return (\n self.income_statement.opex.other_opex\n - self.depot_overhead_cost(self.operations.productivity.avg_num_trucks)\n - self.maintenance_cost(self.operations.productivity.avg_num_trucks)\n - self.fuel_cost(self.operations.productivity.avg_num_trucks)\n )", "def calc_total_fuel(mass):\n fuel = fuel_for_mass(mass)\n\n if fuel < 0:\n return 0\n\n added_fuel = calc_total_fuel(fuel)\n return fuel + added_fuel", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def total_fuel_required(mass):\n total = 0\n while mass > 0:\n fuel = fuel_required(mass)\n total += fuel\n mass = fuel\n return total", "def fuel_required(mass):\n return int(floor(mass / 3) - 2)", "def calculate_fuel(self, mass: int) -> int:\n fuel = math.floor(mass/3)-2\n return fuel", "def fuel_cost(mass):\n total_cost = 0\n mass_to_fund = mass\n while True:\n cost = max((mass_to_fund / 3) - 2, 0)\n if cost == 0:\n break\n total_cost += cost\n mass_to_fund = cost\n return total_cost", "def calculate_total_fuel(filename):\n return sum([calculate_fuel_from_mass(mass) for mass in read_mass_from_file(filename)])", "def fuel_required(mass):\n return max(math.floor(mass/3) - 2, 0)", "def run_solution2_simple(self):\n total_fuel_sum = 0\n for n in self.data:\n module_fuel = 0\n base_fuel = n\n while base_fuel > 0:\n base_fuel = self.calculate_fuel(base_fuel)\n module_fuel += base_fuel\n total_fuel_sum += module_fuel\n return total_fuel_sum", "def get_fuel_requirements(mass: int) -> int:\n return int(mass / 3) - 2", "def part1():\n input_list = read_input('input.txt')\n return sum(map(fuel_required, input_list))", "def calculate_fuel_from_mass(mass):\n return int(mass / 3) - 2", "def 
get_total_fuel_requirements_part2(mass_lst: List[int]) -> int:\n total_fuel = 0\n for mass in mass_lst:\n while True:\n if get_fuel_requirements(mass) <= 0:\n break\n else:\n mass = get_fuel_requirements(mass)\n total_fuel += mass\n return total_fuel", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def fuel_for_mass(mass):\n return int(mass / 3) - 2", "def day1(part2: bool = False, input_filename='input.txt') -> int:\n mass_lst = load_day1_input(input_filename)\n if part2:\n total_fuel = get_total_fuel_requirements_part2(mass_lst)\n else:\n total_fuel = get_total_fuel_requirements(mass_lst)\n return total_fuel", "def calculate_total_fuel_recursively(filename):\n return sum([calculate_fuel_recursively(mass) for mass in read_mass_from_file(filename)])", "def cargo_fuel(self):\n return self._cargo_fuel", "def estimate_fuel(pos: int) -> int:\r\n positions = np.array([pos for _ in range(n_values)], dtype='int')\r\n distances = np.abs(positions - start_positions).reshape((-1, 1))\r\n fuel = np.sum(np.apply_along_axis(\r\n lambda x: fuel_expenditure[x[0]],\r\n axis=-1,\r\n arr=distances))\r\n return fuel", "def run_solution1(self):\n return reduce(lambda a, b: a + self.calculate_fuel(b), self.data, 0)", "def calculate_fuel_recursively(mass):\n fuel = calculate_fuel_from_mass(mass)\n if fuel < 0:\n return 0\n return calculate_fuel_recursively(fuel) + fuel", "def get_total_fuel_requirements(mass_lst: List[int]) -> int:\n total_fuel = 0\n for mass in mass_lst:\n fuel_requirement = get_fuel_requirements(mass)\n total_fuel += fuel_requirement\n return total_fuel", "def run_solution1_simple(self):\n fuel_sum = 0\n for n in self.data:\n fuel_sum += self.calculate_fuel(n)\n return fuel_sum", "def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)", "def search_fuel(g, ore):\n s_min, s_max = 0, ore\n while True:\n if s_min == s_max:\n return s_min\n middle = s_min + (s_max - s_min) // 2\n ore_needed = min_number_of(g, 'ORE', middle)\n if ore_needed > ore:\n s_max = middle\n else:\n if middle == s_min:\n return middle\n s_min = middle", "def energy(self):\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 
'self.annual_electric_savings',self.annual_electric_savings", "def iterative_fuel(d):\n accumulator = d\n total = 0\n while True:\n accumulator = math.floor(accumulator / 3) - 2\n if accumulator < 0:\n return total\n total += accumulator", "def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def cost(self) -> float:", "def calculate(self) -> float:", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def add_fuel(self, amount):\n if (self.fuel_level + amount <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def _calc_rebl_cost(self, ozone, max_cost=7):\n dist = Veh._get_dist_to_all_zones(ozone)[[\"DOLocationID\", \"trip_distance_meter\"]]\n # dist = veh._get_dist_to_all_zones(veh.ozone)[[\"DOLocationID\", \"trip_distance_meter\"]]\n # this is the costliest operation! 
\n dist[\"costs\"] = ((dist.trip_distance_meter * self.data_obj.FUEL_COST).apply(\n lambda x: np.around(x, 1))) / max_cost\n # dist[\"costs\"] = dist[\"costs\"].apply(lambda x: np.around(x, 1))\n # dist[\"costs\"] /= max_cost\n\n return dist", "def test_function_fuel_sum(data, mode_constrained, space_heating_enduses):\n fuel_in = 0\n fuel_in_solid_fuel = 0\n fuel_in_gas = 0\n fuel_in_elec = 0\n fuel_in_oil = 0\n fuel_in_heat = 0\n fuel_in_hydrogen = 0\n fuel_in_biomass = 0\n\n fuel_heating_all_fueltypes = 0\n fuel_heating_gas = 0\n tot_heating = 0\n #mode_constrained = True #SCRAP\n\n for region in data['rs_fuel_disagg']:\n for enduse in data['rs_fuel_disagg'][region]:\n fuel_in += np.sum(data['rs_fuel_disagg'][region][enduse])\n fuel_in_heat += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses: #Exclude inputs for heating\n tot_heating += np.sum(data['rs_fuel_disagg'][region][enduse])\n #pass\n else:\n fuel_in_elec += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['biomass']])\n \n for region in data['ss_fuel_disagg']:\n for enduse in data['ss_fuel_disagg'][region]:\n for sector in data['ss_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n for region in data['is_fuel_disagg']:\n for enduse in data['is_fuel_disagg'][region]:\n for sector in data['is_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n 
fuel_in_hydrogen += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n return fuel_in, fuel_in_biomass, fuel_in_elec, fuel_in_gas, fuel_in_heat, fuel_in_hydrogen, fuel_in_solid_fuel, fuel_in_oil, tot_heating", "def get_o_energies(mol):\n try:\n ev_to_hartree = 1./convertor(1,'hartree','eV')\n g=hack_parser.Gaussian(mol.calc.log, loglevel=50)\n d=g.parse()\n #lm, hm, lr\n o_component_es = np.array(d.oniomenergies)\n except AttributeError:\n return 0\n\n return (ev_to_hartree * o_component_es * [-1,1,1]).sum(axis=1)", "def fuel_cost(self, update=False):\n if update or self._dfs['fuel_cost'] is None:\n self._dfs['fuel_cost'] = pudl.analysis.mcoe.fuel_cost(self)\n return self._dfs['fuel_cost']", "def calc_price(self):\n price = self.price\n action = self.action\n mortage = 5 # here set mortage multiplier \n\n if action == 'RESIDENTIAL_SALE':\n return price * 12 * mortage\n\n\n if price >= 10000:\n return price * 0.7\n elif price < 10000 & price >= 5000:\n return price * 0.55\n elif price < 5000 & price >= 2800:\n return price * 0.475\n else:\n return price * 0.4", "def calc_energy_and_price(self) -> (float, float):\n\n cost_sum = 0\n energy_sum = 0\n for pump_id in self.pumps:\n pump_energy, pump_cost = self.pumps[pump_id].calculate_energy_and_cost()\n cost_sum += pump_cost\n energy_sum += pump_energy\n\n pump_id.append_index = 0\n\n assert energy_sum >= 0, \"The pumping energy cant be negative!\"\n assert cost_sum >= 0, \"The pumping cost cant be negative!\"\n return energy_sum, cost_sum", "def ComputeEnergyConsumption(self):\r\n pass", "def get_total_supply() -> int:\n return total_supply", "def factorizeLinearSum(eoi400,ei400,eo400,e400,eoi280,ei280,eo280,e280):\n\n var = \"atm/TREFHT\"\n dT = eoi400 - e280\n\n dTCO2 = (2 * (e400 - e280) +\n (eo400 - eo280) +\n (ei400 - ei280) +\n 2 * (eoi400 - eoi280)) / 6\n\n dTtopo = (2 * (eo280 - e280) +\n (eo400 - e400) +\n (eoi280 - ei280) +\n 2 * (eoi400 - ei400)) / 6\n\n dTice = (2 * (ei280 - e280) +\n (ei400 - e400) +\n (eoi280 - eo280) +\n 2 * (eoi400 - eo400)) / 6\n\n return dT, dTCO2, dTtopo, dTice", "def fuel_prediction(self):\n\n return 0", "def recursive_fuel(mass):\n\n if mass <= 0:\n return 0\n else:\n return mass + recursive_fuel(floor(mass / 3) - 2)", "def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def add_fuel(self, amount):\n if (self.fuel_level + amount\n <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel.\")\n else:\n print(\"The tank won't hold that much.\")", "def emission_factor(sheets, vehicleClass, speedRegime, pollutant):\r\n # get 
bc from excel. The year is hard coded to 2015\r\n f = sheets[\"Emissiefactoren CAR-VL3.0\"]\r\n idx = f[f.iloc[:,0] == vehicleClass + speedRegime + '2015'].index\r\n if len(idx) == 0:\r\n print(\"EFError: No ef corresponds to vehicle class {} and speed type {}.\".format(vehicleClass, speedRegime))\r\n return 0\r\n return float(f['EF_' + pollutant][idx])", "def MFE_rel(self):\n try:\n return(self.MFE / self.price_open)\n except:\n return", "def transport_cost_per_t(self):\n return safe_divide(self.reseller.operating_expenses(), self.quantity_fieldside)", "def total_cost(self):\n return (self.food_amount + self.local_transport_amount + self.other_expenses +\n self.travel_amount + self.accomodation_amount)", "def sum_fuel(fuel_list):\n return functools.reduce(lambda a, b: a+b, fuel_list)", "def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def calculate_cost(self):\n booking_days, booking_hours = self.calculate_daily_hourly_billable_counts()\n day_cost = booking_days * Decimal(self.vehicle.type.daily_rate)\n hour_cost = booking_hours * Decimal(self.vehicle.type.hourly_rate)\n if hour_cost > self.vehicle.type.daily_rate:\n hour_cost = self.vehicle.type.daily_rate\n return float(day_cost + hour_cost)", "def compute_total(price):\n\n quantity = 20\n return price * quantity", "def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def lot_leverage(self): \n return 20", "def get_total(self):\n\n base_price=5\n if self.species == \"Christmas\":\n base_price=1.5*base_price\n \n total = (1 + self.tax) * self.qty * base_price\n\n if self.order_type==\"international\" and self.qty<10:\n total+=3\n\n return total", "def calculate_purity(self, purity_coefficent_1):\r\n\r\n #acquire inputs\r\n data = self.get_raw_data()\r\n\r\n #initialise variables\r\n total_1 = 0\r\n\r\n #calculate sum of sensor inputs\r\n for i in data:\r\n total_1 += i[0]\r\n\r\n #calculate time elapsed and number of data points per sensor\r\n time_elapsed = data[-1][1]\r\n number_data_points = len(data)\r\n \r\n #calculate purity using simplified sum \r\n purity_1 = (total_1*time_elapsed*purity_coefficent_1)/number_data_points\r\n \r\n #give all the purity values (one for each sensor method - these need to be assigned appropriately when reported)\r\n return(purity_1)", "def _compute_gasto_subtotal(self):\n beneficio = ingreso_subtotal - gasto_subtotal_comercial", "def cost(self):\n\t\treturn self.g + self.h", "def get_total(self):\n # method on the class DomesticMelonOrder\n base_price = 5\n\n if self.species == \"Christmas melons\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total", "def calc_excess_energy (self):\n #~ print sorted(self.cd.keys())\n self.excess_energy = \\\n (self.generation_wind_proposed - self.transmission_losses) * \\\n (self.cd['percent excess energy'] / 100.0)\n #~ print 'self.excess_energy',self.excess_energy", "def CalculateGasEneregyCost(self, dollarsPerDTH = 6.53535):\n\t\tdthUsed = self.building_hvac.GetGasDTH()\n\t\treturn dthUsed * dollarsPerDTH", "def amount_5_conto_energia(self, production, verbose=False):\n en_autocons = round(production*self.perc_autocons,2) \n energia_immessa_in_rete = round(production - en_autocons, 2)\n tot_incent_autocons = 
round(en_autocons*self.incent_5_autocons,3)\n tot_incent_omnic = round(self.incent_5_omnic*energia_immessa_in_rete,3)\n tot_incent_EU = round(production*self.incent_5_EU,2)\n\n tot_5 = tot_incent_autocons + tot_incent_omnic + tot_incent_EU - self.spese_5\n if verbose:\n print( \"production\" , production)\n print( \"tot_incent_autocons\", tot_incent_autocons)\n print( \"tot_incent_omnic\" , tot_incent_omnic)\n print( \"tot_incent_EU\" , tot_incent_EU)\n print( \"spese\" , spese)\n return tot_5", "def car_cost(self,car,car_stations):\n car_cost = car.capacity # Aqui le deberiamos aumentar el peso\n #de acuerdo a la capacidad del carro y la cantidad de lugares \n #en la estacion\n return car_stations", "def Total_energy(angles):\n energy = 0\n \n for i in range(0,4):\n energy += Single_spin_energy(i,angles)\n return energy", "def amount_to_charge(opportunity):\n amount = float(opportunity.amount)\n if opportunity.agreed_to_pay_fees:\n total = (amount + 0.30) / (1 - 0.022)\n else:\n total = amount\n return quantize(total)", "def somme(self) -> Numeric:\n return query_sum(\n self.offre_set.filter(valide=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def cost(self):\n abs_cost = sum(f['price'] * f['qty'] for f in self.fills)\n return -abs_cost if self.is_ask() else abs_cost", "def get_percent_oxygen(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[1]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'po read error: {err}')\n return -1", "def get_cost(self) -> float:\n return math.e / self.fitness", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def spendFuelToSurvive(self):\n fuelNeeded = self.getLightUpkeep()\n woodNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n )\n woodUsed = min(self.cargo[\"wood\"], woodNeeded)\n fuelNeeded -= woodUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n self.cargo[\"wood\"] -= woodUsed\n if fuelNeeded <= 0:\n return True\n\n coalNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n )\n coalUsed = min(self.cargo[\"coal\"], coalNeeded)\n fuelNeeded -= coalUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n self.cargo[\"coal\"] -= coalUsed\n\n if fuelNeeded <= 0:\n return True\n\n uraniumNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n )\n uraniumUsed = min(self.cargo[\"uranium\"], uraniumNeeded)\n fuelNeeded -= uraniumUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n self.cargo[\"uranium\"] -= uraniumUsed\n\n if fuelNeeded <= 0:\n return True\n\n return fuelNeeded <= 0", "def calc_electric_diesel_reduction (self):\n gen_eff = self.cd[\"diesel generation efficiency\"]\n\n self.electric_diesel_reduction = self.net_generation_wind / gen_eff\n\n electric_diesel = self.generation/gen_eff\n if self.electric_diesel_reduction > electric_diesel:\n self.electric_diesel_reduction = electric_diesel", "def calcular_ocupacion():\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary=0\"\"\")\n\tnormales = cur.fetchall()\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM 
sansanito\n\t\t\t\tWHERE legendary=1\"\"\")\n\tlegendarios = cur.fetchall()\n\t# Calcula la ocupacion como cant_normales * 1 + cant_legendarios * 5\n\tocupado = normales[0][0] + 5 * legendarios[0][0]\n\treturn ocupado", "def total_calories(self, weight=75):\n return weight * 0.862911 * self.total_distance", "def kilometres_available(self):\n return self.fuel / self.litres_per_kilometre", "def convert_fuelconsumption(self, event):\n try:\n #Compare other unit to one unit(liters/100 kilometer)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n if current_value != 0:\n unit_comp = {\"car(2014 US Average)\": 9.260417, \"gallon(UK)/100 miles\": 2.824809, \"gallon(US)/100 miles\": 2.352146, \"kilometer/liter\": 100.0 / (current_value ** 2), \"liters/100 kilometer\": 1.0, \"liters/meter\": 100000.0, \"miles/gallon(UK)\": 282.480936 / (current_value ** 2), \"miles/gallon(US)\": 235.214583 / (current_value ** 2)}\n else: #In case current_value == 0, it will error coz number division by zero.\n unit_comp = {\"car(2014 US Average)\": 1.0, \"gallon(UK)/100 miles\": 1.0, \"gallon(US)/100 miles\": 1.0, \"kilometer/liter\": 1.0, \"liters/100 kilometer\": 1.0, \"liters/meter\": 1.0, \"miles/gallon(UK)\": 1.0, \"miles/gallon(US)\": 1.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def calculate_energy(mol: str, method: str = \"wB97X/6-31g*\") -> unit.Quantity:\n e = psi4.energy(method, molecule=mol) * hartree_to_kJ_mol\n return e * unit.kilojoule_per_mole", "def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total", "def RestEnergy(self):\n return (self.restMass * const.speed_of_light * const.speed_of_light)", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def totalElectricBonusDamage(self):\n return int(self._baseElectricBonusDamage +\n self._equipmentElectricBonusDamage +\n self._statusElectricBonusDamage)", "def steel_total_force(self,strain_dis, newFOS):\r\n\t\ttotal_force = 0.0\r\n\t\tfor steel in self.reinforcement:\r\n\t\t\tstrain = np.interp(steel[0], self.mesh_center,strain_dis)\r\n\t\t\tforce = (self.steel(strain, newFOS)-self.concrete(strain, newFOS))*steel[1]\r\n\t\t\ttotal_force = total_force + force\r\n\t\treturn total_force", "def depot_labor_cost(self, num_trucks: int) -> float:\n depots_needed = np.ceil(num_trucks / self.inputs.trucks_per_depot)\n total_depot_employees = self.inputs.employees_per_depot * depots_needed\n return (\n total_depot_employees\n * self.operations.productivity.working_days_per_year\n * self.operations.driver_cost_per_truck_day()\n )", "def amount_4_conto_energia(self, production, verbose=False):\n en_autocons = round( production * self.perc_autocons) \n energia_immessa_in_rete = production - en_autocons\n tot_incentivo_all_prod = production * self.incentivo_all_prod\n if en_autocons < self.used_external_en:\n tot_incentivo_ssp = self.incentivo_ssp * energia_immessa_in_rete\n 
else:\n tot_incentivo_ssp = self.incentivo_ssp * energia_immessa_in_rete + \\\n self.eccedenze * (en_autocons - self.used_external_en )\n if verbose:\n print(\"\\nincentivo_ssp: \",self.incentivo_ssp)\n print(\"incentivo_all_prod: \",self.incentivo_all_prod)\n print(\"en_autocons \", en_autocons) \n print(\"energia_immessa_in_rete \", energia_immessa_in_rete)\n print(\"tot_incentivo_all_prod \", tot_incentivo_all_prod) \n print(\"tot_incentivo_ssp \", tot_incentivo_ssp)\n \n \n return tot_incentivo_all_prod + tot_incentivo_ssp - self.spese_4", "def energy(energy_name: str) -> float:\n pass", "def diff_equilibrium_equation(flow, capacity, network, route_idx): \r\n total_flow = np.sum(flow)\r\n if 0 < total_flow < 1:\r\n diff = np.log(flow[route_idx]) + network.beta * (flow[route_idx]**network.theta) - network.b[route_idx] + \\\r\n network.phi * flow[route_idx]/capacity[route_idx] - np.log(1-total_flow) \r\n return diff \r\n else:\r\n print(\"The given flow vector is not feasible.\")\r\n return 0" ]
[ "0.71351874", "0.6992234", "0.6928581", "0.6891486", "0.6802863", "0.67839015", "0.670754", "0.6705683", "0.66584235", "0.6638039", "0.663743", "0.6634727", "0.6633174", "0.65847164", "0.65335196", "0.6518364", "0.64671177", "0.644818", "0.644271", "0.6359727", "0.6337555", "0.62942946", "0.62707514", "0.6170932", "0.6169476", "0.61308503", "0.6125649", "0.6120324", "0.6087149", "0.60806423", "0.60202223", "0.6006361", "0.5992684", "0.5991621", "0.5980555", "0.5973069", "0.5831507", "0.582346", "0.5809953", "0.5799739", "0.5785498", "0.5771725", "0.5762847", "0.57609415", "0.57590693", "0.5740437", "0.57324994", "0.57203674", "0.5718764", "0.5710263", "0.5709262", "0.5703083", "0.5698183", "0.56978637", "0.5697658", "0.5690086", "0.5686111", "0.5672286", "0.5668492", "0.5650727", "0.5643896", "0.56350857", "0.5624375", "0.56224823", "0.5606688", "0.5605563", "0.5602738", "0.56022644", "0.559654", "0.55921614", "0.5580377", "0.55795014", "0.5565814", "0.5564244", "0.55500144", "0.5546688", "0.5544399", "0.5543369", "0.5539634", "0.55303305", "0.552613", "0.55249727", "0.55209", "0.55133086", "0.5512197", "0.5509746", "0.5506135", "0.55040866", "0.54953593", "0.5489461", "0.54873973", "0.5485027", "0.54790926", "0.5476072", "0.54709786", "0.54638135", "0.5458625", "0.5454703", "0.5450213", "0.54339164" ]
0.64869946
16
Ask user for an upper bound and returns a list containing an upper and lower bound
def get_bounds(): lower_bound = 0 upper_bound = input("Please enter a whole number: ") domain = [lower_bound, upper_bound] return domain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bounds():\n return [0.00], [1.00]", "def AskForNumberRange():\n\n\twhile True:\n\t\t# This OUTER loop will loop forever until the user enters correct integers for\n\t\t# lower and upper bound, such that lobound < hibound.\n\n\t\twhile True:\n\t\t\t# This INNER loop will loop forever until the user enters a valid value for lobound\n\t\t\tprint \"Enter the LOWER bound for the range of numbers, or press enter for default 1:\"\n\t\t\tlobound = SolicitInteger( default_return=1 )\n\t\t\tif lobound != None:\n\t\t\t\tprint \"Ok, lower bound of {}.\".format( lobound )\n\t\t\t\tbreak\n\n\t\twhile True:\n\t\t\t# This INNER loop will loop forever until the user enters a valid value for hibound\n\t\t\tprint \"Enter the UPPER bound for the range of numbers that's greater than the lowerbound, or press enter for default 20:\"\n\t\t\thibound = SolicitInteger( default_return=20 )\n\t\t\tif hibound != None:\n\t\t\t\tprint \"Ok, upper bound of {}.\".format( hibound )\n\t\t\t\tbreak\n\n\t\tif lobound < hibound:\n\t\t\t# We've got what we need! return out of this function!\n\t\t\treturn lobound, hibound\n\n\t\t# Uh oh. If we're still here, the user didn't enter in a correct range\n\t\tprint \"***Invalid input: upper bound must be greater than lower bound***\"\n\t\t# Back to the beginning of the outer loop", "def create_range():\n limit1, limit2 = (int(num) for num in input(\"Please, specify range limits using space: \").split())\n nums_in_range = []\n for _ in range(limit1, limit2 + 1):\n nums_in_range.append(int(_))\n return limit1, limit2, nums_in_range", "def create_bound_for_scipy(lb, ub):\n lb = tuple(map(convert_inf_to_none, lb))\n ub = tuple(map(convert_inf_to_none, ub))\n return list((lb[i], ub[i]) for i in range(len(ub)))", "def get_range(lst):\n return float(max(lst)) - float(min(lst))", "def _parse_bounds(self, bounds):\n try:\n if bounds == None:\n return None\n elif not isinstance(bounds[0], tuple):\n if len(bounds)==2:\n return [tuple(bounds) for i in range(self.params.size)]\n else:\n raise ValueError\n elif len(bounds) == self.params.size:\n if all([len(b)==2 for b in bounds]):\n return bounds\n else:\n raise ValueError\n else:\n raise ValueError\n except:\n raise ValueError(\"'bounds' should be a list of two elements \"\n \"[lb, ub], or a list of the same length as the number of \"\n \"parameters where each element is a tuple (lb, ub)\")", "def upper_bound(self) -> float:\n ...", "def get_bounds(f, lb=0, ub=None):\r\n lb_idx = np.searchsorted(f, lb, 'left')\r\n if ub == None:\r\n ub_idx = len(f)\r\n else:\r\n ub_idx = np.searchsorted(f, ub, 'right')\r\n\r\n return lb_idx, ub_idx", "def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)", "def constrain(inputVal, lower_limit, upper_limit):\n \n if (inputVal < lower_limit):\n return lower_limit\n elif (inputVal > upper_limit):\n return upper_limit\n else:\n return inputVal", "def max_values(self, lower, upper):\n if not self.upper_bounds is None:\n return self.upper_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.upper_bounds = plus.dot(upper) + minus.dot(lower) + self.const\n \n return self.upper_bounds", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def only_bounds(must_print):\n\n #Extracting input.\n input = find_input()\n\n #Running the experiment.\n result = bound.execute_script(input, 
must_print, True)[2:]\n #result = bound2.execute_script(input, must_print, True)[2:]\n\n #Storing output.\n store_output(result) #result = [P_BOUND, R_BOUND]", "def get_boundaries(outputs: list, inv_choices: str) -> list:\n process_output = []\n\n def process_partition(partition):\n boundaries = set()\n if '-' in partition:\n p = list(map(int, partition.split('-')))\n boundaries.update({p[0] - 1, p[0], p[0] + 1, p[1] - 1, p[1], p[1] + 1})\n\n sort_bounds = sorted(boundaries)\n inv_min_bound = [sort_bounds.pop(0), inv_choices]\n inv_max_bound = [sort_bounds.pop(), inv_choices]\n # Since it is sorted and starting from the lowest, only the lowest value need to be checked.\n if not any(inv_min_bound[0] in sublist for sublist in process_output):\n process_output.extend([inv_min_bound, inv_max_bound])\n # Check if overlapping and if it is, check the answer (last item) and replace it if had the invalid choice.\n for i in sort_bounds:\n if any(i in sublist for sublist in process_output):\n ind = get_index(process_output, i)\n ans = process_output[ind][-1]\n if ans == inv_choices:\n process_output.pop(ind)\n\n process_output.append([i, output_answer])\n\n for part in outputs:\n output_answer = part.pop()\n if len(part) == 1 and isinstance(part, list):\n\n part = ''.join([p for p in part if isinstance(p, str)])\n process_partition(part)\n else:\n logger.critical('LIMITATION: Not able to process more than 1 BVA variable per function!')\n break\n return process_output", "def bounds(self) -> typing.List[float]:\n raise NotImplementedError()", "def return_parameter_bounds(maximum_luminosity=20):\n return [(maximum_luminosity, maximum_luminosity + 3),\n (3 * 10 ** -4, 8 * 10 ** -3), (2., 350), (-8., -0.2),\n (-400, 400)]", "def boundary(quantity, lower, upper):\r\n in_range = False\r\n while not in_range:\r\n if quantity < lower or quantity > upper:\r\n quantity = int(input(\"That is out of range, please try a number between \" + \\\r\n str(lower) + \" and \" + str(upper) + \": \"))\r\n else:\r\n in_range = True\r\n return quantity", "def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds", "def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output", "def bounds(x, xMin, xMax):\n if (x < xMin):\n x = xMin\n elif (x > xMax):\n x = xMax\n return(x)", "def checkRange(x,y,w,h,maxW,maxH):\n if x < 0:\n x = 0\n if y < 0:\n y = 0\n if x + w >= maxW:\n w = maxW-x-1\n if y + h >= maxH:\n h = maxH-y-1\n return [x,y,w,h]", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def get_bounds(self, A: list, c: int) -> (int, int):\r\n\r\n # This implementation uses two binary search algorithms to find\r\n # the upper and lower bound.\r\n # First step is to isolate the upper_bound.\r\n\r\n L = 0\r\n R = len(A)\r\n while L < R:\r\n # Find 
the middle value\r\n m = math.floor((L + R) / 2)\r\n v = A[m]\r\n\r\n # Check if |A[i] - i| < c:\r\n if abs(v - m) > c:\r\n # This step is important, if we are on a negative number\r\n # We need to move right instead of left.\r\n if v < 0 or (v - m) < 0:\r\n L = m + 1\r\n else:\r\n # Else, we need to move towards the left.\r\n R = m\r\n else:\r\n # If it matches the condition, move the left up because we're\r\n # going towards the lowest number.\r\n L = m + 1\r\n upper_bound = R\r\n\r\n # Now that we have the upper bound, we only need to\r\n # Binary search for the lower bound between index 0 and upper_bound.\r\n L = 0\r\n R = upper_bound\r\n while L < R:\r\n # find the middle\r\n m = math.floor((L + R) / 2)\r\n if abs(A[m] - m) > c:\r\n # If it's greater, move the left up.\r\n L = m + 1\r\n else:\r\n # Else, move the right down.\r\n R = m\r\n\r\n # Finally we have the lower bound.\r\n lower_bound = L\r\n\r\n # Return the lower bound and the upper bound index\r\n # Note the -1 because the upper bound will give the\r\n # size of the array in worst case.\r\n return lower_bound, upper_bound - 1", "def input_bounds(self):\n return self._min_input, self._max_input", "def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))", "def _process_bounds(self, param_name, bounds):\n l = self.n_elements_one_param(param_name)\n if(ut.is_iter(bounds)):\n if(len(bounds) == l):\n if(l!=2):\n res = [self._process_individual_bound(b) for b in bounds]\n else:\n try:\n res = [self._process_individual_bound(b) for b in bounds]\n except:\n res_one = self._process_individual_bound(bounds)\n res = [res_one for _ in range(l)]\n\n elif(len(bounds) == 2):\n # slightly tricky as l can be = 2\n res_one = self._process_individual_bound(bounds)\n res = [res_one for _ in range(l)]\n\n else:\n raise ValueError('Bounds length (%s) is not recognized. 
'% (len(bounds)))\n else:\n res_one = self._process_individual_bound(bounds)\n res = [res_one for _ in range(l)]\n \n return res", "def constraint_clause_in_range_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if isinstance(values, list):\n # Make sure list has exactly two elements\n if len(values) == 2:\n lower, upper = values\n the_type = presentation._get_type(context)\n\n # Lower bound must be coercible\n lower = coerce_value(context, presentation, the_type, None, None, lower, field.name)\n\n if upper != 'UNBOUNDED':\n # Upper bound be coercible\n upper = coerce_value(context, presentation, the_type, None, None, upper, field.name)\n\n # Second \"in_range\" value must be greater or equal than first\n if (lower is not None) and (upper is not None) and (lower >= upper):\n context.validation.report(\n u'upper bound of \"in_range\" constraint is not greater than the lower bound'\n u' in \"{0}\": {1} <= {2}'\n .format(presentation._container._fullname, safe_repr(lower),\n safe_repr(upper)),\n locator=presentation._locator, level=Issue.FIELD)\n else:\n context.validation.report(\n u'constraint \"{0}\" is not a list of exactly 2 elements in \"{1}\": {2}'\n .format(field.name, presentation._fullname, safe_repr(values)),\n locator=presentation._get_child_locator(field.name), level=Issue.FIELD)", "def simplebounds(cls, val, lower, upper):\n if val < lower:\n val = lower\n if val > upper:\n val = upper\n return val", "def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)", "def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def eval_strategy_range(make_strategy, lower_bound, upper_bound):\r\n best_value, best_win_rate = 0, 0\r\n value = lower_bound\r\n while value <= upper_bound:\r\n strategy = make_strategy(value)\r\n win_rate = compare_strategies(strategy)\r\n print('Win rate against the baseline using', value, 'value:', win_rate)\r\n if win_rate > best_win_rate:\r\n best_win_rate, best_value = win_rate, value\r\n value += 1\r\n return best_value", "def getx(v, lb, ub, i, B):\r\n x = lb + np.multiply((ub - lb), v)\r\n x[i] = B - (x.sum() - x[i])\r\n # Test if variable x[i] is within the bounds\r\n if x[i] <= ub[i] and x[i] >= lb[i]:\r\n return x\r\n else:\r\n return np.array([])", "def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()", "def compute_pair_bounds(self, edges, pair):\n lower_bounds =[]\n upper_bounds = []\n for arc in edges:\n l_e = self.arc_info[arc][\"lower_bound\"]\n u_e = self.arc_info[arc][\"upper_bound\"]\n f_mij = self.compute_f_mij(arc, pair)\n lower_bounds.append(l_e - f_mij)\n upper_bounds.append(u_e - f_mij)\n lb = max(lower_bounds + [0])\n # in case no edges in here, make max of 5,000\n if len(upper_bounds) == 0:\n i = pair[0]\n j = pair[1]\n print(\"Path i ({}): {}\".format(i, self.paths[i]))\n print(\"Path j ({}): {}\".format(j, self.paths[j]))\n ub = min(upper_bounds + [5000])\n #print(\"lower bounds: {}\".format(lower_bounds))\n #print(\"upper 
bounds: {}\".format(upper_bounds))\n return(lb, ub)", "def get_l_n_u_ramping(ppc, lower_bound, upper_bound, Nhrs=2):\n \"\"\"lower_bound must be neagtiva, upper_bound must be positive\"\"\"\n gens_hrs = ppc['gen'][:, 0]\n gens_hrs = np.sort(gens_hrs)\n \n n_buses = set_n_buses(ppc, Nhrs)\n n_gens = len(gens_hrs) // 2\n l = np.zeros(n_buses)\n u = np.zeros(n_buses)\n for i in range(len(l)):\n if (i+1) in gens_hrs:\n l[i] = lower_bound\n u[i] = upper_bound\n else:\n l[i] = -np.inf\n u[i] = np.inf\n return l, u", "def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res", "def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)", "def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums", "def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))", "def lower_bound(self) -> float:\n ...", "def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def range_around(goal_val: int, spread: int, min_val: int = 0, max_val: int = math.inf):\n lower = max(min_val, goal_val - spread)\n upper = min(max_val, goal_val + spread)\n return (lower, upper)", "def get_range(n0: int, n1: int, ns: int) -> List[int]:\n # Return a range as a list\n def lrange(a, b, n=1) -> List[int]:\n return list(range(a, b, n))\n # Get the in-bounds part of the range\n n_range = lrange(max(0, n0), min(ns, n1))\n # Handle out-of-bounds indices by reflection across boundaries\n if n0 < 0:\n # Underflow\n n_range = lrange(-n0, 0, -1) + n_range\n if n1 > ns:\n # Overflow\n n_range = n_range + lrange(ns - 1, 2 * ns - n1 - 1, -1)\n\n return n_range", "def location_bounds(glimpse_w, input_w):\n offset = float(glimpse_w) / input_w\n lower = (-1 + offset)\n upper = (1 - offset)\n\n assert lower >= -1 and lower <= 1, 'lower must be in (-1,1), is {}'.format(lower)\n assert upper >= -1 and upper <= 1, 'upper must be in (-1,1), is {}'.format(upper)\n\n return lower, upper", "def get_range(start, stop):\n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n print(nums)", "def RestrictionRangeBound(self, compsIdList, lowerBound, upperBound):\n for i in range(len(compsIdList)): compsIdList[i] -= 1\n if self.solverTypeOptimize:\n self.solver.add(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) >= lowerBound)\n else:\n self.solver.assert_and_track(\n PbGe(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]),\n lowerBound), \"LabelRangeBound: \" + str(self.labelIdx))\n self.labelIdx += 1\n if 
self.solverTypeOptimize:\n PbLe(self.solver.add(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]),\n upperBound))\n else:\n self.solver.assert_and_track(\n sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) <= upperBound, \"LabelRangeBound: \" + str(self.labelIdx))\n self.labelIdx += 1", "def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]", "def index_interval(lb: int, ub: int, nbits=None, graycode=False) -> List[int]:\n if graycode:\n assert nbits is not None\n else:\n assert lb <= ub\n\n window = []\n i = lb\n while True:\n window.append(i)\n if i == ub:\n break\n i = increment_index(i, 1, nbits, graycode)\n return window", "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def checkRange(currentNumRange: tuple, currentLevel: int):\n\n\tlowerNumber, higherNumber = currentNumRange[0], currentNumRange[1]\n\tmid = (higherNumber + lowerNumber) // 2\n\tans = getAnswer(f\"Does your number is greater than {mid}?\", mid)\n\n\tif ans:\n\t\tlowerNumber = mid\n\telse:\n\t\thigherNumber = mid\n\n\n\treturn (lowerNumber, higherNumber)", "def is_in_range(value: float, lower_bound: float, upper_bound: float, err_string: str) -> None:\n if value < lower_bound or value > upper_bound:\n print('\\n' + err_string + '\\n')\n sys.exit(1)", "def _convert_bound(value, lower_bound, upper_bound):\n # Converts value to 16 bit two's complement integer via bitwise.\n most_sig_bit = 0x8000\n\n # Gets the two least significant bits\n convert_val = value & _BYTE << _BYTE_SIZE | value & _BYTE\n # Extends the most significant bit if it is a 1. This is done by\n # carrying out the most significant bit.\n if bool(convert_val & most_sig_bit):\n convert_val |= ~(_BYTE << _BYTE_SIZE | _BYTE)\n\n # Bounds the converted value\n if convert_val > upper_bound:\n return upper_bound\n elif convert_val < lower_bound:\n return lower_bound\n return convert_val", "def _break_list(in_list, index, parameter):\n array = np.array([parameters[index] for parameters in in_list],\n dtype=np.float64)\n upper_array = array[array >= parameter]\n lower_array = array[array <= parameter]\n\n if upper_array.size == 0:\n raise exceptions.ParameterOutOfBounds(\n \"Parameter '{0}' exceeds data. Max allowed={1}, \"\n \"entered={2}.\".format(_PARAM_NAMES[index], array.max(), parameter))\n if lower_array.size == 0:\n raise exceptions.ParameterOutOfBounds(\n \"Parameter '{0}' exceeds data. 
Min allowed={1}, \"\n \"entered={2}.\".format(_PARAM_NAMES[index], array.min(), parameter))\n\n upper = upper_array.min()\n lower = lower_array.max()\n upper_list = []\n lower_list = []\n\n for i, parameters in enumerate(in_list):\n if array[i] >= parameter and array[i] <= upper:\n upper_list.append(parameters)\n if array[i] >= lower and array[i] <= parameter:\n lower_list.append(parameters)\n\n return upper_list, lower_list", "def rvsWithinbounds(self,lowerBound,upperBound):\n CDFupper = self._distribution.cdf(upperBound)\n CDFlower = self._distribution.cdf(lowerBound)\n randResult = self.rvsWithinCDFbounds(CDFlower,CDFupper)\n return randResult", "def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])", "def _builtin_between(low, high, value, **k):\n mode = check_mode((low, high, value), ['iii', 'iiv'], functor='between', **k)\n low_v = int(low)\n high_v = int(high)\n if mode == 0: # Check\n value_v = int(value)\n if low_v <= value_v <= high_v:\n return [(low, high, value)]\n else: # Enumerate\n results = []\n for value_v in range(low_v, high_v + 1):\n results.append((low, high, Constant(value_v)))\n return results", "def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_", "def get_upper_bound(self):\n return self._upper_bound", "def bounds(self): # -> tuple[()]:\n ...", "def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. 
%s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]", "def get_l_n_u_inegral(ppc, lower_bound, upper_bound, Nhrs=2):\n \"\"\"either lower and upper bound must be positive\"\"\"\n gens_hrs = ppc['gen'][:, 0]\n gens_hrs = np.sort(gens_hrs)\n \n n_buses = set_n_buses(ppc, Nhrs)\n n_gens = len(gens_hrs) // 2 \n l = np.zeros(n_buses)\n u = np.zeros(n_buses)\n for i in range(len(l)):\n if (i+1) in gens_hrs:\n l[i] = lower_bound\n u[i] = upper_bound\n else:\n l[i] = -np.inf\n u[i] = np.inf\n return l, u", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def bcRange(self):\n\t\treturn fabs(self.Upper - self.Lower)", "def range(self) -> ty.Tuple[float, float]:\r\n ...", "def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))", "def getInputDeviceRange(*args, maxValue: bool=True, minValue: bool=True,\n **kwargs)->List[float]:\n pass", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def _compute_gershgorin_bounds(main_model):\n model_hessian = main_model.square_terms\n\n hessian_diag = np.diag(model_hessian)\n hessian_diag_abs = np.abs(hessian_diag)\n hessian_row_sums = np.sum(np.abs(model_hessian), axis=1)\n\n lower_gershgorin = np.min(hessian_diag + hessian_diag_abs - hessian_row_sums)\n upper_gershgorin = np.max(hessian_diag - hessian_diag_abs + hessian_row_sums)\n\n return lower_gershgorin, upper_gershgorin", "def best_modifier_in_range(phrases, lower: float, upper: float):\n\n ok_phrases = list(filter(lambda w: w.possibility > lower and w.possibility < upper, phrases))\n best_phrases = sorted(ok_phrases, key=lambda w: w.confidence)\n if best_phrases:\n return best_phrases[-1]\n return None", "def get_number():\n valid_input = False\n while not valid_input:\n try:\n user_num = int(input(\"Enter a number between {} and {}: \".format(LOWER_BOUND, UPPER_BOUND)))\n if LOWER_BOUND <= user_num <= UPPER_BOUND:\n return user_num\n except ValueError:\n pass\n print(\"That is not a valid number !\")", "def calcBRange(c,n=10):\n \n bMin = -abs(c)/2.0 \n bMax = abs(c)/2.0 \n return np.linspace(bMin,bMax,n)", "def _initialize_bounds(problem, bounds, get_bound, set_bound):\n for constraint in problem.constraints:\n root_expr = constraint.root_expr\n expr_bounds = Interval(constraint.lower_bound, constraint.upper_bound)\n if root_expr not in bounds:\n set_bound(root_expr, expr_bounds)\n else:\n existing_bounds = get_bound(root_expr)\n new_bounds = existing_bounds.intersect(expr_bounds)\n set_bound(root_expr, new_bounds)", "def calculate_ranges(a, b):\n try:\n ranges = list(range(0, a, a//b))\n if ranges[-1] != a:\n ranges.append(a)\n return ranges\n except ValueError:\n return [0, a]", "def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds", "def midrange(lo, hi, mid=0, scale=1.0):\n return [min(mid, (mid + lo) / (1.0 + scale)),\n max(mid, (mid + hi) / (1.0 + scale))]", "def get_abnormal_price_values(ls_ls_prices, lower_bound, upper_bound):\n ls_abnormal_prices = []\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n day_ind = 0\n while day_ind < len(ls_prices):\n 
if (ls_prices[day_ind] < lower_bound) or (ls_prices[day_ind] > upper_bound):\n relative_day = 0\n ls_day_inds = []\n while (day_ind + relative_day < len(ls_prices)) and\\\n (ls_prices[day_ind] == ls_prices[day_ind + relative_day]):\n ls_day_inds.append(day_ind + relative_day)\n relative_day += 1\n ls_abnormal_prices.append((indiv_ind, ls_prices[day_ind], ls_day_inds))\n day_ind += relative_day\n else:\n day_ind += 1\n return ls_abnormal_prices", "def get_input_voltage_ranges(self):\r\n bufsize = 32\r\n range_list_type = cfloat64 * bufsize\r\n range_list = range_list_type()\r\n NIDAQ_dll.DAQmxGetDevAIVoltageRngs(self.dev_id.encode('ascii'),\r\n ctypes.byref(range_list), uInt32(bufsize))\r\n range_list = list(range_list)\r\n range_values_n = range_list.index(0.0)\r\n range_n = range_values_n / 2\r\n return_list = []\r\n for idx in range(range_n):\r\n return_list.append([range_list[2*idx],\r\n range_list[(2*idx)+1]]) \r\n return return_list", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def get_rangelist(start, end, count):\n if start is not None and end is not None:\n if count != 0 and not (start == 0 and count < end):\n start = int(start)\n end = int(end)\n cnt = end - start\n rangelist = []\n div = int(start) / count + 1\n multiple = round(div, 0)\n start_range = int(count * multiple)\n n = 1\n for itr in range(0, start_range + count, (end - start)):\n if itr < count:\n rangelist.append([itr, itr + cnt, n])\n n += 1\n return rangelist\n return []", "def bounds(lines):\n min_x = bench_util.Max\n min_y = bench_util.Max\n max_x = bench_util.Min\n max_y = bench_util.Min\n \n for line in lines.itervalues():\n for x, y in line:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return ((min_x, min_y), (max_x, max_y))", "def _get_shear_vals(lower_bound: float,\n upper_bound: float,\n step: float) -> Tuple[float]:\n return tuple(np.arange(lower_bound, upper_bound + step, step))", "def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)", "def map_bound(value, in_low, in_high, out_low, out_high):\n result = None\n\n if value <= in_low:\n result = out_low\n else:\n if value >= in_high:\n result = out_high\n else:\n # http://stackoverflow.com/a/5650012/574981\n result = out_low + (\n (out_high - out_low) * (value - in_low) / (in_high - in_low)\n )\n return result", "def constrain(amt, low, high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def constrain(amt,low,high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest,lowest)", "def map_range(x, in_min, in_max, out_min, out_max):\n mapped = (x-in_min) * (out_max - out_min) / (in_max-in_min) + out_min\n if out_min <= out_max:\n return max(min(mapped, out_max), out_min)\n return min(max(mapped, out_max), out_min)", "def range() -> List[int]:\n pass", "def GetInteger(prompt=\"Please enter a number:\",\n lowerbound=0, upperbound=99,\n smaller_prompt=\"It's Smaller, please re-enter:\",\n bigger_prompt=\"It's Bigger, please re-enter:\",\n not_int_prompt=\"You did not enter a number, please re-enter:\"):\n user_input = 
input(prompt)\n\n def InternalFunc1(num):\n while True:\n try:\n return int(num)\n except ValueError:\n num = input(not_int_prompt)\n result = InternalFunc1(user_input)\n\n while not lowerbound <= result <= upperbound:\n if result < lowerbound:\n user_input = input(smaller_prompt)\n result = InternalFunc1(user_input)\n if upperbound < result:\n user_input = input(bigger_prompt)\n result = InternalFunc1(user_input)\n return result", "def get_coordinates():\n\tallowed_range = [0,1,2]\n\trow = int(input(\"Enter row: \")) - 1\n\tcol = int(input(\"Enter column: \")) - 1", "def simple_bounds(child, lb, ub):\n assert len(lb) == len(ub), 'Lower and upper bounds have different #s of design variables in simple_bounds function.'\n assert len(lb) == len(child), 'Bounds and child have different #s of design variables in simple_bounds function.'\n for i in range(0, len(child), 1):\n if child[i] < lb[i]:\n child[i] = lb[i]\n\n for i in range(0, len(child), 1):\n if child[i] > ub[i]:\n child[i] = ub[i]\n\n return child", "def input_bounds(self):\n return self.__input_bounds", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ..." ]
[ "0.682871", "0.6680871", "0.6663449", "0.6461233", "0.6457602", "0.6383037", "0.6375019", "0.63575613", "0.6352746", "0.6348871", "0.6233797", "0.6187964", "0.6147755", "0.613873", "0.61301994", "0.6121017", "0.6093464", "0.6086115", "0.6073772", "0.6073625", "0.6072353", "0.6051004", "0.60436636", "0.6038423", "0.6015893", "0.60152787", "0.6005109", "0.6004818", "0.59935975", "0.59680074", "0.59597397", "0.59597355", "0.5948313", "0.5934267", "0.59282994", "0.59157217", "0.5911773", "0.591029", "0.59074455", "0.5892934", "0.589271", "0.5888605", "0.588318", "0.58828354", "0.58637977", "0.58578306", "0.5857821", "0.5854145", "0.584161", "0.5832221", "0.5821438", "0.5817561", "0.58172303", "0.5809609", "0.5804843", "0.58023506", "0.57986593", "0.577444", "0.5773863", "0.5772287", "0.57581294", "0.5754103", "0.57381743", "0.5738119", "0.57347894", "0.5732152", "0.57240206", "0.5721552", "0.571866", "0.5712589", "0.5702661", "0.56919557", "0.5688643", "0.5688614", "0.5686495", "0.56838685", "0.56817746", "0.5680271", "0.56685483", "0.56676584", "0.5666241", "0.56599164", "0.56595623", "0.5658819", "0.5656847", "0.5653576", "0.56519806", "0.5645542", "0.5644294", "0.56322086", "0.56251246", "0.56205714", "0.5612417", "0.56113887", "0.56059164", "0.55932933", "0.5591352", "0.5589388", "0.5589388", "0.5589388" ]
0.8307625
0
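The rows in this dump each pair a natural-language query with one positive document and a list of negative code snippets, as declared in the "objective" metadata (triplet: query, document, negatives). A minimal sketch of how such rows could be read into training triplets is given below; it assumes one JSON object per line with exactly these field names, and the file name retrieval_rows.jsonl is a hypothetical placeholder.

import json

# Minimal sketch, assuming one JSON object per line with the field names
# seen in this dump; "retrieval_rows.jsonl" is a hypothetical placeholder.
def iter_triplets(path="retrieval_rows.jsonl"):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            query = row["query"]        # natural-language description
            positive = row["document"]  # matching code snippet
            for negative in row["negatives"]:
                # one (query, positive, negative) triplet per negative,
                # matching the "triplet" objective in the metadata field
                yield query, positive, negative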
Test that noun_chunks raises ValueError for 'fi' language if Doc is not parsed. To check this, we're constructing a Doc with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run.
def test_noun_chunks_is_parsed(fi_tokenizer):
    doc = fi_tokenizer("Tämä on testi")
    with pytest.raises(ValueError):
        list(doc.noun_chunks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_noun_chunks_is_parsed_fr(fr_tokenizer):\n doc = fr_tokenizer(\"trouver des travaux antérieurs\")\n with pytest.raises(ValueError):\n list(doc.noun_chunks)", "def test_parse_simple_nonmember(self):\n lexed = [\n Token(\n value=\"qet\",\n token_type=KT.UNKNOWN,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=KT.NOUN,\n line_number=0,\n ),\n ]\n self.assertFalse(parse(SimpleKlingonGrammar, lexed))", "def test_no_ngrams():\n tokenizer = Tokenizer(quadgram_freq=2)\n X = tokenizer.transform([[\"a b c d\"]])\n assert X[\"corpus\"][0] == [\"a\", \"b\", \"c\", \"d\"]\n assert tokenizer.quadgrams is None", "def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)", "def test_issue401(EN, text, i):\n tokens = EN(text)\n assert tokens[i].lemma_ != \"'\"", "def test_extract_incorrect_embeddings():\n with pytest.raises(ValueError):\n model = BERTopic(language=\"Unknown language\")\n model._extract_embeddings([\"Some document\"])", "def noun_phrase_chunking(part_of_speech_data):\n\n grammar = r\"\"\"\n NP: {<DT|JJ|NN.*>+}\n PP: {<IN><NP>}\n VP: {<VB.*><NP|PP|CLAUSE>+$}\n CLAUSE: {<NP><VP>}\n \"\"\"\n\n grammar2 = r\"\"\"\n NP: {<DT|NN>+} # Chunk sequences of NN and DT\n {<DT><JJ><NN>} # Chunk det+adj+noun\n \"\"\"\n\n return RegexpParser(grammar).parse(part_of_speech_data).draw()", "def countChunks(args,inputFile):\r\n boundary = \"-X-\" # sentence boundary\r\n # delimiter = args.delimiter\r\n # raw = args.raw\r\n # oTag = args.oTag\r\n #inputFile=args.inputFile\r\n\r\n delimiter = args[\"delimiter\"]\r\n raw = args[\"raw\"]\r\n oTag = args[\"oTag\"]\r\n\r\n fileIterator=open(inputFile)\r\n\r\n correctChunk = defaultdict(int) # number of correctly identified chunks\r\n foundCorrect = defaultdict(int) # number of chunks in corpus per type\r\n foundGuessed = defaultdict(int) # number of identified chunks per type\r\n\r\n tokenCounter = 0 # token counter (ignores sentence breaks)\r\n correctTags = 0 # number of correct chunk tags\r\n\r\n lastType = None # temporary storage for detecting duplicates\r\n inCorrect = False # currently processed chunk is correct until now\r\n lastCorrect, lastCorrectType = \"O\", None # previous chunk tag in corpus\r\n lastGuessed, lastGuessedType = \"O\", None # previously identified chunk tag\r\n\r\n for line in fileIterator:\r\n # each non-empty line must contain >= 3 columns\r\n features = line.strip().split(delimiter)\r\n #print(features)\r\n if not features or features[0] == boundary:\r\n features = [boundary, \"O\", \"O\"]\r\n elif len(features) < 3:\r\n raise IOError(\"conlleval: unexpected number of features in line %s\\n\" % line)\r\n\r\n # extract tags from last 2 columns\r\n guessed, guessedType = splitTag(features[-1], oTag=oTag, raw=raw)\r\n correct, correctType = splitTag(features[-2], oTag=oTag, raw=raw)\r\n\r\n # 1999-06-26 sentence breaks should always be counted as out of chunk\r\n firstItem = features[0]\r\n if firstItem == boundary:\r\n guessed, guessedType = \"O\", None\r\n\r\n # decide whether current chunk is correct until now\r\n if inCorrect:\r\n endOfGuessed = endOfChunk(lastCorrect, correct, lastCorrectType, correctType)\r\n endOfCorrect = endOfChunk(lastGuessed, guessed, lastGuessedType, 
guessedType)\r\n if (endOfGuessed and endOfCorrect and lastGuessedType == lastCorrectType):\r\n inCorrect = False\r\n correctChunk[lastCorrectType] += 1\r\n elif ( endOfGuessed != endOfCorrect or guessedType != correctType):\r\n inCorrect = False\r\n\r\n startOfGuessed = startOfChunk(lastGuessed, guessed, lastGuessedType, guessedType)\r\n startOfCorrect = startOfChunk(lastCorrect, correct, lastCorrectType, correctType)\r\n if (startOfCorrect and startOfGuessed and guessedType == correctType):\r\n inCorrect = True\r\n if startOfCorrect:\r\n foundCorrect[correctType] += 1\r\n if startOfGuessed:\r\n foundGuessed[guessedType] += 1\r\n\r\n if firstItem != boundary:\r\n if correct == guessed and guessedType == correctType:\r\n correctTags += 1\r\n tokenCounter += 1\r\n\r\n lastGuessed, lastGuessedType = guessed, guessedType\r\n lastCorrect, lastCorrectType = correct, correctType\r\n\r\n if inCorrect:\r\n correctChunk[lastCorrectType] += 1\r\n\r\n return correctChunk, foundGuessed, foundCorrect, correctTags, tokenCounter", "def test_parse_empty_is_never_part_of_grammar(self):\n self.assertFalse(parse(SimpleKlingonGrammar, []))", "def test_narrative_data_missing(self, config_handler):\n spec = Spec.from_dict(\n {\"name\": \"homogeneity_coefficient\", \"unit\": \"percentage\", \"dtype\": \"float\"}\n )\n with raises(SmifDataNotFoundError):\n config_handler.read_narrative_variant_data(\"does not exist\", spec)", "def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )", "def _parse_tokens(chunk, format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n tokens = []\n # Only process <chunk> and <chink> elements, \n # text nodes in between return an empty list.\n if not (chunk.tag == XML_CHUNK or chunk.tag == XML_CHINK):\n return []\n type = chunk.get(XML_TYPE, \"O\")\n if type == \"PNP\":\n # For, <chunk type=\"PNP\">, recurse all the child chunks inside the PNP.\n for ch in chunk:\n tokens.extend(_parse_tokens(ch, format))\n # Tag each of them as part of the PNP.\n if PNP in format:\n i = format.index(PNP)\n for j, token in enumerate(tokens):\n token[i] = (j==0 and \"B-\" or \"I-\") + \"PNP\"\n # Store attachments so we can construct anchor id's in parse_string().\n # This has to be done at the end, when all the chunks have been found.\n a = chunk.get(XML_OF).split(_UID_SEPARATOR)[-1]\n if a:\n _attachments.setdefault(a, [])\n _attachments[a].append(tokens)\n return tokens\n # For <chunk type-\"VP\" id=\"1\">, the relation is VP-1.\n # For <chunk type=\"NP\" relation=\"OBJ\" of=\"1\">, the relation is NP-OBJ-1.\n relation = _parse_relation(chunk, type)\n # Process all of the <word> elements in the chunk, for example:\n # <word type=\"NN\" lemma=\"pizza\">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza]\n for word in filter(lambda n: n.tag == XML_WORD, chunk):\n 
tokens.append(_parse_token(word, chunk=type, relation=relation, format=format))\n # Add the IOB chunk tags:\n # words at the start of a chunk are marked with B-, words inside with I-.\n if CHUNK in format:\n i = format.index(CHUNK)\n for j, token in enumerate(tokens):\n token[i] = token[i] != \"O\" and ((j==0 and \"B-\" or \"I-\") + token[i]) or \"O\"\n # The chunk can be the anchor of one or more PNP chunks.\n # Store anchors so we can construct anchor id's in parse_string().\n a = chunk.get(XML_ANCHOR, \"\").split(_UID_SEPARATOR)[-1]\n if a: \n _anchors[a] = tokens\n return tokens", "def test_text_found_in_single_slide(collected_seg_motif):\n slide_not_found, error_records_future, error_records_seg, skip_test = collected_seg_motif\n if skip_test:\n pytest.skip(\"File does not have nti_data\")\n #print(\"error_records_future : \", error_records_future)\n print(\"error_records_seg : \", error_records_seg)\n if len(error_records_seg) > 0:\n print(\"error count : \", len(error_records_seg))\n print(\"error slides : \", (error_records_seg))\n pytest.fail(\n \"errors found. Count of missing rem slides in segments %s , count of remediation slides in nore than 1 slide %s\"\n % (slide_not_found, error_records_seg))", "def test_unaffected(self, entries, errors, __):\n # Note that this checks that the plugin did not insert any Open\n # directive by itself where not required. This is correct behaviour.\n self.assertEqualEntries(self.test_unaffected.__input__, entries)", "def test_word_not_found_in_file(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/word_not_found)\n indexer.index(_path_prefix + 'word_not_found')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"hello\")\n print(result)\n self.assertTrue(result == {})\n\n result = indexer.search(\"world\")\n self.assertTrue(result == {})", "def _get_vals(self, doc: Doc) -> Iterator[Span]:\n\n for ngram in doc._.ngrams:\n if ngram.text.isalpha() and OOV_PHONEMES not in ngram._.phonemes:\n yield ngram", "def is_noun(tag):\r\n return tag in ['NN', 'NNS', 'NNP', 'NNPS']", "def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool:\n _, offsets_buf, data_buf = chunk.buffers()\n\n offsets = array.array(\"i\")\n assert offsets.itemsize == 4\n offsets.frombytes(offsets_buf)\n if sys.byteorder != \"little\":\n offsets.byteswap() # pyarrow is little-endian\n\n offset0 = offsets[chunk.offset]\n offsetN = offsets[chunk.offset + len(chunk)] # len(offsets) == 1 + len(chunk)\n\n b = data_buf[offset0:offsetN].to_pybytes()\n return SCARY_BYTE_REGEX.search(b) is not None", "def test_unparse_invalid_examples(self):\n for description, example in INVALID_EXAMPLES.items():\n for mode in MODES:\n if example['trees'][mode] is None:\n continue\n with self.assertRaises(SyntaxError, msg=(description, mode)) as raised:\n typed_astunparse.unparse(example['trees'][mode])\n self.assertIn('PEP 526', str(raised.exception), msg=(description, mode))\n\n with self.assertRaises(SyntaxError, msg=(description, mode)):\n typed_ast.ast3.parse(source=example['code'], mode=mode)", "def test_ngrams_valence_processing():\n language = \"ita\"\n letter = \"z\"\n\n valence_data = load_valence_data(language)\n\n temp_directory = \"{}/googlebooksdata\".format(PACKAGE_LOCATION)\n os.makedirs(temp_directory, exist_ok=True)\n\n ngrams_fpath = download_nrgams_file(temp_directory, language, letter)\n\n ngrams_valence_scores = merge_ngrams_and_ANEW_data(valence_data, 
ngrams_fpath)\n\n zucchero_data = ngrams_valence_scores[\n (ngrams_valence_scores['ngram'] == \"zucchero\") & (ngrams_valence_scores['year'] == 2009)]\n\n os.remove(ngrams_fpath)\n\n assert float(zucchero_data['valence']) == 6.55", "def test_quest_load_data_fail(testing_quest_page):\n testing_quest_page.save()\n\n # fetch the data\n doc = testing_quest_page.doc_ref.get()\n data = testing_quest_page.storage_model.parse_obj(doc.to_dict())\n\n # mess with the data\n data.serialized_data = json.dumps({\"this\": \"nonesense\"})\n testing_quest_page.doc_ref.set(data.dict())\n\n # try to load with the bad version\n with pytest.raises(QuestLoadError):\n testing_quest_page.load()\n\n # cleanup\n testing_quest_page.delete()", "def test_word_positions_in_file(self):\n pass", "def test_no_brackets_in_words():\n raise SkipTest\n assert_raises(ParseError, grammar['word'].parse, ']')", "def test_noDocumentsFound(self):\n self.assertRaises(NoDocumentsFound, self.builder.build, self.manDir)", "def test_sanity(self) -> None:\n if self.report.headlines:\n return\n\n if self.report.document.paragraphs:\n self.add_error(\n \"Rubrikerna i dokumentet är felformaterade eller saknas. \"\n \"Rubrikerna ska vara skrivna i versaler och ha samma \"\n \"typsnitt, stil och storlek som brödtexten. \"\n \"Rubriker avslutas med radbrytning.\"\n )\n\n if not self.report.document.paragraphs:\n self.add_error(\"Ditt dokument är antigen tomt eller i fel format.\")", "def test_invalid_tokens(self):\n self.assertTrue(1 + 1)", "def verifyProperNounAtSentenceStart(idx, tagged_term, tagged_terms, lexicon):\n term, tag, norm = tagged_term\n if (tag in ('NNP', 'NNPS') and\n (idx == 0 or tagged_terms[idx-1][1] == '.')):\n lower_term = term.lower()\n lower_tag = lexicon.get(lower_term)\n if lower_tag in ('NN', 'NNS'):\n tagged_term[0] = tagged_term[2] = lower_term\n tagged_term[1] = lower_tag", "def test_issue3625():\n nlp = Hindi()\n doc = nlp(u\"hi. how हुए. 
होटल, होटल\")\n assert [token.text for token in doc] == ['hi', '.', 'how', 'हुए', '.', 'होटल', ',', 'होटल']", "def test_missing_one_correction(self):\n errors = self.response.data[\"errors\"]\n errors[0][\"correction\"] = []\n self.response2 = self.client.post(\n reverse(\"correct\"), {\"id\": 1, \"errors\": errors}, format=\"json\"\n )\n\n self.assertEqual(\n self.response2.data,\n f\"You must insert a correction for the word {errors[0]['word']} or delete this error entry if you want to ignore it\",\n )", "def test_lang_is_missing(app):\n rv = app.test_client().post('/tokenize', \n json={\n 'text': \"I still haven't found what i'm looking for\",\n })\n json_data = rv.get_json()\n tokens = json_data['tokens']\n lang = json_data['lang']\n assert tokens == ['I', 'still', 'have', 'not', 'found', 'what', 'i', 'am', 'looking', 'for']\n assert lang == 'en'", "def test_spelling(self) -> None:\n misstakes: Dict[Word, List[str]] = self.report.spellcheck(\n self.rules.spelling_skip_wordclasses\n )\n for word, corrections in misstakes.items():\n if word.text.lower() in self.rules.forbidden_words:\n continue\n if word.text.lower() in [\n ab[\"word\"] for ab in self.rules.police_abbreviations\n ]:\n continue\n error_text: str = f\"Ordet {word.text} är felstavat.\"\n if corrections:\n error_text += \" Rättningsförslag: \" + \", \".join(corrections) + \".\"\n self.add_error(error_text, word=word)", "def test_not_blank_validator_invalid_value_should_return_false(self):\n for item in self.stdtype_fixtures:\n self.assertFalse(NotBlankValidator(TypeHint(item.get('type')), item.get('invalid')))", "def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')", "def testTermKnownValuesWikiWrapAsExample(self):\n for wikilang, pos, termlang, thisterm, termgender, asexample, \\\n forlist in self.knownValues:\n if pos == 'noun':\n aterm = term.Noun(termlang, thisterm, gender=termgender)\n if pos == 'verb':\n aterm = term.Verb(termlang, thisterm)\n result = aterm.wikiWrapAsExample(wikilang)\n self.assertEqual(asexample, result)", "def test_no_key_words(self):\n for msg_test in MSG_TEST_NO_RESULT:\n result = self.parser.msg_analysis(msg_test)\n assert len(result) == 0", "def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)", "def nangdok(data_dir, batch_size, test_max_size, **kwargs):\n join = lambda f: _path.join(data_dir, f)\n texts = []\n with open(join(\"script_nmbd_by_sentence.txt\"), encoding=\"utf-16-le\") as f:\n tmp = []\n for line in f.readlines():\n if line.startswith(\"<\"):\n texts.append(tmp)\n tmp = []\n elif _re.match(r\"^\\d+\\..*\", line):\n tmp.append(line)\n texts.append(tmp)\n del texts[0]\n participants = sorted(filter(lambda l: _re.match(\"^[fm][v-z][0-9]+\", l),\n _os.listdir(data_dir)))\n test_sentences = kwargs.get(\"test_sentences\",\n [_random.choice(ts) for ts in texts])\n test_participants = kwargs.get(\"test_participants\",\n [_random.choice(list(g))\n for _, g in _groupby(participants, lambda p: p[:2])])\n train = []\n test = []\n for participant in sorted(participants):\n for i, _ in enumerate(texts):\n for j, text in enumerate(_):\n f = join(\"{0}/{0}_t{1:0>2}_s{2:0>2}.wav\".format(participant, i+1, j+1))\n if _path.isfile(f):\n if text in test_sentences or participants in test_participants:\n test.append((f, text))\n else:\n train.append((f, text))\n _random.shuffle(test)\n valid = test[:batch_size]\n if test_max_size and batch_size + test_max_size < len(test):\n test = 
test[batch_size:(batch_size + test_max_size)]\n else:\n test = test[batch_size:]\n return train, valid, test", "def test_unclosed(self):\n nt = NewickTokenizer(newick='(a,(b,c)')\n self.assertRaises(ValueError, nt.tokens)", "def test_parsing(parser, parsing_tests):\n for expected_result, files in parsing_tests.items():\n for file in files:\n try:\n parser.load(file)\n except ValueError:\n # The source document was not expected to fail.\n if expected_result == 'y':\n raise", "def test_corpus_labeling(self):\n corpusName = \"test\"\n built_corpus_Path = corpus_tools.getDataPath(corpusName)\n filename = built_corpus_Path + \"-GT\"\n reader = LinguoDatasetReader()\n with tempfile.TemporaryDirectory() as temp_dir:\n outpath = temp_dir + \"-labeled\"\n corpus_tools.labelCorpus(filename, outpath,\n g_label=0, ug_type=\"WS\")\n original = corpus_tools.load_tokenized_corpus(filename)\n loaded = reader.read(outpath)\n for original_sent, loaded_sent in zip(original, loaded):\n self.assertEqual(loaded_sent.fields[\"g_label\"].label,\n \"ungrammatical\")\n self.assertEqual(loaded_sent.fields[\"ug_type\"].label, \"WS\")\n plain_loaded = [str(token) for\n token in loaded_sent.fields[\"sentence\"].tokens]\n self.assertEqual(plain_loaded, original_sent)", "def test_open_closed(self):\n nt = NewickTokenizer(newick='(a,(),(d,e));')\n self.assertRaises(ValueError, nt.tokens)", "def test_no_section_by_section(self):\n notice = {\n \"document_number\": \"111-22\",\n \"fr_volume\": 22,\n \"cfr_part\": \"100\",\n \"publication_date\": \"2010-10-10\"\n }\n s = SectionBySection(None, notices=[notice])\n self.assertEqual(None, s.process(Node(label=['100', '22'])))", "def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} är inte tillåtet, \"\n f\"använd {u_word['alternative']} istället.\",\n word=word,\n )\n break", "def ngrams(doc, n, filter_stopwords=True, good_tags={'NOUN', 'PROPN', 'ADJ'}):\n ngrams_ = (doc[i:i + n] for i in range(len(doc) - n + 1))\n ngrams_ = (ngram for ngram in ngrams_\n if not any(w.is_space or w.is_punct for w in ngram))\n\n if filter_stopwords:\n ngrams_ = (ngram for ngram in ngrams_\n if not any(w.is_stop for w in ngram))\n if good_tags:\n ngrams_ = (ngram for ngram in ngrams_\n if all(w.pos_ in good_tags for w in ngram))\n\n for ngram in ngrams_:\n yield ngram", "def test__rules__base__chunkrule():\n # A rule that returns a chunk\n test_chunk = PositionedChunk('foo', 1, 20, 'a')\n TRuleD = BaseRule.rule('TRuleD', \"NA\", lambda c, m: test_chunk)\n r = TRuleD()\n eval_chunk = PositionedChunk('bar', 1, 20, 'a')\n # Check that the violation refers to the other chunk\n assert r.evaluate(eval_chunk).chunk == test_chunk", "def test_article_has_no_page_title(self, fake_article_missing_elements):\n\n fake_analysis = PageTitleAnalyzer(title=fake_article_missing_elements.title)\n assert not fake_analysis.has_page_title()", "def test_HTMLChunker(self):\n text = \"\"\"hello<html><head><title>my title</title></head><body>this is a\n <b>simple</b> HTML document for <p> test<i>ing</i> purposes</p>.\n It < contains > various <-- special characters.\n \"\"\"\n tkns = get_tokenizer(\"en_US\", chunkers=(HTMLChunker,))(text)\n out = [t for t in tkns]\n exp = [(\"hello\", 0), (\"my\", 24), (\"title\", 27), (\"this\", 53), (\"is\", 
58),\n (\"a\", 61), (\"simple\", 82), (\"HTML\", 93), (\"document\", 98), (\"for\", 107),\n (\"test\", 115), (\"ing\", 122), (\"purposes\", 130), (\"It\", 160),\n (\"contains\", 165), (\"various\", 176), (\"special\", 188),\n (\"characters\", 196)]\n self.assertEqual(out, exp)\n for (word, pos) in out:\n self.assertEqual(text[pos:pos + len(word)], word)", "def testTermKnownValuesWikiWrapAsTranslation(self):\n for wikilang, pos, termlang, thisterm, termgender, asexample, \\\n forlist in self.knownValues:\n if pos == 'noun':\n aterm = term.Noun(termlang, thisterm, gender=termgender)\n if pos == 'verb':\n aterm = term.Verb(termlang, thisterm)\n result = aterm.wikiWrapAsTranslation(wikilang)\n self.assertEqual(forlist, result)", "def test_no_arg(self):\n self.assertRaises(ValueError, NewickTokenizer)", "def retrieveMissingChunks(self, existing_chunks={}):\n foo.info('Retrieving prefixes')\n raw_data = foo._fetchData(existing_chunks)\n preparsed_data = foo._preparseData(raw_data)\n d = foo(preparsed_data)\n return d", "def test_check_chunk_n(self):\n st, frontend_setup = self.get_st_and_fill_frontends()\n\n sf = st.storage[0]\n st_new = st.new_context()\n st_new.storage = [sf]\n key = st_new.key_for(self.run_id, self.target)\n backend, backend_key = sf.find(key, **st_new._find_options)\n prefix = strax.storage.files.dirname_to_prefix(backend_key)\n md = st_new.get_metadata(self.run_id, self.target)\n md['chunks'][0]['n'] += 1\n md_path = os.path.join(backend_key, f'{prefix}-metadata.json')\n with open(md_path, \"w\") as file:\n json.dump(md, file, indent=4)\n\n with self.assertRaises(strax.DataCorrupted):\n assert st_new.is_stored(self.run_id, self.target)\n st_new.get_array(self.run_id, self.target)", "def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())", "def detect_cuewords():\n\n # cuewords\n\n if t_word[:2] == 'ni':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'kein':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'nein':\n create_negation_frame()\n create_target_focus_scope()", "def test_no_delimiter_error(self):\n val = DwcaValidator(yaml.load(self.yaml_delimited5, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'sex': 'male | female'}\n with self.assertRaises(ValueError):\n val.validate(document)", "def guess_splitwords():\n\n if t_word[:2] == 'un' and (t_pos == 'ADJD' or t_pos == 'ADJA'):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])", "def _nix_utf8_chunk_empty_strings(chunk: pyarrow.Array) -> pyarrow.Array:\n # pyarrow's cast() can't handle empty string. Create a new Array with\n # \"\" changed to null.\n _, offsets_buf, data_buf = chunk.buffers()\n\n # Build a new validity buffer, based on offsets. Empty string = null.\n # Assume `data` has no padding bytes in the already-null values. That way\n # we can ignore the _original_ validity buffer and assume all original\n # values are not-null. (Null values are stored as \"\" plus \"invalid\".)\n #\n # Validity-bitmap spec:\n # https://arrow.apache.org/docs/format/Columnar.html#validity-bitmaps\n\n # first offset must be 0. 
Next offsets are used to calculate lengths\n offsets = array.array(\"i\")\n assert offsets.itemsize == 4\n offsets.frombytes(offsets_buf)\n if sys.byteorder != \"little\":\n offsets.byteswap() # pyarrow is little-endian\n\n validity = bytearray()\n null_count = 0\n last_offset = offsets[0]\n assert last_offset == 0\n pos = 1\n while True:\n # Travel offsets in strides of 8: one per char in the validity bitmap.\n # Pad with an extra 1 bit -- [2020-02-20, adamhooper] I think I read\n # this is needed somewhere.\n valid_byte = 0x00\n block = offsets[pos : pos + 8]\n try:\n if block[0] > last_offset:\n valid_byte |= 0x1\n else:\n null_count += 1\n if block[1] > block[0]:\n valid_byte |= 0x2\n else:\n null_count += 1\n if block[2] > block[1]:\n valid_byte |= 0x4\n else:\n null_count += 1\n if block[3] > block[2]:\n valid_byte |= 0x8\n else:\n null_count += 1\n if block[4] > block[3]:\n valid_byte |= 0x10\n else:\n null_count += 1\n if block[5] > block[4]:\n valid_byte |= 0x20\n else:\n null_count += 1\n if block[6] > block[5]:\n valid_byte |= 0x40\n else:\n null_count += 1\n if block[7] > block[6]:\n valid_byte |= 0x80\n else:\n null_count += 1\n validity.append(valid_byte)\n last_offset = block[7]\n pos += 8\n except IndexError:\n validity.append(valid_byte)\n break # end of offsets\n\n validity_buf = pyarrow.py_buffer(validity)\n\n # We may have over-counted in null_count: anything before `chunk.offset`\n # should not count.\n #\n # It's less work to \"undo\" the counting we did before -- otherwise we'd\n # riddle the above loop with if-statements.\n for i in range(chunk.offset):\n if offsets[i + 1] == offsets[i]:\n null_count -= 1\n\n return pyarrow.StringArray.from_buffers(\n length=len(chunk),\n value_offsets=offsets_buf,\n data=data_buf,\n null_bitmap=validity_buf,\n null_count=null_count,\n offset=chunk.offset,\n )", "def test_split_orphan(self):\r\n orphans = self.split_mongo.get_orphans(self.split_course_key)\r\n self.assertEqual(len(orphans), 3, \"Wrong # {}\".format(orphans))\r\n location = self.split_course_key.make_usage_key('chapter', 'OrphanChapter')\r\n self.assertIn(location, orphans)\r\n location = self.split_course_key.make_usage_key('vertical', 'OrphanVert')\r\n self.assertIn(location, orphans)\r\n location = self.split_course_key.make_usage_key('html', 'OrphanHtml')\r\n self.assertIn(location, orphans)", "def test_missing_all_tokens(self):\n self.helper_test_evaluate_raises(\n '(A nand B) and not D',\n expected_exc_type=MissingSymbolError)", "def __init__(self, variable, pnoun, nucleus):\n super(ProperNounExpression, self).__init__(variable, EmptyExpression(), nucleus)\n assert(pnoun in proper_nouns)\n self.pnoun = pnoun", "def test_corpus_is_not_present():\n with pytest.raises(ValueError, match=\".*not contain a text corpus.*\"):\n TextCleaner().transform(X_bin)", "def test_undsplit(self):\n convert2 = cnv()\n # normal case where tens != 0 and ones != 0\n self.assertEqual(convert2.undsplit('einundzwanzig'), 21)\n # special case 1 only one word\n self.assertEqual(convert2.undsplit('fünf'), 5)\n self.assertEqual(convert2.undsplit('fünfzig'), 50)\n # special case 2 empty string\n self.assertEqual(convert2.undsplit(''), 0)", "def test_text_cleaning(basic_multilingual, clean_multilingual):\n docs = [\"Bonjour le monde! #thisisfrench #ilovefrance\",\n \"Bonjour le monde! 
https://t.co/U0Zjp3tusD\"]\n docs = [Document([], text=text) for text in docs]\n \n basic_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"it\", \"it\"]\n \n assert clean_multilingual.processors[\"langid\"]._clean_text\n clean_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"fr\", \"fr\"]", "def test_non_empty_file_no_metadata(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"no_metadata.md\")\n\n # If the file is not empty but has no metadata it should fail\n with self.assertRaises(Exception) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n \"Could not find metadata header '...' or '---'.\", message\n )", "def test_NL_NP_correct():\n\tNLNP_pairs = []\n\tfor search_row in map_rows:\n\t\tNL, NP = search_row[2], search_row[3]\n\t\tif not NL == 'NA':\n\t\t\tNLNP_pairs.append((NL, NP))\n\tis_cat_correct = []\n\tfor cat in nset['categories']:\n\t\tcat_pair = (cat['name'], cat['supercategory'])\n\t\tis_cat_correct.append(cat_pair in NLNP_pairs)\n\n\tassert False not in is_cat_correct", "def test_metadata_regular_is_normal(self):\n have = False\n for x in self.metadata.fonts:\n if x.full_name.endswith('Regular') and x.style == 'normal':\n have = True\n self.assertTrue(have)", "def test_score_of_unknown_token(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates)\n self.assertFalse(scores.get('unknown'))", "def check_extracted_data(data):\n for item in data:\n document_name = item[\"document\"]\n data_frame = item[\"data_frame\"]\n entity_position = item[\"entity_position\"]\n relations = item[\"relations\"]\n\n # Check if entities correctly embedded\n entity_embedding = data_frame[\"entity_embedding\"].tolist()\n cnt = 0\n for entity_key in entity_position:\n low, high = entity_position[entity_key]\n cnt += high - low\n if high == low:\n print(CHECK_FAILED_AT_DOCUMENT, document_name, \"in 'entity_embedding', key\", entity_key,\n \"is empty (from\", low, \"to\", high, \")\")\n else:\n try:\n assert abs(min(entity_embedding[low:high]) - max(entity_embedding[low:high])) <= 1\n except AssertionError:\n print(CHECK_FAILED_AT_DOCUMENT, document_name, \"in 'entity_embedding', key\", entity_key,\n \", values from\", low, \"to\", high, \":\", entity_embedding[low:high], \"are inconsistent\")\n try:\n assert cnt == (np.array(entity_embedding) != 0).astype(int).sum()\n except AssertionError:\n print(CHECK_FAILED_AT_DOCUMENT, document_name, \"in total entity embedded tokens\",\n (np.array(entity_embedding) != 0).astype(int).sum(), \"does not match the record\", cnt)\n\n # Check if all relations are valid\n for value in relations.values():\n first = value[\"source\"]\n second = value[\"target\"]\n try:\n assert first in entity_position\n except AssertionError:\n print(CHECK_FAILED_AT_DOCUMENT, document_name, \"in 'relations',\", first,\n \"is not found in record\")\n try:\n assert second in entity_position\n except AssertionError:\n 
print(CHECK_FAILED_AT_DOCUMENT, document_name, \"in 'relations',\", second,\n \"is not found in record\")", "def test_lang_subset_unlikely_language(en_multilingual):\n sentences = [\"你好\" * 200]\n docs = [Document([], text=text) for text in sentences]\n en_multilingual(docs)\n assert [doc.lang for doc in docs] == [\"en\"]\n\n processor = en_multilingual.processors['langid']\n model = processor._model\n text_tensor = processor._text_to_tensor(sentences)\n en_idx = model.tag_to_idx['en']\n predictions = model(text_tensor)\n assert predictions[0, en_idx] < 0, \"If this test fails, then regardless of how unlikely it was, the model is predicting the input string is possibly English. Update the test by picking a different combination of languages & input\"", "def test_word_info_bad_word(self):\n word = \"hdiasudhisuahdiasushdiaushdiaushdiasuhdisauh\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": None,\n \"defination\": None,\n \"antonyms\": None,\n \"examples\": None,\n \"pronounciation\": None,\n \"synonyms\": None\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])", "def test_check_metadata_fields(self):\n contents = self.read_metadata_contents()\n family = Metadata.get_family_metadata(contents)\n\n keys = [(\"name\", str), (\"postScriptName\", str),\n (\"fullName\", str), (\"style\", str),\n (\"weight\", int), (\"filename\", str),\n (\"copyright\", str)]\n\n missing = set([])\n unknown = set([])\n\n for j, itemtype in keys:\n\n for font_metadata in family.fonts:\n if j not in font_metadata:\n missing.add(j)\n\n for k in font_metadata:\n if k not in map(lambda x: x[0], keys):\n unknown.add(k)\n\n if unknown:\n msg = 'METADATA.json \"fonts\" property has unknown items [%s]'\n self.fail(msg % ', '.join(unknown))\n\n if missing:\n msg = 'METADATA.json \"fonts\" property items missed [%s] items'\n self.fail(msg % ', '.join(missing))", "def load_conll_notags(unfile, max_slen, vocab=[], oovs={}, pads={}, lower=False, mwe=True, unk_case=True):\n # special characters used for splitting words\n split_chars = set([',', '.', ':', '-', '~', \"'\", '\"'])\n\n # punctuation that denotes when a sentence finishes\n sent_split_words = set(['.', '?', '!', ';', '—'])\n\n input_sents = []\n input_words = []\n windex = -1\n\n # number of words from which to split sentences\n LIMIT_SENT_LEN = max_slen\n\n sents = []\n if 'begin' in pads:\n next_words = [pads['begin']]\n next_syms = ['']\n next_indexs = [windex]\n sent_base_length = 1\n else:\n next_words = []\n next_syms = []\n next_indexs = []\n sent_base_length = 0\n\n # select files to use\n input_files = [unfile]\n\n # counters\n num_raw_sents = 0\n num_sents = 0\n num_words = 0\n num_oovs = 0\n\n # iterate over lines in the input files\n for ifile in input_files:\n for line in codecs.open(ifile, mode = 'r', errors = 'ignore', encoding = 'utf-8'):\n # discard newline character\n line = line[:-1]\n\n # keep 
adding words while in the middle of a sentence\n if line:\n word = line.split('\\t')[0]\n sym = word\n # add new original word\n windex += 1\n input_words.append(word)\n num_words += 1\n # lowercase when indicated\n if lower:\n word = word.lower()\n # use an heuristic and try to map oov words\n if vocab and word not in vocab:\n if word not in split_chars:\n if re.match('^[0-9\\.\\,-]+$', word):\n word = oovs['number']\n elif _match_word_vocab(word, vocab) != word:\n word = _match_word_vocab(word, vocab)\n elif ' ' in word or '~' in word or '-' in word and mwe:\n # attempt to split multi-word expressions\n constituents_text = re.split('[\\s~ | \\s-]+', word)\n constituents = [_match_word_vocab(w, vocab) for w in constituents_text]\n if all([True if c in vocab else False for c in constituents]):\n next_words += constituents[:-1]\n next_syms += constituents[:-1]\n next_indexs += [windex] * len(constituents[:-1])\n word = constituents[-1]\n sym = constituents[-1]\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n word = oovs['unknown']\n num_oovs += 1\n\n next_words.append(word)\n next_syms.append(sym)\n next_indexs.append(windex)\n\n # stack the current sentence upon seeing an empty line or a sentence end mark\n if not line or (len(next_words) > 3 and next_words[-4] in sent_split_words) or (len(next_words) >= LIMIT_SENT_LEN and len(sent_split_words.intersection(next_words)) < 1):\n if len(next_words) > sent_base_length:\n # split when an empty line marks a sentence end\n if not line:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n next_words = []\n next_syms = []\n next_indexs = []\n num_raw_sents += 1\n num_sents += 1\n # split when punctuation marks a sentence end\n elif len(next_words) > 3 and next_words[-4] in sent_split_words:\n split_words = next_words[:-3]\n split_syms = next_syms[:-3]\n split_indexs = next_indexs[:-3]\n if 'end' in pads:\n split_words.append(pads['end'])\n split_syms.append('')\n split_indexs.append(-1)\n sents.append(list(zip(split_words, split_indexs, split_syms)))\n next_words = next_words[-3:]\n next_syms = next_syms[-3:]\n next_indexs = next_indexs[-3:]\n num_sents += 1\n # split when the maximum sentence length is reached\n # a bad guess is better than not guessing when predicting tags\n else:\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n next_words = []\n next_syms = []\n next_indexs = []\n num_sents += 1\n\n if 'begin' in pads:\n next_words = [pads['begin']] + next_words\n next_syms = [''] + next_syms\n next_indexs = [-1] + next_indexs\n\n else:\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n\n # double check the last sentence\n if len(next_words) > sent_base_length:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n num_sents += 1\n\n # find the allowed sentence length\n print('[INFO] Number of unlabelled OOV words: ' + str(num_oovs) + ' / ' + str(num_words))\n print('[INFO] Original number of unlabelled sentences: ' + 
str(num_raw_sents))\n print('[INFO] Number of extracted unlabelled sentences ' + str(num_sents))\n return input_sents, sents", "def test_make_ngram_map_and_list_study_words(self):\n study_names, study_ngram_map = make_ngram_map_and_list(STUDY_WORDS, min_word_len=0, max_num_tokens=inf)\n\n expected_study_names = [\n 'vitro', 'in-vitro', 'cell_culture', 'fbs', 'fetal_bovine_serum',\n 'co2', 'air-liquid_interface', 'mouse', 'affected_health_professionals',\n 'iqr', 'chest_computed_tomographic_scans', 'non-invasive_ventilation',\n 'patient-to-patient',\n ]\n\n expected_study_ngram_map = {\n 'cell culture': 'cell_culture',\n 'fetal bovine serum': 'fetal_bovine_serum',\n 'air-liquid interface': 'air-liquid_interface',\n 'affected health professionals': 'affected_health_professionals',\n 'chest computed tomographic scans': 'chest_computed_tomographic_scans',\n 'non-invasive ventilation': 'non-invasive_ventilation',\n }\n\n self.assertTrue(list_compare(study_names, expected_study_names))\n self.assertEqual(study_ngram_map, expected_study_ngram_map)", "def validateTaggedCompFile(comp_file, tagged_comp_file):\n comp_data = SimpleDataReader(comp_file)\n tagged_comp_data = SimpleDataReader(tagged_comp_file)\n assert comp_data.getSentencesSize() == tagged_comp_data.getSentencesSize(), \"Missing Sentences!\"\n mistakes = 0\n for i in range(comp_data.getSentencesSize()):\n comp_sentence = comp_data.getSentenceByIndex(i)\n tagged_comp_sentence = tagged_comp_data.getSentenceByIndex(i)\n assert len(comp_sentence) == len(tagged_comp_sentence), \"Missing Words in Sentence: \" + str(i)\n for k in range(len(comp_sentence)):\n word = comp_sentence[k]\n tagged_word = tagged_comp_sentence[k].split(TAGCHAR)[0]\n if word != tagged_word:\n mistakes += 1\n print(\"Sentences differ:\", word, tagged_word)\n if mistakes == 0:\n print(\"Files are Identical!\")\n else:\n print(\"Files are NOT Identical!\")", "def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been handled\n print file_title,\n print \"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. 
i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))", "def test_parse_simple_member(self):\n lexed = [\n Token(\n value=\"SuS\",\n token_type=KT.VERB,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=KT.NOUN,\n line_number=0,\n ),\n ]\n self.assertTrue(parse(SimpleKlingonGrammar, lexed))", "def test_empty_file(self):\n field = TypedFileField(required=False)\n for v in EMPTY_VALUES:\n assert field.clean(v) is None", "def test_word_found_in_file(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'word_not_found')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"unit\")\n self.assertTrue(result != [])\n\n result = indexer.search(\"index\")\n self.assertTrue(result != [])\n print(result)", "def test_parsing(self):\n truth = self.generate_fake_pos()\n batch_size = 4\n records = []\n for i in range(batch_size):\n record = b''\n for j in range(2):\n record += self.v4_record(*truth)\n records.append(record)\n\n parser = ChunkParser(ChunkDataSrc(records),\n shuffle_size=1,\n workers=1,\n batch_size=batch_size)\n batchgen = parser.parse()\n data = next(batchgen)\n\n batch = (np.reshape(np.frombuffer(data[0], dtype=np.float32),\n (batch_size, 112, 64)),\n np.reshape(np.frombuffer(data[1], dtype=np.int32),\n (batch_size, 1858)),\n np.reshape(np.frombuffer(data[2], dtype=np.float32),\n (batch_size, 3)),\n np.reshape(np.frombuffer(data[3], dtype=np.float32),\n (batch_size, 3)))\n\n fltplanes = truth[1].astype(np.float32)\n fltplanes[5] /= 99\n for i in range(batch_size):\n data = (batch[0][i][:104],\n np.array([batch[0][i][j][0] for j in range(104, 111)]),\n batch[1][i], batch[2][i], batch[3][i])\n self.assertTrue((data[0] == truth[0]).all())\n self.assertTrue((data[1] == fltplanes).all())\n self.assertTrue((data[2] == truth[2]).all())\n scalar_win = data[3][0] - data[3][-1]\n self.assertTrue(np.abs(scalar_win - truth[3]) < 1e-6)\n scalar_q = data[4][0] - data[4][-1]\n self.assertTrue(np.abs(scalar_q - truth[4]) < 1e-6)\n\n parser.shutdown()", "def test_issue7306(en_lookup_nlp):\n doc = Doc(en_lookup_nlp.vocab, words=[\"singing\"])\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert doc[0].lemma_ == \"sing\"", "def testConvertMissingLabels(self):\n self.assertEqual(self.data['no_species']['labels'][0]['species'], '-1')\n self.assertEqual(self.data['no_count']['labels'][0]['count'], '-1')\n self.assertEqual(self.data['no_standing']['labels'][0]['standing'], '-1')", "async def test_no_matches(test_handler):\n queries = [\n \"braf\", \"braf v600000932092039e\", \"NP_000213.1:cp.Leu862=\",\n 
\"NP_000213.1:cp.Leu862\", \"BRAF V600E 33\", \"NP_004324.2:p.Glu600Val\",\n \"NP_004324.2:p.Glu600Gal\", \"NP_004324.2839:p.Glu600Val\",\n \"NP_004324.2:t.Glu600Val\", \"this:c.54G>H\", \"NC_000007.13:g.4T<A\",\n \"test\", \"131\", \"braf z600e\", \"braf e600z\", \"Thr790Met\", \"p.Tyr365Ter\",\n \"ERBB2 G776delinsVCZ\", \"NP005219.2:p.Glu746_Thr751delinsValAla\",\n \"NP_005219.2:p.Glu746Thr751delinsValAla\", \"EGFR L747_L474delinsP\",\n \"NP_005219.2:p.Glu746_Thr751delinssValAla\", \"EGFR delins\",\n \"NM_004333.4:c.1799_1800delTGinsAT\",\n \"NM_173851.3(SLC30A8):c.973C>T%20(p.Arg325Trp)\"\n ]\n for q in queries:\n resp = await test_handler.normalize(q, untranslatable_returns_text=True)\n assert resp.variation_descriptor.type == \"VariationDescriptor\", q\n assert resp.variation_descriptor.variation.type == \"Text\", q\n assert resp.variation_descriptor.label == q.strip(), q\n\n resp = await test_handler.normalize(\"clinvar:10\")\n assert resp.variation_descriptor is None\n\n resp = await test_handler.normalize(\" \")\n assert resp.variation_descriptor is None\n\n resp = await test_handler.normalize(\"\")\n assert resp.variation_descriptor is None", "def test_small_naive_token_stream():\n tokens = list(sp.tokenize('{<abc>}'))\n assert tokens == [\n tk.START_GROUP,\n tk.START_GARBAGE,\n tk.CHARACTER,\n tk.CHARACTER,\n tk.CHARACTER,\n tk.END_GARBAGE,\n tk.END_GROUP\n ]", "def test_bad_data(self):\n # bad data file has:\n # 1 bad status\n # particle A has bad timestamp\n # particle B has bad dark fit\n # particle C has bad frame type\n # particle D has bad year\n stream_handle = open(os.path.join(RESOURCE_PATH,\n 'bad_SNA_SNA.txt'), MODE_ASCII_READ)\n\n self.create_parser(stream_handle, telem_flag=False)\n\n # get E, since it is first it will generate a metadata\n particles = self.parser.get_records(2)\n\n # check all the values against expected results.\n self.assert_particles(particles, 'last_and_meta_SNA_recov.yml', RESOURCE_PATH)\n\n # should have had 5 exceptions by now\n self.assertEqual(len(self.exception_callback_value), 5)\n\n for exception in self.exception_callback_value:\n self.assert_(isinstance(exception, RecoverableSampleException))", "def test_spanish_no_translation(self):\n resp = ResponseFactory(\n locale=u'es',\n product=u'firefox',\n description=u'hola',\n translated_description=u''\n )\n\n # No jobs should be translated\n eq_(len(resp.generate_translation_jobs()), 0)\n\n # Nothing should be translated\n eq_(resp.translated_description, u'')", "def test_missing_multiple_tokens(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n expected_exc_type=MissingSymbolError,\n A=0,\n D=1)", "def guess_cuewords():\n\n if t_word[:3] == 'nie':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:3] == 'nic':\n create_negation_frame()\n create_target_focus_scope()", "def _check_pofiles_content(self):\n\n # The list of invalid chars is specific to Catalan language\n invalid_chars = {u'á', u'ñ', u'ë', u'ù', u'â', u'ê', u'î', u'ô', u'û',\n u'ë', u'ÿ', u'ä', u'ö'}\n\n try:\n\n THRESHOLD_PERCENTAGE = 1\n findFiles = FindFiles()\n for filename in findFiles.find(self.temp_dir, \"*.po\"):\n poFile = pofile(filename)\n\n invalid = 0\n for entry in poFile:\n # Only localized segments. 
Skips developers names,\n # untranslated country names, etc\n if entry.msgid == entry.msgstr:\n continue\n\n for char in entry.msgstr.lower():\n if char in invalid_chars:\n invalid = invalid + 1\n\n if len(poFile) > 100 and invalid > 0:\n percentage = 100.0 * invalid / len(poFile)\n if percentage > THRESHOLD_PERCENTAGE:\n self.errors = self.errors + 1\n print \"Unsual number of invalid chars at {0} ({1}%)\".\\\n format(filename, str(percentage))\n\n except Exception as detail:\n print detail", "def test_in_word(self):\n with self.assertRaises(ValueError):\n term, rmd = util.parse_date(\"notasearch1902foradatepartial\")", "def test_issue5141(en_vocab):\n doc_bin = DocBin(attrs=[\"DEP\", \"HEAD\"])\n assert list(doc_bin.get_docs(en_vocab)) == []\n doc_bin_bytes = doc_bin.to_bytes()\n\n doc_bin_2 = DocBin().from_bytes(doc_bin_bytes)\n assert list(doc_bin_2.get_docs(en_vocab)) == []", "def test_legal_names(self):\n gen_prods_split = [p.name.split(\" \")\n for p in generate_products()]\n should_be_adjs = [n[0] for n in gen_prods_split]\n should_be_nouns = [n[1] for n in gen_prods_split]\n\n for a in should_be_adjs:\n self.assertIn(a, ADJECTIVES)\n\n for n in should_be_nouns:\n self.assertIn(n, NOUNS)", "def test_get_texts_ignores():\n file_map = sd.get_file_map(\".\")\n texts = sd.get_texts(file_map)\n ingnores = \"[:.,;:!?\\\"-()]\\n\".split()\n for text in texts:\n for char in ingnores:\n assert text.find(char) == -1", "def test_raise_error_unknown_field_filtered_files():\n\n files = ['Unihan_Variants.txt']\n\n options = {'input_files': files, 'fields': ['kDefinition']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match('Field ([a-zA-Z].*) not found in file list.')", "def test_no_metadata_block_end(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"no_metadata_end.md\")\n\n # Metadata blocks should end with '___' or '...' if not it should fail\n with self.assertRaises(Exception) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Could not find end of metadata block.\", message)", "def test_invalid_metadata_block_end(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"no_metadata_end.md\")\n\n # Metadata blocks should end with '___' or '...' 
if not it should fail\n with self.assertRaises(Exception) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Could not find end of metadata block.\", message)", "def setUp(self):\n tokenizer = TokenizerWrapper('openai/clip-vit-base-patch32')\n\n # 'Goodbye' in kiswahili\n tokenizer.add_placeholder_token('kwaheri', num_vec_per_token=1)\n # 'how much' in kiswahili\n tokenizer.add_placeholder_token('ngapi', num_vec_per_token=4)\n\n with self.assertRaises(AssertionError):\n tokenizer.add_placeholder_token('hello', num_vec_per_token=1)\n\n self.tokenizer = tokenizer", "def test_text_is_missing(app):\n rv = app.test_client().post('/tokenize', json={})\n assert rv.status_code == 400\n\n rv = app.test_client().post('/tokenize', json={'txt':'This is the text.'})\n assert rv.status_code == 400", "def test_issue4104(en_lookup_nlp):\n words = [\"dry\", \"spun\", \"spun-dry\"]\n doc = Doc(en_lookup_nlp.vocab, words=words)\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert [token.lemma_ for token in doc] == [\"dry\", \"spin\", \"spin-dry\"]", "def test_labeled_corpus_saving(self):\n\n original_corpus = [[\"Yo\", \"soy\", \"una\", \"oración\", \"gramatical\", \",\",\n \"regocíjense\", \"en\", \"mi\", \"glória\", \".\"],\n [\"Yo\", \"ungrammatical\", \"es\", \"oración\", \",\"\n \"tú\", \"presumido\", \"elitista\", \".\"]]\n reader = LinguoDatasetReader()\n\n with tempfile.TemporaryDirectory() as temp_dir:\n # first test the grammatical case\n fileName_asG = temp_dir + \"testfile\"\n corpus_tools.save_uniform_labeled_corpus(fileName_asG,\n original_corpus,\n g_label=1)\n loaded_asG = reader.read(fileName_asG)\n self.assertEqual(len(original_corpus), len(loaded_asG))\n for original_sent, loaded_sent in zip(original_corpus, loaded_asG):\n self.assertEqual(loaded_sent.fields[\"g_label\"].label,\n \"grammatical\")\n self.assertEqual(loaded_sent.fields[\"ug_type\"].label, \"G\")\n plain_loaded = [str(token) for\n token in loaded_sent.fields[\"sentence\"].tokens]\n self.assertEqual(plain_loaded, original_sent)\n # Now to test it for ungrammatical (with a valid ug_type)\n fileName_asUG = temp_dir + \"testfileUG\"\n corpus_tools.save_uniform_labeled_corpus(fileName_asUG,\n original_corpus,\n g_label=0, ug_type=\"WS\")\n loaded_asUG = reader.read(fileName_asUG)\n self.assertEqual(len(original_corpus), len(loaded_asUG))\n for original_sent, loaded_sent in zip(original_corpus,\n loaded_asUG):\n self.assertEqual(loaded_sent.fields[\"g_label\"].label,\n \"ungrammatical\")\n self.assertEqual(loaded_sent.fields[\"ug_type\"].label, \"WS\")\n plain_loaded = [str(token) for\n token in loaded_sent.fields[\"sentence\"].tokens]\n self.assertEqual(plain_loaded, original_sent)", "def test_invalid_post_data_empty_fields(self):\n data = {\n # first_author_form\n 'name': '',\n \n # authors_fs --> 'form-0-name', ..., 'form-3-name' + ManagementForm\n 'form-0-name': '',\n 'form-1-name': '',\n 'form-2-name': '',\n 'form-3-name': '',\n 'form-TOTAL_FORMS': ['4'],\n 'form-INITIAL_FORMS': ['0'],\n 'form-MIN_NUM_FORMS': ['0'],\n 'form-MAX_NUM_FORMS': ['1000'],\n \n # language_form\n 'code': '',\n \n # book_form\n 'title': '',\n 'pub_date': '',\n 'pages': '',\n 'isbn': '',\n 'cover_url': '',\n }\n self.assertTrue(Book.objects.count() == 0)\n self.assertTrue(Author.objects.count() == 0)\n self.assertTrue(Language.objects.count() == 0)\n \n response = self.client.post(self.url, data)\n self.assertEquals(response.status_code, 200)\n \n 
language_form = response.context.get('language_form')\n book_form = response.context.get('book_form')\n self.assertTrue(language_form.errors)\n self.assertTrue(book_form.errors)\n\n self.assertTrue(Book.objects.count() == 0)\n self.assertTrue(Author.objects.count() == 0)\n self.assertTrue(Language.objects.count() == 0)", "def testTermKnownValuesWikiWrapForList(self):\n for wikilang, pos, termlang, thisterm, termgender, asexample, \\\n forlist in self.knownValues:\n if pos == 'noun':\n aterm = term.Noun(termlang, thisterm, gender=termgender)\n if pos == 'verb':\n aterm = term.Verb(termlang, thisterm)\n result = aterm.wikiWrapForList(wikilang)\n self.assertEqual(forlist, result)" ]
[ "0.7741058", "0.5441808", "0.51667565", "0.5122168", "0.5025865", "0.49811262", "0.49760607", "0.4974837", "0.4910542", "0.48398942", "0.48027694", "0.4799192", "0.47787705", "0.4753563", "0.474306", "0.47351125", "0.47177085", "0.47013432", "0.4684846", "0.46820647", "0.46782914", "0.4673973", "0.46650305", "0.46616966", "0.4661319", "0.46491688", "0.46367228", "0.4619315", "0.46184957", "0.46131784", "0.4607191", "0.46009168", "0.45850158", "0.45849055", "0.4580296", "0.4561965", "0.4541537", "0.4535841", "0.45219833", "0.45194703", "0.4516629", "0.4511329", "0.4508468", "0.45066658", "0.4506553", "0.44877714", "0.44764256", "0.44745338", "0.44722345", "0.44682643", "0.4463061", "0.4460847", "0.44602445", "0.44591665", "0.4448828", "0.44476846", "0.44423333", "0.44423205", "0.44393554", "0.44386587", "0.4436095", "0.44338107", "0.44326052", "0.44306755", "0.4426322", "0.4421038", "0.4420248", "0.44020385", "0.44019532", "0.44000864", "0.43979642", "0.43979412", "0.43923774", "0.4389784", "0.4386134", "0.43851802", "0.43843764", "0.43770763", "0.43710253", "0.43659458", "0.43562722", "0.43525887", "0.4345964", "0.434528", "0.43415403", "0.43366617", "0.4336472", "0.43305925", "0.43290922", "0.43213308", "0.43204492", "0.4319471", "0.43178982", "0.43161878", "0.4314875", "0.431318", "0.43118787", "0.4309554", "0.4304175", "0.43036988" ]
0.80459315
0
{ Carrionette|VRGR} { Gibbering mouther} { chapter 5|VRGR|5|Priests of Osybus} { Keepers of the Feather|VRGR|3|Keepers of the Feather} { Monster Manual|MM} { MM|MM} { chapter 5|VRGR|5} { Curse of Strahd|CoS} { Vine Blight||vine blights} { Domain of Alien Memories} itallics { modify memory} { star spawn emissary} bolded { greater restoration} { poisoned} { Investigation} { 1d6}
def fix_links(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_item_formatting(possible_matches):\n for i in range(len(possible_matches)):\n if i == 0:\n continue\n words = possible_matches[i].split()\n\n #If previous term has same ending (\"Dinner\") as current term, remove it\n if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[-1]:\n #8 = amount of characters taken up by [' during ']\n length = len(possible_matches[i].split()[-1]) + 8\n possible_matches[i - 1] = possible_matches[i - 1][:length*-1]\n\n return possible_matches", "def ParserTeil2():\r\n Bags = {}\r\n for Line in Lines:\r\n Words = Line.split()\r\n Farbe = Words[0]+\" \"+Words[1]\r\n Bags[Farbe] = re.findall(r\"(\\d+?) (.+?) bags?\", Line)\r\n return Bags", "def structure_data(characters):\n # Ignore superfluous diachritics and optional symbol\n IGNORE = [\"ˈ\", \"ˌ\", \"'\", \"̪\", \"̞\", \"ˣ\", \"̯\", \"-\", \"(\", \")\", \"[\", \"]\"]\n out = []\n for i, c in enumerate(characters):\n if c in IGNORE:\n continue\n # \"ː\" should be part of the character (no space between), also account for variation\n # in vowel length character used\n elif c == \"ː\" or c == \":\":\n out[-1] += \"ː\"\n # Do not add the optional characters, either (In Finnish seems to just be a glottal stop)\n elif i > 0 and characters[i-1] == \"(\" and characters[i + 1] == \")\":\n continue\n else:\n out.append(c)\n\n return ' '.join(out)", "def ROOM_DESCRIPTIONS(adventurer):\n rooms = (\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} is inside a wine cellar, most of it is filled with cracked, empty bottles.\\n\"\n \"Perhaps it is better to not go looking for an intact bottle.\\n\"\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} suspects it will be very detrimental to their health to fight monsters while drunk.\",\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} is inside a gaol, the cells are mostly filled with the skeletons of prisoners long dead.\\n\"\n \"Most of the cell doors are open with the exception of one in the far corner.\\n\"\n \"There seems to be a person in there but they are crouched in the corner muttering to themself.\\n\"\n \"The cleaver they are holding looks very dangerous so perhaps it is best to leave them alone.\\n\",\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR}is inside a dusty old library, piles and piles of books are scattered amongst the dirty floor.\\n\"\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} remembers the last time they perused books inside of that nast vampire\\'s castle.\\n\"\n f\"The tentacles that came out of the pages almost strangled {text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR}.\\n\"\n f\"Perhaps it is better to finish what {text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} came here to do instead of doing some light reading.\",\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} is inside what appears to be a ritual room.\\n\"\n \"The only things inside a room are black candles mounted on the walls and a pentagram in the middle of the floor.\\n\"\n f\"The pentagram appears to be made from dried blood, {text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} would rather not think of who or what provided that blood.\\n\"\n \"On each corner of the star there appears to be a random assortment of objects.\\n\"\n \"For some reason one of them looks like a phallic object.\\n\"\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} decides they don\\'t want to stay here any longer 
than they have to.\",\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} is inside their childhood bedroom.\\n\"\n f\"The bed and drawer is just like how they remember it.\\n\"\n f\"There's even that empty aquarium {adventurer} left inside their room even though the fish all died within a day.\\n\"\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} would rather not think too hard about why their room is inside of a dungeon.\\n\",\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} is inside of a sewer, the smell feels like its burning away their nostril hairs.\\n\"\n f\"{text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} wonders how there could even be a sewer when the doors connecting it lead into different rooms.\\n\"\n f\"As {text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} stands there they see a figure in their peripheral vision.\\n\"\n \"What looks like a large bipedal rat is standing there 4 smaller bipedal turtles.\\n\"\n f\"Before {text_color.MAGENTA_TEXT}{adventurer}{text_color.FINISH_COLOR} can say anything a large cloud of smoke appears and when it disappears they are gone.\\n\"\n )\n return random.choice(rooms)", "def genQuestion(line):\r\n if type(line) is str: # If the passed variable is of type string.\r\n line = TextBlob(line) # Create object of type textblob.blob.TextBlob\r\n \r\n bucket = {} # Create an empty dictionary\r\n \r\n subject_list = []\r\n question_subject=\"\"\r\n answer_subject=\"\"\r\n for i,j in enumerate(line.tags): # line.tags are the parts-of-speach in English \r\n question_subject += j[0] + \" \"\r\n if (j[1] == \"NNP\" or j[1] == \"NNS\"): \r\n subject_list.append(j[0])\r\n if j[1] not in bucket:\r\n bucket[j[1]] = i # Add all tags to the dictionary or bucket variable\r\n \r\n if len(subject_list):\r\n random_subject_val = random.randint(0, len(subject_list)-1)\r\n question_subject = question_subject.replace(str(subject_list[random_subject_val]), \"______\")\r\n answer_subject = str(subject_list[random_subject_val])\r\n \r\n return question_subject, answer_subject", "def map2mw_C(d,k1,entry):\n if k1 in map2mw_special_C:\n return map2mw_special_C[k1]\n regexes = [\n u'<ab>c.</ab> de {%(.*?)%}',\n u'<ab>c.</ab> {%(.*?)%}',\n #u'<ab>c.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def parse_courses():\n\n subjects = collections.OrderedDict()\n name = '' # the most recent course name acronym (ex. 
'COMP')\n\n courses = re.sub(r'\\([^)]*\\)', '', COURSES).split() # Remove parens and their contents\n\n for course in courses:\n if course == 'OR':\n continue\n\n if course[0].isalpha():\n\n index = 0 # the upper bound character index of the subject name\n for char in course:\n if char.isalpha():\n index += 1\n else:\n break\n\n name = course[:index]\n number = course[index:index+4]\n else:\n number = course[:4]\n\n try:\n subjects[name].append(number)\n except KeyError:\n subjects[name] = [number]\n\n return subjects", "def extract_dialogue(seq):\n for el in seq:\n if el.startswith('(') and 'Visitor' in el:\n first_part = el[el.find(')'):]\n yield first_part[first_part.find(':')+2:-1]", "def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")", "def outline_nodes(str_in):\n\n # TASKPAPER REGEX REQUIREMENTS ARE SIMPLER THAN MARKDOWN\n rgx_body = re.compile(r'(\\t*)([^\\t]*.*)$')\n rgx_tp_tsk = re.compile(r'^(\\t*)(\\-\\s.*)$')\n rgx_tp_prj = re.compile(r'^(\\t*)(\\s*)([^-\\s].*\\:)$')\n\n def _read_tags(dct_node):\n \"\"\" Store the key-value pairs and key list\n and return text leaving in-line tags in place\n but pruning off any tags at the end of the line\n \"\"\"\n str_text = dct_node[ATT_TEXT]\n\n bln_mode = False\n str_point = str_text\n\n # and then digest all tags, right to left, eating terminal tags.\n str_s_point = str_point.rstrip()\n i_end = len(str_s_point)\n lst_keys = []\n lst_not_duplicate = []\n rgx_tag = re.compile(RGX_TP_TAG)\n lst_matches = [_ for _ in rgx_tag.finditer(str_s_point)]\n for o_match in lst_matches:\n str_key = o_match.group(2)\n # Valid key assignment ? 
or a duplicate ?\n if str_key not in lst_keys:\n lst_keys.append(str_key)\n var_value = o_match.group(3)\n if var_value != None: #treat simple keys as boolean flags\n dct_node[ATT_TAGS][str_key] = var_value\n else:\n dct_node[ATT_TAGS][str_key] = ''\n lst_not_duplicate.append(True)\n else:\n lst_not_duplicate.append(False)\n\n # and now shed any string of non-duplicate tags from the end\n for i in reversed(range(len(lst_matches))):\n o_match = lst_matches[i]\n if lst_not_duplicate[i]:\n if i_end == o_match.end():\n i_end = o_match.start()\n else:\n break\n else:\n break\n\n\n # store any keys in textual order,\n lng_keys = len(lst_keys)\n if lng_keys:\n if lng_keys > 1:\n dct_node[ATT_TAG_NAMES] = lst_keys\n else:\n dct_node[ATT_TAG_NAMES] = lst_keys\n # and assign any remaining text\n if bln_mode or lng_keys:\n dct_node[ATT_TEXT] = str_s_point[0:i_end]\n\n\n def _set_tp_node(dct_node, var_type, o_match):\n \"\"\"set TP node properties by reference\"\"\"\n bln_empty = False\n if var_type != TYP_NOTE:\n dct_node[ATT_TYPE] = var_type\n if var_type != TYP_PROJ: # strip prefix\n dct_node[ATT_TEXT] = o_match.group(2)[2:]\n else: # or suffix\n dct_node[ATT_TEXT] = o_match.group(2) + o_match.group(3)[:-1]\n else:\n # str_text = dct_node[ATT_LINE].lstrip()\n dct_node[ATT_TEXT] = dct_node[ATT_TEXT].lstrip()\n if dct_node[ATT_LINE].lstrip() == '':\n dct_node[ATT_TYPE] = TYP_EMPTY\n bln_empty = True\n\n if not bln_empty:\n lng_indent = len(o_match.group(1))\n if lng_indent:\n dct_node[ATT_INDENT] = lng_indent\n\n str_vanilla = TYP_NOTE\n\n\n lst_nodes = [\n {ATT_ID:0, ATT_PARENT: None, ATT_LEVEL:0,\n ATT_CHILD_INDEX: None, ATT_INDENT:None, ATT_TYPE:TYP_ROOT,\n ATT_LINE_INDEX:None, ATT_TEXT_INDEX:None, ATT_TEXT:'',\n ATT_LINE:'', ATT_TAG_NAMES:[], ATT_TAGS:{},\n ATT_CHILN:[], ATT_PATH:[]}\n ] + [\n {ATT_ID:i+1, ATT_TYPE:str_vanilla, ATT_LINE:str_line,\n ATT_LINE_INDEX:i, ATT_TEXT:str_line, ATT_INDENT:0, ATT_TAGS:{},\n ATT_LEVEL:0, ATT_TAG_NAMES:[], ATT_CHILN:[], ATT_PATH:[]}\n for i, str_line in\n enumerate(str_in.splitlines())\n ]\n\n\n # MAIN PARSE LOOP TO DERIVE TYPE, AND OTHER ATTRIBUTES OF EACH NODE\n\n lng_txt = 0\n for dct_node in lst_nodes[1:]:\n # Maintain an index into the text\n # (Note that [ATT_ID] serves as a 1-based index to the lines)\n dct_node[ATT_TEXT_INDEX] = lng_txt\n\n str_point = dct_node[ATT_LINE]\n lng_chars = len(str_point)\n lng_txt += (lng_chars + 1) # splitlines is dropping \\n\n\n # IDENTIFY THE INDENT COUNT & NESTING LEVEL\n # Assume Note text until there is counter-evidence\n if lng_chars < 1:\n dct_node[ATT_TYPE] = TYP_EMPTY\n else:\n _read_tags(dct_node)\n str_point = dct_node[ATT_TEXT]\n o_match = rgx_tp_prj.match(str_point)\n\n if o_match != None:\n _set_tp_node(dct_node, TYP_PROJ, o_match)\n else:\n o_match = rgx_tp_tsk.match(str_point)\n if o_match != None:\n _set_tp_node(dct_node, TYP_TASK, o_match)\n else:\n o_match = rgx_body.match(str_point)\n if o_match != None:\n _set_tp_node(dct_node, TYP_NOTE, o_match)\n else:\n print \"Unexpected TP pattern:\" + str_point\n\n\n # Now that we know the provisional type of each node,\n # digest any infixed or postfixed tags\n # DETECT ANY REMAINING EMPTIES BEFORE WE TAKE OUT MODES & TAGS\n if dct_node[ATT_TYPE] != TYP_EMPTY:\n str_line = dct_node[ATT_LINE]\n str_rs_line = str_line.rstrip()\n if str_rs_line == '':\n dct_node[ATT_TEXT] = ''\n if dct_node[ATT_TYPE] == TYP_NOTE:\n dct_node[ATT_TYPE] = TYP_EMPTY\n\n return lst_nodes", "def map2mw_Des(d,k1,entry):\n if k1 in map2mw_special_Des:\n return map2mw_special_Des[k1]\n 
regexes = [\n u'<ab>dés.</ab> de {%(.*?)%}',\n u'<ab>dés.</ab> {%(.*?)%}',\n u'<ab>dés.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def parseGeno(this_groups, proportions) -> str:\n subclades = []\n clades = []\n primary = []\n for group in this_groups:\n level = len(group.split(\".\"))\n if level == 5:\n subclades.append(group)\n if level == 4:\n subclades.append(group)\n if level == 3:\n subclades.append(group)\n elif level == 2:\n clades.append(group)\n elif level == 1:\n primary.append(group)\n\n # fix 4.3.1/4.3.1.1/4.3.1.2/4.3.1.P1/4.3.1.3 nesting\n if ('4.3.1.3' in subclades) and ('4.3.1' in subclades):\n subclades.remove('4.3.1')\n if ('4.3.1.1' in subclades) and ('4.3.1' in subclades):\n subclades.remove('4.3.1')\n if ('4.3.1.2' in subclades) and ('4.3.1' in subclades):\n subclades.remove('4.3.1')\n if ('4.3.1.1.P1' in subclades) and ('4.3.1' in subclades):\n subclades.remove('4.3.1')\n if ('4.3.1.1.P1' in subclades) and ('4.3.1.1' in subclades):\n subclades.remove('4.3.1.1')\n\n # fix 3.3.2.Bd nesting\n if ('3.3.2.Bd1' in subclades) and ('3.3.2' in subclades):\n subclades.remove('3.3.2')\n if ('3.3.2.Bd2' in subclades) and ('3.3.2' in subclades):\n subclades.remove('3.3.2')\n\n # fix 2.3, 2.2 nesting\n if ('2.2' in clades) and ('2.3' in clades):\n clades.remove('2.2')\n\n # fix 3.5.3, 3.5.4 nesting\n if ('3.5.3' in subclades) and ('3.5.4' in subclades):\n subclades.remove('3.5.3')\n\n # fix 2.3.1, 2.3.3 nesting\n if ('2.3.1' in subclades) and ('2.3.2' in subclades):\n subclades.remove('2.3.2')\n\n # fix 2.3.1, 2.3.3 nesting\n if ('2.3.5' in subclades) and ('2.3.3' in subclades):\n subclades.remove('2.3.3')\n\n # fix primary clades relative to CT18 = 3.2.1, ie has clade1, clade2, clade3 SNPs\n if len(primary) == 1:\n if '3' in primary:\n primary = ['2'] # clade 2 differs from CT18 by the clade3-defining SNP\n # note other option is clade 4 snp, which defines clade 4 relative to CT18\n elif len(primary) == 2:\n if ('2' in primary) and ('3' in primary):\n primary = ['1'] # clade 2 differs from CT18 by the clade3-defining SNP\n elif len(primary) == 0:\n primary = ['3']\n elif len(primary) == 3:\n if ('1' in primary) and ('2' in primary) and ('3' in primary):\n primary = ['0']\n\n # fix clade relative to CT18:\n if '3.2' in clades:\n clades.remove('3.2') # anything NOT in 3.2 will have this SNP\n else:\n if len(clades) == 0:\n clades.append('3.2') # anything with no clade, and 3.2 SNP not called, belongs in 3.2 with CT18\n\n # fix 3.5.3, 3.5.4 nesting\n if ('3.5.3' in clades) and ('3.5.4' in clades):\n clades.remove('3.5.3')\n\n # fix subclades relative to CT18:\n if '3.2.1' in subclades:\n subclades.remove('3.2.1') # anything NOT in 3.2.1 will have this SNP\n else:\n if len(subclades) == 0:\n subclades.append('3.2.1') # anything with no subclade, and 3.2.1 SNP NOT called, belongs in 3.2.1 with CT18\n\n # add zero-th clade/subclade where unresolved -- disabled\n # if len(clades) == 0:\n #\tif len(primary) == 1:\n #\t\tclades.append(primary[0] + '.0')\n # if len(subclades) == 0:\n # if len(clades) == 1:\n #\t\tsubclades.append(clades[0] + '.0')\n\n # store final genotype, to the lowest level available\n final_geno = primary[0]\n if len(clades) > 0:\n final_geno = ','.join(clades)\n if len(subclades) > 0:\n final_geno = 
','.join(subclades)\n\n # add proportion of reads supporting each of these groups\n p_prod = 1\n\n p_sub = []\n for group in subclades:\n if group in proportions:\n p_sub.append(str(round(proportions[group], 2)))\n p_prod = p_prod * proportions[group]\n\n p_cl = []\n for group in clades:\n if group in proportions:\n p_cl.append(str(round(proportions[group], 2)))\n p_prod = p_prod * proportions[group]\n\n p_pr = []\n for group in primary:\n if group in proportions:\n p_pr.append(str(round(proportions[group], 2)))\n p_prod = p_prod * proportions[group]\n\n # final call\n info = final_geno + '\\t'\n if 'A' in proportions:\n info += 'A' # annotate as 'A' to indicate this comes from assembled data and not reads\n else:\n info += str(round(p_prod, 2)) # indicate proportion of reads supporting this call\n\n # level calls\n info += '\\t' + ','.join(subclades) + '\\t' + ','.join(clades) + '\\t' + ','.join(primary)\n\n # level proportions\n info += '\\t' + ','.join(p_sub) + '\\t' + ','.join(p_cl) + '\\t' + ','.join(p_pr)\n\n return info", "def make_text(chains):\n words = []\n not_end_of_list = True\n # your code goes here\n \n # starts with a capital lettered word from source text\n capitalized_ngrams = []\n for key in chains.keys():\n # check if the first tuple in key[0][0]\n if key[0][0].isupper():\n capitalized_ngrams.append(key)\n \n \n\n selected_keys = list(capitalized_ngrams)\n count = 0\n while not_end_of_list:\n choice_n = choice(selected_keys)\n\n if count == 0:\n words.extend(choice_n)\n \n if chains[choice_n] and count <= 150:\n # as long as there is an option, picks a random element from dict list\n choose_next = choice(chains[choice_n])\n # adds new word to list\n words.append(choose_next)\n # creates a list of keys whose last item in tuple is item from list\n selected_keys = [x for x in chains.keys() if x == tuple([*choice_n[1:], choose_next])]\n # it is possible continues\n if selected_keys:\n pass\n else:\n not_end_of_list = False\n\n count += 1\n \n else:\n not_end_of_list = False\n\n return \" \".join(words)", "def tab2reactions(convtab):\n \n rxnList = dict()\n \n for line in convtab:\n \n line = str(line)\n line = line.replace('\\n','')\n line = line.replace('\"','')\n vec = line.split('\\t') # 0 ReactionID (r_xxxx), 1 Reversibility (bool), 2 type(rct, act, inh),\n #3 substrateId (t_xxxx) , 4 bindingSite (double),5 stoi(int),6 hill (int),\n \n rxnID = vec[0].replace('r_','')\n \n if not(rxnID in rxnList.keys()):\n rxnList[rxnID] = dict()\n rxnList[rxnID]['rev'] = vec[1]\n rxnList[rxnID]['rct'] = dict()\n rxnList[rxnID]['act'] = dict()\n rxnList[rxnID]['inh'] = dict()\n \n # go through the substrate, if not yet present in the dictionary add them\n # each substrate is a dictionary with two values: active site and stoichiometry\n # a suffix is appended if the kinetic parameter varies across isoenzymes\n \n metID = nr2id(vec[3].replace('t_',''))\n if vec[2] == 'rct':\n if not(metID in rxnList[rxnID].keys()):\n rxnList[rxnID]['rct'][metID] = dict()\n rxnList[rxnID]['rct'][metID]['actsit'] = float(vec[4])\n rxnList[rxnID]['rct'][metID]['stoi'] = int(vec[5])\n if str(vec[8]) != 'NA':\n rxnList[rxnID]['rct'][metID]['suffix'] = '_'+vec[8]\n else:\n rxnList[rxnID]['rct'][metID]['suffix'] = ''\n else:\n raise NameError(metID + ' twice in table for reaction ' + rxnID)\n \n # activator and inhibitor dictionaries are added to their respective\n # rection-specific dictionaries\n # a suffix is appended to distinguish reaction species and regulators in case\n # they are the same metabolite and 
to allow for kinetic parameters which vary\n # between isoenzymes\n \n # activators\n elif vec[2] == 'act':\n rxnList[rxnID]['act'][metID] = dict()\n rxnList[rxnID]['act'][metID]['hill'] = float(vec[6])\n rxnList[rxnID]['act'][metID]['actsit'] = float(vec[4])\n rxnList[rxnID]['act'][metID]['subtype'] = vec[7].lower()\n if str(vec[8]) != 'NA':\n rxnList[rxnID]['act'][metID]['suffix'] = '_'+vec[2]+'_'+vec[8]\n else:\n rxnList[rxnID]['act'][metID]['suffix'] = '_'+vec[2]+''\n \n # inhibitors \n elif vec[2][0:3] == 'inh':\n rxnList[rxnID]['inh'][metID] = dict()\n rxnList[rxnID]['inh'][metID]['hill'] = float(vec[6])\n rxnList[rxnID]['inh'][metID]['actsit'] = float(vec[4])\n rxnList[rxnID]['inh'][metID]['subtype'] = vec[7].lower()\n if str(vec[8]) != 'NA':\n rxnList[rxnID]['inh'][metID]['suffix'] = '_'+vec[2]+'_'+vec[8]\n else:\n rxnList[rxnID]['inh'][metID]['suffix'] = '_'+vec[2]+''\n \n else:\n print(vec[2] + ' not a valid value for type.')\n \n return(rxnList)", "def mina2_reader():\n with open(MINA2_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"第\") != -1 and voc_line.find(\"课\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n elif voc_line != \"\\n\" and voc_line.find(\"会 話\") == -1 and voc_line.find(\"読み物\") == -1:\n voc_line.strip()\n\n voc_split = voc_line.split(\"\\t\")\n\n if len(voc_split) < 2:\n continue\n\n voc_dict = {\n \"Voc\": voc_split[0],\n \"Ext\": voc_split[1],\n \"Type\": \"\",\n \"Meaning\": voc_split[2]\n }\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list", "def reformatSS(rv):\n ss = []\n laneOut = []\n bcLens = []\n nLanes = 0\n\n for k, v in rv.items():\n ss.append(\"\\n\".join(v[0]))\n lanes = \"\"\n if len(v[1]) > 0:\n nLanes += len(v[1])\n lanes = \"_\".join([\"{}\".format(x) for x in sorted(list(v[1]))])\n laneOut.append(lanes)\n bcLens.append(v[2])\n\n if len(ss) < 2 and nLanes == 8:\n laneOut = None\n return ss, laneOut, bcLens", "def mina1_reader():\n with open(MINA1_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"大家日语\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n elif voc_line != \"\\n\":\n voc_line.strip()\n\n voc_split = voc_line.split(\"\\t\")\n while '' in voc_split:\n voc_split.remove('')\n\n if len(voc_split) < 3:\n continue\n\n voc_dict = {\n \"Ext\": voc_split[0],\n \"Voc\": voc_split[1],\n \"Type\": \"\",\n \"Meaning\": voc_split[2]\n }\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list", "async def mm(self):\n mm = [\n \"Melon et Melèche vont a la pêche. Melon prend les hameçons et Melèche la gaule :joy:\",\n \"Melon et Melèche sont à la pêche. Melon pêche le thon et Melèche la raie :joy:\",\n \"Melon et Melèche achètent une vieille maison. Melon la répare et Melèche l'habite :joy:\",\n \"Melon et Melèche veulent débuter dans l'apiculture. 
Melon achète la ruche et Melèche l'essaim :joy:\",\n \"Melon et Melèche apprennent la navigation à voile. Melon étudie le vent, et Melèche le noeud :joy:\",\n \"Melon et Melèche se font attaquer par un chien. Melon lui tire les oreilles et Melèche la queue :joy:\",\n \"Melon et Melèche attrapent une abeille. Melon lui arrache les pattes et Melèche le dard :joy:\",\n \"Melon et Melèche sont au bal. Melon regarde les rockeuses et Melèche les valseuses :joy:\",\n \"Melon et Melèche sont à la mer et trouvent des coquillages. Melon garde la coquille et Melèche la moule :joy:\",\n \"Melon et Melèche sont au port. Melon regarde le bateau et Melèche la bite :joy:\",\n \"Melon et Melèche font l'autopsie d'une personne assassinée. Melon examine les coups de couteau et Melèche le trou de balle :joy:\",\n \"Melon et Melèche sont boulangers. Melon enfourne les baguettes et Melèche les miches :joy:\",\n \"Melon et Melèche sont a Bricomarché. Melon prend le raccord et Melèche le clou :joy:\",\n \"Melon et Melèche sont charcutiers. Melon prépare les filets et Melèche les rognons :joy:\",\n \"Melon et Melèche partagent une glace. Melon mange le cornet et Melèche les boules :joy:\",\n \"Melon et Melèche vont chez le poissonnier. Melon achète les sardines et Melèche la raie :joy:\",\n \"Melon et Melèche hissent un drapeau. Melon tient le tissu et melèche le poteau :joy:\",\n \"Melon et Melèche sont en forêt, au pied d'un chêne. Melon ramasse une feuille et Melèche le gland :joy:\",\n \"Melon et Melèche sont viticulteurs. Melon récolte le raisin et Melèche la grappe :joy:\",\n \"Melon et Melèche construisent un bateau. Melon travaille le bois et Melèche le jonc :joy:\",\n \"Melon et Melèche sont électriciens. Melon casse le fusible et Melèche l'ampoule :joy:\",\n \"Melon et Melèche embêtent un chien. Melon lui tire les poils et Melèche la queue :joy:\",\n \"Melon et Melèche pilotes de ligne. Melon surveille les instruments et Melèche le manche :joy:\",\n \"Melon et Melèche ont envie de se fumer un petit pet. Melon achète les feuilles et Melèche le bout :joy:\",\n \"Melon et Melèche arrosent les fleurs. Melon prend l’arrosoir et Melèche le tuyau :joy:\",\n \"Melon et Melèche fabriquent une ampoule. Melon amène le culot Melèche le plot :joy:\",\n \"Melon et Melèche font des filetages de boulons. Melon fait l'excentrique et Melèche le concentrique :joy:\",\n \"Melon et Melèche font de la mécanique. Melon place la vis et Melèche la rondelle :joy:\",\n \"Melon et Melèche font la vaisselle. Melon lave les assiettes et melèche les poêles :joy:\",\n \"Melon et Melèche jouent à la pétanque. Melon jette le cochonnet et Melèche les boules :joy:\",\n \"Melon et Melèche préparent le billard. Melon nettoie les boules et Melèche la queue :joy:\",\n \"Melon et Melèche jouent au Scrabble. Melon pioche le W et Melèche le Q :joy:\",\n \"Melon et melèche sont a l'animalerie. Melon prend le chat et melèche la chatte :joy:\",\n \"Melon et Melèche sont mariés à la même femme. Melon la saute et Melèche la trompe :joy:\",\n \"Melon et Melèche dorment à la belle étoile. Melon regarde les étoiles et Melèche la lune :joy:\",\n \"Melon et Melèche agrèssent un policier. Melon lui vole le képi et Melèche la trique :joy:\",\n \"Melon et melèche vont au jardin. Melon cherche les tomates et Melèche le poireau :joy:\",\n \"Melon et Melèche boivent l'apéro. Melon mange les gâteaux apéro et Melèche le saucisson :joy:\",\n \"Melon et Melèche préparent une sauce. 
Melon la cuisine et Melèche la goûte :joy:\",\n \"Melon et Melèche s'occupent du jardin. Melon fait le gazon et Melèche la fleur :joy:\",\n \"Melon et Melèche sont contrôleurs de train. Melon contrôle la tête et Melèche la queue :joy:\",\n \"Melon et Melèche entrent dans une boucherie. Melon achète la côtelette et Melèche la saucisse :joy:\",\n \"Melon et Melèche restaurent un vieux vélo. Melon répare le guidon et Melèche la béquille :joy:\",\n \"Melon et Melèche font une tarte aux pruneaux. Melon prépare la tarte et Melèche les pruneaux :joy:\",\n \"Melon et Melèche partagent une baguette. Melon mange la mie et Melèche le croûton :joy:\",\n \"Melon et Melèche s’en vont. Melon l’est plus là et Melèche l’est parti :joy:\",\n \"Melon et Melèche ont de grandes oreilles. Melon est lapin et Melèche lapine :joy:\",\n \"Melon et Melèche cherchent un mot. Melon consulte le Larousse et Melèche le Robert :joy:\",\n \"Melon et Melèche veulent planter un parasol. Melon apporte la toile et Melèche le piquet :joy:\",\n \"Melon et Melèche ont leur première relation sexuelle. Melon fait le réticent et Melèche le consentant :joy:\",\n \"Melon et Melèche prennent leur 4 heure. Melon mange les figues et Melèche les noix :joy:\",\n \"Melon et Melèche réparent un pneu. Melon le gonfle et Melèche le troue :joy:\",\n \"Melon et Melèche sont fans des rôles de Roger Moore. Melon aime James Bond et Melèche Le Saint :joy:\",\n \"Melon et Melèche s'échappent de prison. Melon force la serrure et Melèche le barreau :joy:\",\n \"Melon et Melèche sont à Eurodisney. Melon fait Space Mountain et Melèche encore la queue :joy:\",\n \"Melon et Melèche sont à l'Assemblée. Melon vote la poursuite des essais nucléaires et Melèche l'arrêt :joy:\",\n \"Melon et Melèche sont chausseurs. Melon vend les souliers et Melèche les bottes :joy:\",\n \"Melon et Melèche cueillent une fleur. Melon observe les pétales et Melèche le nectar :joy:\",\n \"Melon et Melèche fabriquent une tirelire. Melon fait le moule et Melèche la fente :joy:\",\n \"Melon et Melèche veulent faire des crêpes. Au supermarché, Melon achète la farine et Melèche les oeufs :joy:\",\n \"Melon et Melèche sont victimes de maladies microbiennes et parasitaires. Melon peste et poux et Melèche lepre et puces (me lèche le prépuce) :joy:\",\n \"Melon et Melèche vont à l'église. Melon cherche le curé et Melèche les Saints :joy:\",\n \"Melon et Melèche vont voter. Melon cherche l'isoloir et Melèche la fente :joy:\",\n \"Melon et Melèche voyagent en Angleterre. 
Melon visite Londres et Melèche l'Essex :joy:\"\n ]\n print('Blague melon & meleche')\n await self.bot.say(random.choice(mm))", "def getMatch(data):\n if len(data) > 15:\n return 'date: {0} {1}, match => {2}, {3}, {4}| 1x2 => {5}, {6}, {7}| handicap => {8}, {9}, {10}, {11}| OU => {12}, {13}, {14}, {15}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16])\n return 'date: {0} {1}, match => {2}, {3}, {4}| handicap => {5}, {6}, {7}, {8}| OU => {9}, {10}, {11}, {12}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13])", "def hyou_reader():\n with open(HYOU_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_match = [\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*〔(\\S*)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*〔(\\S+)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*(\\S+)\")\n ]\n\n voc_key = [\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 3, \"Meaning\": 4},\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 0, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 2, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 0, \"Meaning\": 2},\n ]\n\n match_count = len(voc_match)\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"第\") != -1 and voc_line.find(\"课\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n sound_list = sound_reader(lesson_count)\n elif not voc_line.find(\"----\") != -1 and voc_line != \"\\n\":\n voc_line.strip()\n\n voc_dict = {}\n for i in range(0, match_count):\n voc_group = voc_match[i].match(voc_line)\n if voc_group:\n for key, value in voc_key[i].items():\n if value != 0:\n voc_dict[key] = voc_group.group(value)\n else:\n voc_dict[key] = \"\"\n break\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_dict[\"Time\"] = sound_list[voc_count]\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list", "def trigrams(content):\n for item in range(len(content) - 2):\n buld_tupls = tuple(contents[item: item + 2])\n # test to make sure that two items are in a tuple\n # print(buld_tupls)\n # print(type(buld_tupls))", "def record(aline):\n # delect the Illustration\n bodylst = aline.split('[Illustration]') \n linestr = functools.reduce(lambda x, y : x + y, bodylst) \n \n # handle punctuation & capitalization\n punclst = [i for i in string.punctuation]\n punclst.remove(\"'\") \n punclst.remove('-')\n for i in punclst:\n linestr = linestr.replace(i,' ')\n alst = linestr.split()\n txtlst = [i.lower() for i in alst]\n # Considering the capitalization\n \n # record the words found and update worddict\n for j in txtlst:\n if j not in worddict:\n worddict.update({j:1})\n else:\n worddict[j] += 1", "def abstract(res):\n o = r'\\begin{center}'\n #o += r'\\begin{tikzpicture}[x={(-0.5cm,-0.5cm)}, y={(0.966cm,-0.2588cm)}, z={(0cm,1cm)}, scale=0.6, color={lightgray},opacity=.5]'\n #o += r'\\tikzset{facestyle/.style={fill=lightgray,draw=black,very thin,line join=round}}'\n o += r'\\begin{tikzpicture}' + '\\n'\n o += r'\\draw[blue!40!white] (0,0) rectangle (6,4); \\fill[blue!10!white] (2.4,4) rectangle (3.6,4.15);' + '\\n'\n for i in range(res[7]):\n x,y = random.randrange(2,538)/100.0,random.randrange(2,300)/100.0\n o += 
r'\\filldraw[fill=green!30!white,draw=white] (%s,%s) rectangle (%s,%s);'%(x,y,(x+0.58),(y+0.38)) + '\\n'\n o += r'\\filldraw[white] (%s,%s) -- (%s,%s);'%((x+0.29),(y+0.19),(x+0.58),(y+0.38)) + '\\n'\n o += r'\\filldraw[white] (%s,%s) -- (%s,%s);'%(x,(y+0.38),(x+0.29),(y+0.19)) + '\\n'\n o += r'\\end{tikzpicture}\\end{center}' + '\\n'\n\n o += r'\\begin{tabular}{|l|l|}' + '\\n' + r'\\hline' + '\\n'\n o += r'Election name& \\texttt{\"%s\"}\\\\'%res[1]\n o += r'Box number& \\texttt{%s}\\\\'%res[2]\n o += r'State& \\texttt{%s}\\\\'%('closed' if res[5] == cloSt else ('vote' if res[5] == votSt else 'register'))\n o += r'Registration end& \\textit{%s}\\\\'%res[3]\n o += r'Vote closing& \\textit{%s}\\\\'%res[4]\n o += r'Casted/registered& $%s/%s$\\\\'%(res[7],res[6]) +'\\n'\n o += r'\\hline\\end{tabular}' + '\\n'\n o += r'\\quad' + '\\n'\n o += r'\\begin{tabular}{|l|l|}\\hline'\n l = res[8].items()\n l.sort(key = operator.itemgetter(1),reverse=True)\n n = 0\n for x in l:\n o += r'%s & %s\\\\'%(r'\\textit{%s}'%x[0] if x[0] == 'White Ballot' else r'\\texttt{%s}'%x[0],x[1]) \n if n == 0:\n o += r'\\hline' + '\\n'\n n += 1\n o += r'\\hline\\end{tabular}' + '\\n'\n\n\n #o += r'\\item The designer\\textquoteright s digital signature of this document (the source file \\texttt{oeu.py}) is: \\tiny $$\\verb!%s!$$ \\normalsize'%rsa(IKey).sign(open(__file__).read()) + '\\n'\n\n return r'\\begin{abstract}' + abstract.__doc__ + r'\\end{abstract}' + o", "def scene_to_text(scenes):\n scene_text_dict = []\n scene_text_list = []\n for i, scene in enumerate(scenes):\n if len(scene['frame_data']) == 0:\n break\n scene_image = Image.fromarray(scene['frame_data'])\n str_text = pytesseract.image_to_string(scene_image)\n #list_text = list(filter(('').__ne__, re.split(\" |\\n|, |. |:|.\\n|\\x0c\", str_text)))\n list_text = list(filter(('').__ne__, re.split(\" |\\n\", str_text)))\n bag_of_word = collections.Counter(list_text)\n scene_text_dict.append(\n {'start': scene['start'], \n 'end': scene['end'], \n 'bag_of_word': dict(bag_of_word)\n })\n scene_text_list.append(list_text)\n return scene_text_dict, scene_text_list", "def map2mw_F(d,k1,entry):\n if k1 in map2mw_special_F:\n return map2mw_special_F[k1]\n regexes = [\n u'<ab>f2.</ab> de {%(.*?)%}',\n u'<ab>f2.</ab> {%(.*?)%}',\n #u'<ab>f2.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def subparse(block):\n\n verses = []\n context = None\n for char in block:\n\n if char == \"[\":\n if verses: verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n verses.append({\"surah\": \"\", \"verse\": \"\", \"quran\": \"\"})\n context = \"surah\"\n elif char == \":\" and context == \"surah\":\n verses[-1][\"surah\"] = int(verses[-1][\"surah\"])\n context = \"verse\"\n elif char == \"]\":\n verses[-1][\"verse\"] = int(verses[-1][\"verse\"])\n context = \"quran\"\n else: verses[-1][context] += char\n\n verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n return verses", "def parser(sent_list): #input: list of sentences", "def phrase_dict(phrase):\n switcher = {\n '처음으로': '닥앤미 병원을 찾아주셔서 감사합니다. 직접문의원할시 오른쪽 아래 1:1 버튼을 눌러주시면 직접 상담 가능합니다. 1:1 상담 가능 시간은 09시 – 18시 입니다.',\n '병원 정보': '어떤 정보를 보시고 싶으신가요?',\n '병원 위치': '“닥앤미 병원 주소는 서울시 용산구 이촌동 세움상가 2층입니다.” 더 자세한 지도확인을 원하실 경우 아래 버튼을 눌러주세요',\n '병원 운영시간': '닥앤미 병원을 찾아주셔서 감사합니다. 
병원 운영시간은 위의 내용과 같습니다',\n '병원 프로모션': '현재 진행되고 있는 병원 프로모션입니다. 자세히 보길 원하시면 아래의 프로모션을 선택해 주세요',\n '프로모션 A': '닥앤미에서 6월 30일까지 제공되는 프로모션 A 입니다.',\n '프로모션 B': '닥앤미에서 6월 30일까지 제공되는 프로모션 B 입니다.',\n '프로모션 C': '닥앤미에서 6월 30일까지 제공되는 프로모션 C 입니다.',\n '의료진': '안녕하세요, 닥앤미의 홍길동 전문의 입니다. 항상 최선을 다하겠습니다.',\n '병원 사진': '최고의 진료를 제공하는 닥앤미 병원입니다.',\n '병원 진료과목': '닥앤미 병원의 진료과목입니다.',\n '병원 전화하기': '닥앤미 병원 전화번호는 02 3522 XXXX 입니다. 지금 통화를 원하시면 아래 버튼을 눌러주세요'\n }\n default_text = 'Unable to find appropriate text response'\n return switcher.get(phrase, default_text)", "def search_motif(sequences):\n motif = re.compile(r'(?=(N[^P](S|T)[^P]))') #N{P}[ST]{P}\n motif_index = {}\n\n for key,value in sequences.items():\n match_motif = re.finditer(motif, value)\n motif_start_list = []\n\n for i in match_motif:\n motif_start_list.append(str(i.start()+1))\n motif_index[key] = ' '.join(motif_start_list)\n return motif_index", "def get_grams(candidate, n):\n words = candidate.split(' ')\n # print(words)\n grams = list()\n for i in range(len(words) - n + 1):\n # print(words[i:i+n])\n grams.append(' '.join(words[i:i+n]))\n return grams", "def feature_dict(sent, i):\n palabra=sent[i] #suponinedo que al menos tiene una palabra\n especiales= [\"á\",\"é\",\"í\",\"ó\",\"ú\", \"ü\"] #solo chequeo minusculas porque pregunto sobre el lower del string\n\n #sobre la anterior\n if i==0: #primera de la oracion\n alower=\"\"\n aistitle=False\n aisupper=False\n aisnumeric=False\n aisplural=False\n #aunder=False\n aislower=False\n aespecial=False\n else:\n alower = sent[i-1].lower()\n aistitle = sent[i-1].istitle()\n aisupper = sent[i-1].isupper()\n aisnumeric = sent[i-1].isnumeric()\n aisplural= (sent[i-1][-1:].lower() == 's')\n #aunder= (sent[i-1].find('_') >= 0)\n aislower = sent[i-1].islower()\n aespecial = (1 in [c in sent[i-1].lower() for c in especiales]),\n\n #sobre la proxima\n if i==len(sent)-1: #si es la ultima\n plower = \"\"\n pistitle = False\n pisupper = False\n pisnumeric = False\n pisplural= False\n #punder=False\n pislower = False\n pespecial = False\n else:\n plower = sent[i + 1].lower()\n pistitle = sent[i + 1].istitle()\n pisupper = sent[i + 1].isupper()\n pisnumeric = sent[i + 1].isnumeric()\n pisplural= (sent[i + 1][-1:].lower() == 's')\n #punder = (sent[i + 1].find('_') >= 0)\n pislower = sent[i + 1].islower()\n pespecial = (1 in [c in sent[i+1].lower() for c in especiales]),\n\n return {\n 'lower': palabra.lower(),\n 'istitle': palabra.istitle(),\n 'isupper': palabra.isupper(),\n 'isnumeric': palabra.isnumeric(),\n 'isplural': (palabra[-1:].lower() == 's'),\n #'under': (palabra.find('_') >= 0),\n 'islower': palabra.islower(),\n 'especial': (1 in [c in palabra.lower() for c in especiales]),\n 'alower': alower,\n 'aistitle': aistitle,\n 'aisupper': aisupper,\n 'aisnumeric': aisnumeric,\n 'aisplural': aisplural,\n #'aunder': aunder,\n 'aespecial': aespecial,\n 'aislower': aislower,\n 'plower': plower,\n 'pistitle': pistitle,\n 'pisupper': pisupper,\n 'pisnumeric': pisnumeric,\n 'pisplural': pisplural,\n #'punder': punder,\n 'pislower': pislower,\n 'pespecial': pespecial,\n }", "def make_chains(text_string, n):\n text_list = text_string.split()\n\n chains = {}\n\n for i in range(n):\n if text_list[i][0].isupper():\n starters = chains.get('START', [])\n starters.append(text_list[i:i+n])\n chains['START'] = starters\n\n # your code goes here\n for i in range(len(text_list)-n):\n n_gram = tuple(text_list[i:i+n])\n\n #bigram = (text_list[i], text_list[i+1])\n\n followers = chains.get(n_gram, [])\n followers.append(text_list[i+n])\n\n if 
n_gram[-1][-1] in {'.', '?', '!'}:\n followers.append('EOF')\n\n chains[n_gram] = followers\n\n if text_list[i+n][0].isupper():\n starters = chains.get('START', [])\n starters.append(text_list[i+n:i+(2*n)])\n chains['START'] = starters\n\n return chains", "def map2mw_Aug(d,k1,entry):\n L = entry.metad['L']\n if L in ['7201','7202']: # 7203 relates to 'hay'\n return 'hA'\n if k1 in map2mw_special_Aug:\n return map2mw_special_Aug[k1]\n regexes = [\n u'<ab>aug.</ab> de {%(.*?)%}',\n u'<ab>aug.</ab> {%(.*?)%}',\n u'<ab>aug.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def kwiq(word, text, num = 3):\r\n arr = []\r\n dic = {}\r\n key = 0\r\n textspl = text.split()\r\n for part in textspl:\r\n dic[key] = part\r\n key += 1\r\n for q in list(dic.keys()):\r\n new_elem = dic[q].strip(',.?!:;')\r\n if new_elem == word or new_elem.lower() == word:\r\n string = ''\r\n for b in range(q-num, q+num+1):\r\n if b in dic:\r\n if b == q-1 or b == q:\r\n string += dic[b] + ' '\r\n else:\r\n string += dic[b] + ' '\r\n arr.append(string)\r\n return(arr)", "def form_bios(paired_plant_match_in, text_in):\r\n\ttagged_corpus =[]\r\n\r\n\t# used sorted and filtered indices to bio tag corpus\r\n\tfor i, word in enumerate(text_in): # iterate over words in corpus\r\n\t\tif word: # if not white space\r\n\t\t\tfind = paired_plant_match_in.get(i) # check if word index is in plant match dict keys\r\n\t\t\t#print(find)\r\n\t\t\tif find: # if match\r\n\t\t\t\ttagged_corpus.append(word + ' ' + str(find)) # append word to list WITH plant match dict value tag \r\n\t\t\telse:\r\n\t\t\t\ttagged_corpus.append(word + ' O') # else append word to list with 'O' tag\r\n\r\n\t\telse:\r\n\t\t\ttagged_corpus.append(word)\r\n\r\n\treturn tagged_corpus", "def get_vos(mappings):\n regex = re.compile(\"^/(\\w+)/\")\n patterns = (m.pattern for m in mappings)\n matches = filter(None, (regex.match(p) for p in patterns))\n vo_groups = set(m.group(1).lower() for m in matches)\n\n return vo_groups", "def surface_segment_data_preparation(word_dictionary: {str, str}):\n X = []\n Y = []\n words = []\n for word in word_dictionary:\n word_list = []\n word_label_list = []\n for i in range(len(word)):\n gram_dict = {}\n gram_arr = []\n\n ### Unigram\n # gram_dict[word[i]] = 1\n gram_dict[\"uni_\" + word[i]] = 1\n gram_arr.append(word[i])\n\n ### BIGRAM\n try:\n tmp = word[i - 1: i + 1]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 2:\n gram_dict[\"bi_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n try:\n tmp = word[i: i + 2]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 2:\n gram_dict[\"bi_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ### TRIGRAM\n try:\n tmp = word[i - 1: i + 2]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 3:\n gram_dict[\"tri_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ## FourGram\n try:\n tmp = word[i - 1: i + 3]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 4:\n gram_dict[\"four_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n try:\n tmp = word[i - 2: i + 2]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 4:\n gram_dict[\"four_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ## FiveGram\n try:\n tmp = word[i - 2: i + 3]\n if tmp:\n # gram_dict[tmp] 
= 1\n if len(tmp) == 5:\n gram_dict[\"five_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ## SixGram\n try:\n tmp = word[i - 3: i + 3]\n if tmp:\n if len(tmp) == 6:\n # gram_dict[tmp] = 1\n gram_dict[\"six_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n try:\n tmp = word[i - 2: i + 4]\n if tmp:\n if len(tmp) == 6:\n # gram_dict[tmp] = 1\n gram_dict[\"six_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n if word[i] in 'aeiou':\n gram_dict[\"vowel\"] = 1\n else:\n gram_dict[\"const\"] = 1\n\n if word[i].isupper():\n gram_dict[\"upper\"] = 1\n else:\n gram_dict[\"lower\"] = 1\n\n word_list.append(gram_dict)\n word_label_list.append(word_dictionary[word][i])\n\n X.append(word_list)\n Y.append(word_label_list)\n words.append([char for char in word])\n return X, Y, words", "def create_vector(string):\n vec = {}\n words = string.split()\n\n for word in words:\n if len(word) <= NGRAM_SIZE:\n add(vec, word)\n else:\n for i in range(len(word) - NGRAM_SIZE + 1):\n add(vec, word[i : i + NGRAM_SIZE])\n\n return vec", "def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]", "def __init__(self): # TODO: consider storing all text in an actual text file and reading from it.\n\t\tself.start_text = '''\\nI'm at the entrance to the dungeon. I sure hope I find treasure inside, \\nand not anything nasty!\n\t\t'''\n\n\t\tself.empty_text = '''\\nI'm entering a large, dark room. Looking around, there appears to be nothing \\ninside other than dust, debris and more dust. This room is empty.'''\n\t\t\n\n\t\tself.monster_text = '''\\nI've entered a very dark room. Something is approaching...it's a Monster!\n\t\t'''\n\n\t\tself.treasure_text = '''\\nI'm standing in a room with a very high ceiling. There's an alter at the \\ncenter with something on top...it's treasure!'''\n\t\t\n\n\t\tself.exit_text = '''\\nI'm standing in a long, narrow corridor. There's a large, engraded gate at the \\nend of this passage. 
I think this must be the exit!'''\n\n\t\t# all the text entries stored in one dictionary, indexed by room type\n\t\t\n\t\tself.room_book = {'Start':self.start_text, 'Empty':self.empty_text, 'Monster':self.monster_text, 'Treasure':\n\t\t\tself.treasure_text, 'Exit':self.exit_text}", "def viterbi(sent, dqml, eqml, S, V_CASE=-1):\n\n if type(sent) is list:\n sent_words = sent\n else:\n sent_words = word_tokenize(sent)\n n = len(sent_words)\n\n # define and initialize PI table\n pi = defaultdict(Counter)\n pi[0]['*'] = 1\n bp = {}\n\n for k in range(1, n+1):\n bp[k] = {}\n for v in S:\n eml = compute_eml(V_CASE, eqml, k, sent_words, v)\n if k-1 is 0: # w e S_0 -> w = '*'\n qmlr = compute_qml(dqml, v, '*')\n pival = pi[0]['*'] * qmlr * eml\n pi[k][v] = pival\n bp[k][v] = '*'\n else: # for w e S_k, S_k = S\n max_S = None\n max_w = -1\n for w in S:\n qmlr = compute_qml(dqml, v, w)\n currmax = pi[k-1][w] * qmlr * eml\n if currmax > 0 and currmax > max_w:\n max_w = currmax\n max_S = w\n # if word is unknown use tag 'NN'\n if max_S is None:\n max_w = 0.0\n max_S = UNKNOWN_TAG\n pi[k][v] = max_w\n bp[k][v] = max_S\n\n # calculate y_n\n max_y = -1\n yn = None\n for v in S:\n nextmax = pi[n][v] * compute_propability('STOP', v, dqml)\n if nextmax > max_y:\n max_y = nextmax\n yn = v\n\n # calculate y_n-1....y1\n yk1 = yn\n tagSequence = list()\n tagSequence.append(yn)\n for k in range(n-1,0,-1):\n yk = bp[k+1][yk1]\n tagSequence.append(yk)\n yk1 = yk\n\n tagSequence.reverse()\n return tagSequence", "def features(sequence, i):\n seq = sequence[i].split(\"\\t\")[1]\n\n # first position in the sentence\n if i == 0:\n yield \"first\"\n\n if i == len(sequence) - 1:\n yield \"last\"\n\n # word's length\n yield \"len=\" + get_word_len(seq)\n\n # first 4 letters\n yield \"first_four_letters=\" + seq[:4] if len(seq) > 4 else seq\n\n # last 3 letters\n yield \"last_three_letters=\" + seq[-3:] if len(seq) > 3 else seq\n\n # word shape\n yield \"word_shape=\" + str(get_word_shape(seq))\n yield \"short_word_shape=\" + get_short_word_shape(seq)\n yield \"digits_count=\" + str(digits_count(seq))\n\n # currency\n if currency_pattern.search(seq):\n yield \"currency\"\n\n if has_affixes(seq):\n yield \"starts_with_affixes\"\n\n # contains -'its'\n if 'its' in seq or re.search(r'\\w+(tel|nik)', seq, re.I):\n yield \"with_tel_its\"\n\n # contains letter + 'к' suffix\n if re.search(r'\\w+[bjlmnpstvz]k', seq, re.I):\n yield \"with_k_suffix\"\n\n # contains letter + 'в' suffix\n if re.search(r'\\w+(st|z|o)v', seq, re.I):\n yield \"with_v_suffix\"\n\n if re.search(r'\\w+[eio]k', seq, re.I):\n yield \"with_eiok_suffix\"\n\n if re.search(r'\\w+stn', seq, re.I):\n yield \"with_stn_suffix\"\n\n if re.search(r'\\w+[dk]r', seq, re.I):\n yield \"with_dr_suffix\"\n\n if re.search(r'\\w+(sh|jj)k', seq, re.I):\n yield \"with_shk_suffix\"\n\n if re.search(r'\\w+[ln]`k', seq, re.I):\n yield \"with_lnk_suffix\"\n\n if re.search(r'l[aeio]?$', seq, re.I):\n yield \"ends_with_l\"\n\n # contains 'нн'\n if 'nn' in seq:\n yield \"with_nn\"\n\n # contains 'чн', 'чк'\n if 'chk' in seq or 'chn' in seq or 'schn' in seq:\n yield \"with_chk\"\n\n # contains letter + 'н' suffix\n if re.search(r'\\w+[jlmrstvz]n', seq, re.I):\n yield \"with_n_suffix\"\n\n # contains suffixes 'ющ', 'ящ', 'ищ', 'вш'\n if re.search(r'\\w+((y[au]|i)s?ch|vsh)', seq, re.I) or seq.endswith('v'):\n yield \"with_part_sch_suffixes\"\n\n # ends with 'ся'\n if seq.endswith(\"sya\") or seq.endswith('s\\''):\n yield \"ends_with_sya\"\n\n if seq.endswith('j') and len(seq) > 1 and 
is_vowel(seq[-2]):\n yield \"ends_with_j\"\n\n if seq.endswith('t') and len(seq) > 1 and is_vowel(seq[-2]):\n yield \"ends_with_t\"\n\n if seq.endswith('\\''):\n yield \"ends_with_apo\"\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n # previous word's length\n yield \"prev_len=\" + str(get_word_len(prev))\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n # last letters of the previous word\n yield \"prev_last_letters=\" + (prev[-3:] if len(prev) > 3 else prev)\n\n if i > 0:\n prev = sequence[i - 1].split(\"\\t\")[1]\n yield \"prev_short_word_shape=\" + get_short_word_shape(prev)\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n # next word's length\n yield \"next_len=\" + str(get_word_len(next))\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n # last letters of the next word\n yield \"next_last_letters=\" + (next[-3:] if len(next) > 3 else next)\n\n if i < len(sequence) - 1:\n next = sequence[i + 1].split(\"\\t\")[1]\n yield \"next_short_word_shape=\" + get_short_word_shape(next)", "def from_diagram(diagram: str) -> List['GridQubit']:\n lines = diagram.strip().split('\\n')\n no_qubit_characters = ['.', '-', ' ']\n qubits = []\n for row, line in enumerate(lines):\n for col, c in enumerate(line.strip()):\n if c not in no_qubit_characters:\n if not c.isalnum():\n raise ValueError(\"Input string has invalid character\")\n qubits.append(GridQubit(row, col))\n return qubits", "def list(self, frame=0):\n text = []\n if not self.number_of_variations:\n return \"\"\n for group_number in range(1, len(self.varexercise_numbers)+1):\n text.extend(\n self.group_list(group_number))\n return text", "def make_chains(text_string):\n\n chains = {}\n n = 2\n # n = int(raw_input(\"Enter the number of ngrams you want? 
>\"))\n # your code goes here\n words = text_string.split()\n for i in range(len(words) - 1):\n ngram = tuple(words[i: i + n])\n if i >= len(words) - n:\n chains[ngram] = None\n else:\n nth_word = words[i + n]\n if ngram not in chains:\n chains[ngram] = [nth_word]\n else:\n chains[ngram].append(nth_word)\n return (chains, n)", "def produced_by(entry):\n if \"ArtsEntRec\" in entry:\n return \"Arts Entertainment Recreation\"\n if \"DurableWholesaleTrucking\" in entry:\n return \"Durable Wholesale Trucking\"\n if \"Education\" in entry:\n return \"Education\"\n if \"ElectronicEquipment\" in entry:\n return \"Electronic Equipment\"\n if \"FoodBeverageStores\" in entry:\n return \"Food Beverage Stores\"\n if \"FoodNondurableWholesale\" in entry:\n return \"Food Nondurable Wholesale\"\n if \"HotelLodging\" in entry:\n return \"Hotel Lodging\"\n if \"MedicalHealth\" in entry:\n return \"Medical Health\"\n if \"Multifamily\" in entry:\n return \"Multifamily\"\n if \"NotElsewhereClassified\" in entry:\n return \"Not Elsewhere Classified\"\n if \"OtherManufacturing\" in entry:\n return \"Other Manufacturing\"\n if \"OtherRetailTrade\" in entry:\n return \"Other Retail Trade\"\n if \"PublicAdministration\" in entry:\n return \"Public Administration\"\n if \"Restaurants\" in entry:\n return \"Restaurants\"\n if \"ServicesManagementAdminSupportSocial\" in entry:\n return \"Services Management Administration Support Social\"\n if \"ServicesProfessionalTechFinancial\" in entry:\n return \"Services Professional Technical Financial\"\n if \"ServicesRepairPersonal\" in entry:\n return \"Services Repair Personal\"", "def __line_parse(index: int, line: list, dictionary: dict, word_list: list):\n\n if index + 2 >= len(line):\n return\n word_1 = line[index + 2]\n word_2 = line[index + 1]\n word_3 = line[index]\n if word_1 == \"\" or word_2 == \"\" or word_3 == \"\":\n return\n\n if word_1 not in dictionary:\n dictionary[word_1] = {\n str(word_1 + \"_1\"): {\n\n },\n str(word_1 + \"_2\"): {\n\n },\n str(word_1 + \"_3\"): {\n\n }\n }\n if word_2 not in dictionary:\n dictionary[word_2] = {\n str(word_2 + \"_1\"): {\n\n },\n str(word_2 + \"_2\"): {\n\n },\n str(word_2 + \"_3\"): {\n\n }\n }\n if word_3 not in dictionary:\n dictionary[word_3] = {\n str(word_3 + \"_1\"): {\n\n },\n str(word_3 + \"_2\"): {\n\n },\n str(word_3 + \"_3\"): {\n\n }\n }\n if word_1 not in word_list:\n word_list.append(word_1)\n if word_2 not in word_list:\n word_list.append(word_2)\n if word_3 not in word_list:\n word_list.append(word_3)\n \"\"\" word_3 word_2 word_1\"\"\"\n if word_2 not in dictionary[word_1][str(word_1 + \"_1\")]:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = 1\n else:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = dictionary[word_1][str(word_1 + \"_1\")][word_2] + 1\n if word_3 not in dictionary[word_1][str(word_1 + \"_2\")]:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = 1\n else:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = dictionary[word_1][str(word_1 + \"_2\")][word_3] + 1\n if word_3 not in dictionary[word_2][str(word_2 + \"_1\")]:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = 1\n else:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = dictionary[word_2][str(word_2 + \"_1\")][word_3] + 1\n if index + 3 >= len(line) or line[index + 3] == \"\":\n return\n word_0 = line[index + 3]\n if word_0 not in dictionary:\n dictionary[word_0] = {\n str(word_0 + \"_1\"): {\n\n },\n str(word_0 + \"_2\"): {\n\n },\n str(word_0 + \"_3\"): {\n\n }\n }\n\n if word_0 not in word_list:\n 
word_list.append(word_0)\n\n if word_3 not in dictionary[word_0][str(word_0 + \"_3\")]:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = 1\n else:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = dictionary[word_0][str(word_0 + \"_3\")][word_3] + 1", "def homonymic_list_generator(content):\n\t# Making it global so that it can be used in other functions too.\n\tglobal verseDetails\n\t# Initialize a VerseInfo class instance.\n\tverseDetails = VerseInfo()\n\t# Result will store tuples (headword, meaning, verse)\n\tresult = []\n\t# Initialize blank verse\n\tverse = ''\n\t# lineType list holds 'h', 'm', 'v' for headword, meaning and verse lines.\n\tlineType = []\n\t# Read the content into list of lines.\n\tlines = content.split('\\n')\n\t# A temporary placeholder which will be emptied into result list\n\t# whenever the verse is allocated to it.\n\twordsOnHand = []\n\tfor line in lines:\n\t\t# If the line is headword line,\n\t\tif line.startswith('$'):\n\t\t\t# If the preceding line was a verse, and current a headword,\n\t\t\t# time to add to result list\n\t\t\tif lineType[-1] == 'v':\n\t\t\t\tverseDetails.update_verseNum(verse)\n\t\t\t\t(verse, wordsOnHand, result) = putVerse(verse, wordsOnHand, result)\n\t\t\t# Extract the headword and gender from headword line.\n\t\t\t# Typical headword line is `$headword;gender`\n\t\t\theadword, gender = line.rstrip().lstrip('$').split(';')\n\t\t\t# lineType is appended with 'h' for headword.\n\t\t\tlineType.append('h')\n\t\t# If the line is a meaning line,\n\t\telif line.startswith('#'):\n\t\t\t# typical meaning line is `#meaning1,meaning2,meaning3,...`\n\t\t\tmeanings = line.rstrip().lstrip('#').split(',')\n\t\t\t# Store the (headword, meaning) tuples in temporary wordsOnHand list.\n\t\t\t# They will keep on waiting for the verse.\n\t\t\t# Once verse is added, and a new headword starts, this will be added to result list.\n\t\t\twordsOnHand.append((headword, meanings))\n\t\t\t# lineType is marked 'm' for meaning.\n\t\t\tlineType.append('m')\n\t\telif line.startswith(';'):\n\t\t\t(tag, value) = utils.extract_tag(line)\n\t\t\tif tag == 'p':\n\t\t\t\tverseDetails.update_pageNum(value)\n\t\t\tif tag == 'k':\n\t\t\t\tverseDetails.update_kanda(value)\n\t\t\tif tag == 'v':\n\t\t\t\tverseDetails.update_varga(value)\n\t\t\tif tag == 'vv':\n\t\t\t\tverseDetails.update_subvarga(value)\n\t\t# Pass the lines having some other markers like ;k for kanda, ;v for varga etc.\n\t\telif line.startswith(';end'):\n\t\t\t# Put the last verse, as there will not be any next headword.\n\t\t\tputVerse(verse, wordsOnHand, result)\n\t\t# Lines which are unmarked are verses.\n\t\t# The verses may span more than one line too. 
Therefore adding them up.\n\t\telse:\n\t\t\tverse += line + '<BR>'\n\t\t\t# Mark lineType 'v' for verse.\n\t\t\tlineType.append('v')\n\treturn result", "def phonemes2structure(self, phonemes):\n #print(phonemes)\n phonemes_reduced = self.reduce_double_syllables(phonemes)\n structure = [self.phonemes_dict[phone] if phone in self.alphabet else phone for phone in phonemes_reduced]\n #print(structure)\n return structure", "def func2(string:str):\n with open(string,\"r\") as file:\n data = file.read()\n data = data.split(\"bandwidths [1]:\")[0]\n\n final = {}\n for i in range(1,3):\n final[\"formants [{}]\".format(i)] = []\n my_list = data.split(\"formants\")\n for i in range(2,4):\n final[\"formants [{}]\".format(i-1)].extend(list(map(pars_points,my_list[i].split(\"points \")[1:])))\n return final", "def test_example():\n example_text = ['''Mark and Jack welcome back to couch on crackerjacks today I'm gonna show you how to make a basic and delicious potato salad some people might call this a country style potato salad some people might refer to it as a deli style of potato salad either way it's got the perfect balance of sweet and tangy from the sugar and the vinegar and pickles and everything else that's in this it's just your basic homemade potato salad you can add any number of things to this to make it your own but I'm just going to show you how I like to make mine so without further ado let's get started so naturally I'm going to start out with my potatoes every potato salad starts with potatoes for this recipe and for my potato salad I prefer using just regular old russet potatoes they're the cheapest they're the best I've tried using Yukon Gold potatoes and red potatoes for this recipe I prefer hands down at the russet potatoes it just it makes the best potato salad for me you can use whatever kind of potatoes you like though and using a potato peeler I'm just going to peel these potatoes a little trick for you that little end on most potato peelers it's kind of rounded use that to dig out the eyes of your potato it's what I've always used it for so it's just the perfect little tool to dig out the eyes of a potato but what you want to do is just go ahead and peel your potatoes and you don't have to peel your potatoes if you don't want to if you like skin on potato salad by all means go ahead and leave the skin on it doesn't make any difference personal preference and as you're peeling your potatoes and you get one done go ahead and put them into a large pot this is going to be the same profit I cut these in that's filled up with water you want to make sure and keep your potatoes covered that will prevent your potatoes from oxidizing and turning that pinky brown color but you just want to go through and peel all of your potatoes and I am using three pounds of potatoes for this recipe now once you get all your potatoes peeled you want to go ahead and cut them up basically you want to cut these into about 3/4 inch square pieces so for these medium potatoes I cut them half I turn them 90 degrees cut them into three pea is if you will that way if it's a larger potato do four and then cut those into chunks basically like I said you want about three quarters of an inch by three quarters of an inch by three quarters of an inch pieces and then again throw your potatoes back into the water that you pulled the potatoes out of that way they do not oxidize on you now when you get all your potatoes cut up your water is going to be cloudy and it's gonna be murky and it's gonna be just full of all the 
starch coming off of those potatoes what you want to do is rinse your potatoes well you want to make sure that the water coming off of that is completely clear go ahead and rinse these a good three or four times and then drain them completely you want to make sure that all of that starch gets off of those potatoes then you want to go ahead and light your stove and take your pot and you want a large pot for this put it over a medium-high heat time actually even high heat or at this point take your drained potatoes and put those into your pot and you want to add enough cold water to this to come up about one inch over the top of the potatoes starting off with cool water your potatoes cook evenly as the water comes up to temperature your potatoes come up with them to temperature if you start out putting cold potatoes into boiling water the outside of the potato is gonna be mush before the inside is actually cooked and before this gets going too far I'm gonna take two large eggs and I'm gonna put those in the water with the potatoes this recipe uses hard-boiled eggs and since I'm boiling the potatoes anyway I might as well just boil the eggs right along with the potatoes so just go ahead and add two large eggs to the pot and you want to cover your pot and you want to bring this up to a boil now once your water is that a boy I'll go ahead and give your potatoes an egg a gentle stir you want to be careful with this because you don't do not want to break your eggs and you also don't want to break up the potatoes but once this comes up to a boil you want to boil this for exactly ten minutes and how to check to make sure that your potatoes are done you want to take a couple large pieces take them out put them on a spoon and using a fork you want to put the fork into the potato and you want just a little bit of give in your potatoes before they break apart if you can see there it's just the slightest little bit of give before the potato breaks up you don't want to cook these any longer than that because they they will finish cooking when you take them off heat but you want to go ahead and drain these in a colander and once they are drained well go ahead and pour your potatoes and eggs back into the pot that you cooked them in and here you can dig out your eggs and you want to put your eggs in a bowl of cold water you want to stop that cooking process as soon as possible because if you cook your eggs too long you're gonna get that dreaded green ring around the yolk go ahead and put those in a bowl of cold water to stop the cooking process immediately and then you want to keep your potatoes in the pot that you cook them in to cool and you want to cool them completely before you do anything else with them if you add a salad dressing to hot potatoes it's gonna break on you and you don't want that so just go ahead and let your potatoes steam off and cool and I'm gonna let these sit for about a half an hour before I even start making the dressing for my potato salad and while you're waiting for your potatoes to cool off you can go ahead and peel your eggs it helps to wait a little bit for your eggs to cool down before you peel them just go ahead and crack them on a countertop and then start peeling them if you peel them underneath water or running water they peel super easy so as you can see here's I mean it takes nothing to do it under water water gets under there and the shell just slips off I just go ahead and peel your egg eggs and set them off until later I'm gonna need a few vegetables for my 
dressing I went ahead and already cut up half of a yellow onion here off a video I thought I was recording when I wasn't you don't need to see me chopped onions anyway everybody knows how to do that I've also got two stalks of celery here I'm just going to cut the ends off as well as the tops if you want to save the tops they make a nice garnish you don't have to keep them and I'm not gonna keep them here the celery I'm going to cut these sticks or stalks into orders and then I'm going to chop those up because I don't like really big chunks of celery in my potato salad so I'm just gonna cut these into four slices and then turn them around and cut these into dices if you will and I'm just going to go ahead after I get that died and set those off to the side until I need them later now for our dressing in a large bowl and you want to make sure that you use a plenty large bowl for this because it does make a lot of potato salad I've got one and a half cups of mayonnaise this recipe really does not work with Miracle Whip so since we're gonna be adding sugar to this stick to the plain old mayonnaise I'm gonna throw my eggs in there and using the back of a fork I'm just gonna break up my eggs if you like big chunks of egg in your potato salad don't mash it up as much but I'm gonna mash this up pretty fine and then you want to add in a quarter of a cup of sugar as well as a teaspoon and a half of salt it seems like a lot of salt it really isn't because there are a lot of potatoes here two teaspoons of white vinegar just plain white distilled vinegar then you want to add two tablespoons of sweet pickle relish you could also use dill pickle relish if you wanted to I like sweet in mine and finally I'm gonna add in two teaspoons of prepared yellow mustard if you like a more mustardy potato salad you can add more mustard if you want to this perfectly acceptable and then using a spoon or a fork whatever just go ahead and mix this up well and then you want to add in your onions and celery and go ahead and get that mixed in and you want to make sure to mix all of your ingredients and get your dressing thoroughly mixed before you add the potatoes because you don't want to over mix this once you get your potatoes added so go ahead and take your cooled potatoes again make sure that they are at least room temperature you do not want them warm or hot at all but go ahead and add those into your bowl and then using a spatula I'm going to gently fold the dressing into my potatoes you want your potatoes to remain as in this large of chunks as possible so don't go crazy you know stirring it stirring stirring you want to gently fold this so your potatoes do stay as whole as possible and a little secret for you just to bind up the dressing just a little bit I'm going to add two tablespoons of instant mashed potato flakes into the finished mixture I'm just going to fold this in basically what those do the potato flakes they bind up the dressing and make the dressing firm it also helps it kind of stick to the potatoes a little bit better so you you know the dressing doesn't run off of the potatoes which can be a problem with some recipes so there you go you want to make sure that those potato flakes are evenly distributed in there and everything is well mixed together everything is combined perfectly go ahead and give this a taste make sure that the salt is ok for you if you need a little bit more salt go ahead and add it if you want to if you need more mustard or vinegar or eggs whatever now is the time to do it but 
you want to go ahead and cover this with a piece of cling wrap saran wrap and refrigerate this for at least four to six hours before you serve this the longer you let this sit the better it gets but there you go there's your basic all-around simple homemade deli style or country style potato salad definitely give this recipe a try if you do let me know how you like it down below in the comment section if you like this video be sure to give it a thumbs up I would greatly appreciate it subscribe for more deliciousness and to keep up to date on all my latest videos thanks so much for watching and we will see you next time''']\n\n return str(example_text)", "def extract_features_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon, mode='training'):\n instances = []\n for sent in sentence_dicts:\n # print(sent)\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon):\n sent[key]['not-pred-cue'] = True\n continue\n\n features['token'] = value[3].lower()\n features['lemma'] = value[4].lower()\n features['pos'] = value[5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4].lower()\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4].lower()\n \n affix = get_affix_cue(value[3].lower(), affixal_cue_lexicon)\n if affix != None:\n base = value[3].lower().replace(affix, \"\")\n features['char-5gram1'], features['char-5gram2'] = get_character_ngrams(base, affix, 5)\n features['char-4gram1'], features['char-4gram2'] = get_character_ngrams(base, affix, 4)\n features['char-3gram1'], features['char-3gram2'] = get_character_ngrams(base, affix, 3)\n features['char-2gram1'], features['char-2gram2'] = get_character_ngrams(base, affix, 2)\n features['char-1gram1'], features['char-1gram2'] = get_character_ngrams(base, affix, 1)\n features['affix'] = affix\n else:\n features['char-5gram1'], features['char-5gram2'] = 'null','null'\n features['char-4gram1'], features['char-4gram2'] = 'null','null'\n features['char-3gram1'], features['char-3gram2'] = 'null','null'\n features['char-2gram1'], features['char-2gram2'] = 'null','null'\n features['char-1gram1'], features['char-1gram2'] = 'null','null'\n features['affix'] = 'null'\n \n instances.append(features)\n if mode == 'training':\n labels = extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon)\n return sentence_dicts, instances, labels\n return sentence_dicts, instances", "def ner_features(tokens, index, history):\n\n # init the stemmer\n stemmer = SnowballStemmer('english')\n #print tokens\n\n # Pad the sequence with placeholders\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'),\n ('[END2]', '[END2]')]\n history = ['[START2]', '[START1]'] + list(history)\n\n # shift the index with 2, to accommodate the padding\n index += 2\n\n word, pos = tokens[index]\n prevword, prevpos = tokens[index - 1]\n prevprevword, prevprevpos = tokens[index - 2]\n nextword, nextpos = tokens[index + 1]\n nextnextword, nextnextpos = tokens[index + 2]\n previob = history[index - 1]\n contains_dash = '-' in word\n contains_dot = '.' 
in word\n allascii = all([True for c in word if c in string.ascii_lowercase])\n\n allcaps = word == word.capitalize()\n capitalized = word[0] in string.ascii_uppercase\n\n prevallcaps = prevword == prevword.capitalize()\n prevcapitalized = prevword[0] in string.ascii_uppercase\n\n nextallcaps = prevword == prevword.capitalize()\n nextcapitalized = prevword[0] in string.ascii_uppercase\n\n return {\n 'word': word,\n 'lemma': stemmer.stem(word),\n 'pos': pos,\n 'all-ascii': allascii,\n 'all-num': word.isdigit(),\n\n 'next-word': nextword,\n 'next-lemma': stemmer.stem(nextword),\n 'next-pos': nextpos,\n\n 'next-next-word': nextnextword,\n 'nextnextpos': nextnextpos,\n\n\n 'prev-word': prevword,\n 'prev-lemma': stemmer.stem(prevword),\n 'prev-pos': prevpos,\n 'prev-pos-num': prevword.isdigit(),\n\n 'prev-prev-word': prevprevword,\n 'prev-prev-pos': prevprevpos,\n\n 'prev-iob': previob,\n\n 'contains-dash': contains_dash,\n 'contains-dot': contains_dot,\n\n 'all-caps': allcaps,\n 'capitalized': capitalized,\n\n 'prev-all-caps': prevallcaps,\n 'prev-capitalized': prevcapitalized,\n\n 'next-all-caps': nextallcaps,\n 'next-capitalized': nextcapitalized,\n }", "def parse_template(string):\n count = 0\n list1 = []\n for character in string:\n count = count + 1\n if character == \"{\":\n end = string.find(\"}\", count)\n s_strg = string[count:end]\n list1.append(s_strg)\n string = string.replace(s_strg, \"\", 1)\n count = count - len(s_strg)\n\n subs = tuple(list1)\n\n return(string, subs)\n print(subs)", "def parse(filepath):\n wos_list = []\n\n paper_start_key = 'PT'\n paper_end_key = 'ER'\n\n\n #\n line_list = []\n try:\n with open(filepath, 'r') as f:\n line_list = f.read().splitlines()\n except IOError: # File does not exist, or couldn't be read.\n raise IOError(\"File does not exist, or cannot be read.\")\n\n if len(line_list) is 0:\n raise IOError(\"Unable to read filepath or filepath is empty.\")\n # Convert the data in the file to a usable list of dictionaries.\n # Note: first two lines of file are not related to any paper therein.\n last_field_tag = paper_start_key # initialize to something.\n for line in line_list[2:]:\n\n field_tag = line[:2]\n\n if field_tag == ' ':\n pass\n\n if field_tag == paper_start_key:\n # Then prepare for next paper.\n wos_dict = _new_wos_dict()\n\n if field_tag == paper_end_key:\n # Then add paper to our list.\n wos_list.append(wos_dict)\n\n # Handle keys like AU,AF,CR that continue over many lines.\n if field_tag == ' ':\n field_tag = last_field_tag\n\n # Add value for the key to the wos_dict: only for the five tags.\n try:\n if field_tag in ['DE', 'DI', 'TI', 'SO', 'UT','PY']:\n wos_dict[field_tag] += ' ' + str(line[3:])\n # Rest all will just get passed\n else:\n pass\n\n except (KeyError, TypeError, UnboundLocalError):\n wos_dict[field_tag] = str(line[3:])\n\n last_field_tag = field_tag\n # End line loop.\n\n # Define keys that should be lists instead of default string.\n list_keys = ['DE']\n delims = {'DE': ';'}\n\n # And convert the data at those keys into lists.\n for wos_dict in wos_list:\n for key in list_keys:\n delim = delims[key]\n try:\n key_contents = wos_dict[key]\n if delim != '\\n':\n wos_dict[key] = key_contents.split(delim)\n else:\n wos_dict[key] = key_contents.splitlines()\n except KeyError:\n # One of the keys to be converted to a list didn't exist.\n pass\n except AttributeError:\n # Again a key didn't exist but it belonged to the wos\n # data_struct set of keys; can't split a None.\n pass\n\n return wos_list", "def 
parse_course_pre_to_list(self):\n prere_courses = []\n\n # convert non-word to spaces except \"-\"\n self.prere_raw = re.sub(\"[^\\w-]\", \" \", self.prere_raw)\n\n # split the string by spaces\n words = self.prere_raw.split()\n\n # check if the string contains number, if True then the string is of the form: \"140A\"\n def append_to_list(word, previous_word):\n try:\n if word[0].isdigit():\n toappend = None\n # course abbs = words[i-1]\n try:\n toappend = \"{} {}\".format(previous_word.upper(), word.upper())\n except AttributeError:\n #TODO check this error for HIGR 216A-B\n print(\"previous word is {}, word is {}\".format(previous_word, word))\n if toappend not in prere_courses:\n prere_courses.append(toappend)\n except IndexError:\n #TODO why this would occur?\n print(\"word is {}, previous word is {}\".format(word, previous_word))\n\n # iterate through words to find numbers\n for i in range(len(words)):\n\n previous_word = None\n if i is not 0:\n # define the previous word like MATH\n previous_word = words[i-1]\n\n if \"-\" in words[i]:\n num = re.split(\"[A-Z]\", words[i])[0]\n letters = re.split(\"-\", words[i])\n new_words = []\n for i in range(len(letters)):\n if i is 0:\n new_words.append(letters[0])\n else:\n new_words.append(num + letters[i])\n for word in new_words:\n if word is not None and previous_word is not None:\n append_to_list(word, previous_word)\n else:\n #TODO: what if the word is None?\n pass\n else:\n append_to_list(words[i], previous_word)\n\n return prere_courses", "def parse_treefile_general(treefile, get_questions=False):\n\n\n f = open(treefile, \"r\")\n file_data = f.readlines()\n f.close()\n \n file_data = [line.strip(\"\\n\") for line in file_data]\n data = [line for line in file_data if line[:2] != \"QS\"] ## strip qwuestions \n \n if get_questions:\n questions = [line for line in file_data if line[:2] == \"QS\"] \n questions = [line.replace(\"{\", \"\").replace(\"}\", \"\") for line in questions]\n questions = [line.strip(\" \") for line in questions]\n questions = [re.split(\"\\s+\", line) for line in questions]\n for line in questions:\n assert len(line) == 3,line # \"Line does not contain 3 items: %s\"%(\" \".join(line))\n questions = dict([(line[1], line[2]) for line in questions])\n\n data = \"\\n\".join(data)\n \n bracketed = re.findall(\"\\{[^\\}]*\\}\",data)\n \n #print bracketed\n #### bracketed should consist of name, tree, name, tree... 
-- sort it out\n if len(bracketed) % 2 != 0:\n print(\"bracketed should consist of name, tree, name, tree\")\n sys.exit(1)\n \n data = []\n i=1\n for item in bracketed:\n #print item\n if i%2!=0.0: ## if i odd\n name = item\n else:\n tree = item\n data.append((name,tree))\n i+=1\n\n def strip_quotes(x):\n x = string.strip(x, '\"') #(\"\\_|-\", \"\", x) \n return x \n \n def to_num(x):\n if x[0] == \"-\" or x[0] == \"0\":\n return int(math.fabs(int(x)))\n else:\n return strip_quotes(x)\n #print data\n names_trees = []\n for (name, treestring) in data:\n \n #### tree\n treestring = treestring.strip(\"{} \\n\")\n\n treestring = re.split(\"\\n\", treestring)\n treestring = [line.strip(\" \\n\") for line in treestring] \n treestring = [re.split(\"\\s+\", line) for line in treestring] \n\n tree = [(to_num(num), quest, to_num(left), to_num(right)) for (num, quest, left, right) in treestring]\n\n\n ### name\n treestring = name.strip(\"{} \\n\")\n\n names_trees.append((name, tree))\n \n ##print names_trees \n if get_questions:\n return names_trees, questions \n else:\n return names_trees", "def format_data(data):\n rules = []\n bags = []\n for bag_data in data:\n bag = bag_data.split('contain')\n bag_type = bag[0].replace('bags', '').strip().lower()\n bag_content = bag[1].replace('.', '')\n if 'no other bags' in bag_content:\n continue\n else:\n bags.append([bag_type, [[re.sub(r'bags?$', '', content).strip().lower()[2:], content[:2].strip()]\n for content in bag_content.split(',')]])\n rule = re.search(fr'\\d+ {BAG_TYPE}', bag_data)\n if rule:\n rules.append([bag_type, rule.group()[:1].strip()])\n # print(rules)\n return rules, bags", "def nom_struc_rebuilding(nom_struc):\n\n #init\n nominal_structure = ns = nn = []\n i = 0\n\n while i < len(nom_struc):\n\n if nom_struc[i]._quantifier == 'SOME' and (nom_struc[i].det == ['a'] or nom_struc[i].det == ['an']):\n nom_struc[i]._quantifier = 'ONE'\n\n #The first nominal group not preceded but 'and' if there is\n if nom_struc[i]._conjunction == 'AND' and i > 0:\n nominal_structure = nominal_structure + ['and']\n elif nom_struc[i]._conjunction == 'OR':\n nominal_structure = nominal_structure + ['or']\n elif nom_struc[i]._conjunction == 'BUT':\n nominal_structure = nominal_structure + ['but']\n\n #We recover the nominal group and his complement\n if nom_struc[i]._quantifier == 'SOME' or nom_struc[i]._quantifier == 'ALL' or nom_struc[\n i]._quantifier == 'ANY' or (nom_struc[i]._quantifier == 'DIGIT' and nom_struc[i].det != 'one'):\n #If there is a specific quantifier (plural)\n for n in ResourcePool().plural_nouns:\n if nom_struc[i].noun != [] and n[1] == nom_struc[i].noun[0]:\n nn = [n[0]]\n\n #If it is not a specific plural, we add 's'\n if nom_struc[i].noun != [] and nn == []:\n nn = [nom_struc[i].noun[0] + 's']\n\n #We reconver the other information \n nominal_structure = nominal_structure + nom_struc[i].det\n for z in nom_struc[i].adj:\n nominal_structure = nominal_structure + z[1] + [z[0]]\n nominal_structure = nominal_structure + nn\n\n #Re-init\n nn = []\n\n else:\n #if not plural\n nominal_structure = nominal_structure + nom_struc[i].det\n for z in nom_struc[i].adj:\n nominal_structure = nominal_structure + z[1] + [z[0]]\n nominal_structure = nominal_structure + nom_struc[i].noun\n\n #We recover noun complement\n if nom_struc[i].noun_cmpl:\n nominal_structure = nominal_structure + ['of']\n nominal_structure = nominal_structure + nom_struc_rebuilding(nom_struc[i].noun_cmpl)\n\n #We recover the relative\n for j in nom_struc[i].relative:\n if not 
j.sn:\n ns = [nom_struc[i]]\n\n nominal_structure = nominal_structure + [j.aim] + sentence_rebuilding.relative(j, ns)\n ns = []\n\n i += 1\n return nominal_structure", "def map1(inKey, inVal):\n filename, linenum = inKey[0], inKey[1]\n s = inVal[0].lower().strip()\n s = re.sub(\"['\\\"]\", \"\", s)\n s = re.sub(\"[^A-Za-z0-9']\", \" \", s)\n words = [w for w in s.split(\" \") if w != \"\"]\n result = []\n for i in range(4, len(words)):\n temp = [words[c] for c in range(i-4,i+1) if words[c][0].isalpha()]\n if len(temp) == 5: result.append((temp,[\"1\",filename,linenum]))\n return result", "def agline2(line):\n \n vals = {}\n y = line.strip('\\n').split(',')\n y.extend(y[0].strip('.MTS').split('_'))\n \n #print(y)\n \n x = ['movie', 'moviecode', 'offset', 'well', 'agmin', 'agsec', 'agdur', \n 'agtype', 'agcomm', 'escmin', 'escsec', 'escdur', 'esctype', 'escbeh', \n 'esccomm', 'gen', 'date', 'assay', 'fps', 'flyid', 'side', 'moviepart']\n \n z = zip(x, y)\n\n for item in z:\n vals[item[0]] = item[1]\n\n return(vals)", "def counterdict(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.append(s_i)\n for ele in enumerate(vas):\n print(ele)\n logging.debug(\"Starting with to\")", "def voc2json():\n words_count = 0\n word_list = []\n lesson_list = []\n\n reg_word = re.compile(r\"[0-9]+\\.\\s*([a-zA-Z\\S]+)\")\n voc_dict = voc_reader()\n\n with open(LESSON_PATH, 'r') as word_file:\n\n for line in word_file:\n line.strip()\n line = line.replace(\"\\xef\", \" \")\n line = line.replace(\"|\", \" \")\n word_match = reg_word.match(line)\n if not word_match:\n continue\n word_group = word_match.group(1)\n if word_group not in voc_dict:\n continue\n\n words_count += words_count + 1\n\n word_list.append({\n \"Type\": \"\",\n \"Voc\": word_group,\n \"Ext\": voc_dict[word_group][0],\n \"Meaning\": voc_dict[word_group][1],\n \"Time\": 0\n })\n\n if len(word_list) >= MAX_WORD_COUNT:\n lesson_list.append(word_list)\n word_list = []\n\n lesson_len = len(word_list)\n if lesson_len > 0:\n lesson_list.append(word_list)\n\n print(words_count)\n\n json_save(OUT_PATH, lesson_list)", "def test_tax_court_citation_extractor(self):\n\n test_pairs = (\n (\n \"\"\" 1 UNITED STATES TAX COURT REPORT (2018)\n\n\n\n UNITED STATES TAX COURT\n\n\n\n BENTLEY COURT II LIMITED PARTNERSHIP, B.F. BENTLEY, INC., TAX\n MATTERS PARTNER, Petitioner v.\n COMMISSIONER OF INTERNAL REVENUE, Respondent\n\n\n\n Docket No. 5393-04. Filed May 31, 2006.\n\n\n\n Nancy Ortmeyer Kuhn, for petitioner.\n \"\"\",\n \"1 T.C. 2018\",\n ),\n (\n \"\"\" T.C. Memo. 2003-150\n\n\n\n UNITED STATES TAX COURT\n\n\n\n RIVER CITY RANCHES #1 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #2 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #3 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n\n\n \"\"\",\n \"2003 T.C. Memo. 150\",\n ),\n (\n \"\"\" T.C. Summary Opinion 2003-150\n\n\n\n UNITED STATES TAX COURT\n\n\n\n RIVER CITY RANCHES #1 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #2 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #3 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n\n\n \"\"\",\n \"2003 T.C. Summary Opinion 150\",\n ),\n (\n \"\"\"\n MICHAEL KEITH SHENK, PETITIONER v. COMMISSIONER\n OF INTERNAL REVENUE, RESPONDENT\n\n Docket No. 5706–12. 
Filed May 6, 2013.\n\n P was divorced from his wife, and their 2003 ‘‘Judgment of\n Absolute Divorce’’ provided that his ex-wife would have pri-\n mary residential custody of their three minor children. The\n judgment provided that the dependency exemption deductions\n for the three children would be divided between the two ex-\n spouses according to various conditions but did not provide\n that the ex-wife must execute in P’s favor a Form 8332,\n ‘‘Release of Claim to Exemption for Child of Divorced or Sepa-\n rated Parents’’. The children resided with P’s ex-wife for more\n than half of 2009, and P’s ex-wife did not execute in P’s favor\n any Form 8332 or equivalent document for any year. For 2009\n P timely filed a Federal income tax return on which he\n claimed dependency exemption deductions and the child tax\n credit for two of the children, consistent with his under-\n standing of the terms of the judgment, but he did not attach\n any Form 8332 to his return. He also claimed head-of-house-\n hold filing status. His ex-wife, the custodial parent, timely\n filed a Federal income tax return for 2009 on which she also\n\n 200\n\n\n\n\nVerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00001 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n\f (200) SHENK v. COMMISSIONER 201\n\n\n claimed two dependency exemption deductions, so that one\n child was claimed on both parents’ returns. R allowed to P the\n dependency exemption deduction for one of the children but\n disallowed his claim for the dependency exemption deduction\n for the child who had also been claimed by the custodial\n parent. At trial P contended he is entitled to a dependency\n exemption deduction for all three children. Held: Since the\n custodial parent did not execute, and P could not and did not\n attach to his return, any Form 8332 or equivalent release, P\n is not entitled under I.R.C. sec. 152(e)(2)(A) to claim the\n dependency exemption deduction or the child tax credit. Held,\n further, where both the custodial parent and the noncustodial\n parent have claimed for the same year a dependency exemp-\n tion deduction for the same child, a declaration signed by the\n custodial parent after the period of limitations for assess-\n ments has expired as to the custodial parent could not qualify\n under I.R.C. sec. 152(e)(2)(A), and therefore there is no reason\n to grant P’s request to leave the record open so that he may\n obtain and proffer such a declaration. Held, further, P is not\n entitled to head-of-household filing status under I.R.C. sec.\n 2(b)(1) nor to the child tax credit under I.R.C. sec. 24.\n\n Michael Keith Shenk, for himself.\n Shari Salu, for respondent.\n GUSTAFSON, Judge: The Internal Revenue Service (IRS)\n determined a deficiency of $3,136 in the 2009 Federal income\n tax of petitioner Michael Keith Shenk. Mr. Shenk petitioned\n this Court, pursuant to section 6213(a), 1 for redetermination\n of the deficiency. After Mr. Shenk’s concession that he\n received but did not report $254 in dividend income, the\n issue for decision is whether Mr. Shenk is entitled to a\n dependency exemption deduction for one of his children\n under section 151(c), a child tax credit for that child under\n section 24(a), and head-of-household filing status under sec-\n tion 2(b)(1). On these issues, we hold for the IRS.\n FINDINGS OF FACT\n\n The judgment of divorce\n Mr. Shenk was married to Julie Phillips, and they have\n three minor children—M.S., W.S., and L.S. 
They divorced in\n 2003. The family court’s ‘‘Judgment of Absolute Divorce’’ pro-\n 1 Unless otherwise indicated, all citations of sections refer to the Internal\n\n Revenue Code (26 U.S.C.) in effect for the tax year at issue, and all cita-\n tions of Rules refer to the Tax Court Rules of Practice and Procedure.\n\n\n\n\nVerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00002 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n\f 202 140 UNITED STATES TAX COURT REPORTS (200)\n\n\n vided: that Ms. Phillips was ‘‘awarded primary residential\n custody’’ of the parties’ three children; and that Mr. Shenk\n would be liable for child support payments; but that, as to\n dependency exemptions—\"\"\",\n \"140 T.C. 200\",\n ),\n )\n site = tax.Site()\n for q, a in test_pairs:\n results = site.extract_from_text(q)\n cite_string = \"%s %s %s\" % (\n results[\"Citation\"][\"volume\"],\n results[\"Citation\"][\"reporter\"],\n results[\"Citation\"][\"page\"],\n )\n\n self.assertEqual(cite_string, a)\n print \"✓\", cite_string", "def join_chapter_text(chapter_content_list):\n cleaned_chapter_content_list = [each for each in chapter_content_list if each != \"\\n\"]\n chap_text = {0 : \"\"}\n tracker = 0\n\n for each in chapter_content_list:\n each = str(each)\n if re.search(r'\\/x\\/d\\?b=drb', each):\n tracker += 1\n verse = re.search(r'\\[(\\d+)\\]', each).group(1)\n chap_text[tracker] = \"\"\n else:\n chap_text[tracker] = chap_text[tracker] + each.strip()\n \n chap_text.pop(0, None)\n return chap_text", "def preprocessing(sample):\n\n content = ' '.join(sample)\n content = re.sub(r\"\\'ll\", \" will\", content)\n content = re.sub(r\"\\'d\", \" would\", content)\n content = re.sub(r\"\\'s\", \" is\", content)\n content = re.sub(r\"\\'m\", \" am\", content)\n content = re.sub(r\"\\'ve\", \" have\", content)\n content = re.sub(r\"\\'re\", \" are\", content)\n content = content.replace('&', 'and')\n content = content.replace('$', '')\n content = content.split()\n return content", "def process(data):\n # words to scrub from data\n strip_words = [\n 'avenue',\n 'ave',\n 'street',\n 'boulevard',\n 'blvd',\n 'st',\n 'road',\n 'rd',\n 'court',\n 'ct',\n 'guest',\n 'guests',\n 'family',\n 'spouse',\n 'spouses'\n ]\n # quick and dirty translator for scrubbing punctuation from data\n translator = str.maketrans({key: None for key in string.punctuation})\n for i in range(len(data)):\n indx, name, addr = data[i] # ,zipc,twn,apt\n\n # scrub the data and normalize to lowercase\n name = name.translate(translator)\n addr = addr.translate(translator)\n name = name.lower()\n addr = addr.lower()\n name = replace_all(name, strip_words)\n addr = replace_all(addr, strip_words)\n\n # identify similar entries from the remainder of the data\n matches = []\n for j in range(i + 1, len(data)):\n\n # scrub the data\n n_indx, n_name, n_addr = data[j] # ,n_zipc,n_twn,n_apt\n n_name = n_name.translate(translator)\n n_addr = n_addr.translate(translator)\n n_name = n_name.lower()\n n_addr = n_addr.lower()\n n_name = replace_all(n_name, strip_words)\n n_addr = replace_all(n_addr, strip_words)\n # print(addr, n_addr)\n\n # check for similarity\n # TODO: should a report be made if only one of these is similar?\n if sim(name, n_name) and sim(addr, n_addr):\n matches.append(data[j])\n\n # report the matches found\n if len(matches) > 0:\n tmp = \"%d: %s, %s\"\n s1 = tmp % tuple(data[i])\n s2 = \"*\" * 15\n print(s1)\n print(s2)\n for m in matches:\n print(tmp % tuple(m))\n print(\"\\n\")", "def 
find_substitutes(text):\n if CHAINS == {}:\n generate_food_chains()\n\n candidates = []\n subs = []\n for i in range(len(text)):\n char = text[i]\n if CHAINS.get(char):\n candidates = []\n candidates = CHAINS[char]\n else:\n if candidates != []:\n # choose the most popular option from candidates\n counts = {}\n for candidate in candidates:\n if counts.get(candidate):\n counts[candidate] += 1\n else:\n counts[candidate] = 1\n max_count = 0\n chosen = None\n for candidate, count in counts.iteritems():\n if count > max_count:\n max_count = count\n chosen = candidate\n if chosen:\n subs.append((chosen, i))\n\n candidates = []\n return subs", "def main():\n roman = \"la-jangada.txt\"\n #roman = \"texte-de-test.txt\"\n liste_caracteres = extraction_caracteres(roman)\n print(len(liste_caracteres))\n # liste_caracteres = suppression_ponctuation(liste_caracteres)\n # liste_caracteres = suppression_blancs(liste_caracteres)\n # liste_caracteres = suppression_chiffres(liste_caracteres)\n # liste_caracteres = transformation_accents(liste_caracteres)\n liste_caracteres = nettoyage(liste_caracteres)\n \n liste_caracteres = transformation_minuscules(liste_caracteres)\n # print(liste_caracteres)\n dict_car = creation_dict_car(liste_caracteres)\n # print(dict_car)\n dict_car = analyse_dict_car(dict_car)\n #print(dict_car)\n affichage_dict(dict_car)", "def test_tax_court_docket_number_extractor(self):\n\n test_pairs = (\n (\n \"\"\" 1 UNITED STATES TAX COURT REPORT (2018)\n \n \n \n UNITED STATES TAX COURT\n \n \n \n BENTLEY COURT II LIMITED PARTNERSHIP, B.F. BENTLEY, INC., TAX\n MATTERS PARTNER, Petitioner v.\n COMMISSIONER OF INTERNAL REVENUE, Respondent\n \n \n \n Docket No. 5393-04. Filed May 31, 2006.\n \n \n \n Nancy Ortmeyer Kuhn, for petitioner.\n \"\"\",\n \"5393-04\",\n ),\n (\n \"\"\"\n MICHAEL KEITH SHENK, PETITIONER v. COMMISSIONER\n OF INTERNAL REVENUE, RESPONDENT\n \n Docket No. 5706-12. Filed May 6, 2013.\n \n P was divorced from his wife, and their 2003 ‘‘Judgment of\n Absolute Divorce’’ provided that his ex-wife would have pri-\n mary residential custody of their three minor children. The\n judgment provided that the dependency exemption deductions\n for the three children would be divided between the two ex-\n spouses according to various conditions but did not provide\n that the ex-wife must execute in P’s favor a Form 8332,\n ‘‘Release of Claim to Exemption for Child of Divorced or Sepa-\n rated Parents’’. The children resided with P’s ex-wife for more\n than half of 2009, and P’s ex-wife did not execute in P’s favor\n any Form 8332 or equivalent document for any year. For 2009\n P timely filed a Federal income tax return on which he\n claimed dependency exemption deductions and the child tax\n credit for two of the children, consistent with his under-\n standing of the terms of the judgment, but he did not attach\n any Form 8332 to his return. He also claimed head-of-house-\n hold filing status. His ex-wife, the custodial parent, timely\n filed a Federal income tax return for 2009 on which she also\n \n 200\n \n \n \n \n VerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00001 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n \f (200) SHENK v. COMMISSIONER 201\n \n \n claimed two dependency exemption deductions, so that one\n child was claimed on both parents’ returns. 
R allowed to P the\n dependency exemption deduction for one of the children but\n disallowed his claim for the dependency exemption deduction\n for the child who had also been claimed by the custodial\n parent. At trial P contended he is entitled to a dependency\n exemption deduction for all three children. Held: Since the\n custodial parent did not execute, and P could not and did not\n attach to his return, any Form 8332 or equivalent release, P\n is not entitled under I.R.C. sec. 152(e)(2)(A) to claim the\n dependency exemption deduction or the child tax credit. Held,\n further, where both the custodial parent and the noncustodial\n parent have claimed for the same year a dependency exemp-\n tion deduction for the same child, a declaration signed by the\n custodial parent after the period of limitations for assess-\n ments has expired as to the custodial parent could not qualify\n under I.R.C. sec. 152(e)(2)(A), and therefore there is no reason\n to grant P’s request to leave the record open so that he may\n obtain and proffer such a declaration. Held, further, P is not\n entitled to head-of-household filing status under I.R.C. sec.\n 2(b)(1) nor to the child tax credit under I.R.C. sec. 24.\n \n Michael Keith Shenk, for himself.\n Shari Salu, for respondent.\n GUSTAFSON, Judge: The Internal Revenue Service (IRS)\n determined a deficiency of $3,136 in the 2009 Federal income\n tax of petitioner Michael Keith Shenk. Mr. Shenk petitioned\n this Court, pursuant to section 6213(a), 1 for redetermination\n of the deficiency. After Mr. Shenk’s concession that he\n received but did not report $254 in dividend income, the\n issue for decision is whether Mr. Shenk is entitled to a\n dependency exemption deduction for one of his children\n under section 151(c), a child tax credit for that child under\n section 24(a), and head-of-household filing status under sec-\n tion 2(b)(1). On these issues, we hold for the IRS.\n FINDINGS OF FACT\n \n The judgment of divorce\n Mr. Shenk was married to Julie Phillips, and they have\n three minor children—M.S., W.S., and L.S. They divorced in\n 2003. The family court’s ‘‘Judgment of Absolute Divorce’’ pro-\n 1 Unless otherwise indicated, all citations of sections refer to the Internal\n \n Revenue Code (26 U.S.C.) in effect for the tax year at issue, and all cita-\n tions of Rules refer to the Tax Court Rules of Practice and Procedure.\n \n \n \n \n VerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00002 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n \f 202 140 UNITED STATES TAX COURT REPORTS (200)\n \n \n vided: that Ms. Phillips was ‘‘awarded primary residential\n custody’’ of the parties’ three children; and that Mr. Shenk\n would be liable for child support payments; but that, as to\n dependency exemptions—\"\"\",\n \"5706-12\",\n ),\n )\n site = tax.Site()\n for q, a in test_pairs:\n results = site.extract_from_text(q)\n docket_number = results[\"Docket\"][\"docket_number\"]\n self.assertEqual(docket_number, a)\n print \"✓\", docket_number", "def getRhymingParts(s):\n\n # 1st pass: If CMUDict directly tells us how to pronounce a word,\n # use that to extract its rhyming part\n s = s.lower()\n phones = pronouncing.phones_for_word(s)\n if phones:\n return [pronouncing.rhyming_part(phone) for phone in phones], s\n\n # 2nd pass: Find the longest suffix of the word that CMUDict tells us\n # how to pronounce, and use that to extract its rhyming part.\n # (Produces incorrect results for some words! 
e.g.:\n # lechugas -> gas\n # magnanimously -> sly\n n = len(s)\n bestSuffix = None\n # iter over s[-1:], s[-2:], s[-3:], ... s[-(n-1):]\n for i in range(1, n):\n suffix = s[-i:]\n partial = pronouncing.phones_for_word(suffix)\n\n # track the longest pronouncable suffix\n if partial:\n bestSuffix = suffix\n phones = partial\n\n if phones:\n print(f\"Guessed word: {s} ~~> {bestSuffix}\")\n return [pronouncing.rhyming_part(phone) for phone in phones], bestSuffix\n\n # Final Pass: CMUDict knows nothing about this word (it's probably a typo\n # or non-English). Use the entire, raw word as the rhyming part,\n # adding a star to indicate we couldn't find any CMUDict information\n print(\"Unknown word: \", s)\n return [s], f\"{s}*\"", "def make_bag(txt, stopw):\n bow = re.split('\\s',txt.lower())\n new_bow=[]\n for word in bow:\n if word not in stopw and len(word)>0 and not re.search('\\d',word):\n new_bow.append(word)\n return(new_bow)", "def get_jungle():\n url = \"http://www.gutenberg.org/files/140/140-h/140-h.htm#link2HCH0002\"\n book = requests.get(url)\n chapters = {}\n chapters_raw = re.compile('</h2>(.*?)<h2>', re.DOTALL).findall(book.text)\n chapters_soup = [BeautifulSoup(text, 'html.parser') for text in chapters_raw]\n for i in range(1, len(chapters_soup)):\n pars = chapters_soup[i].find_all('p')\n chapters[i] = pars\n return(chapters)", "def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}", "def drawsheet_parse(text):\n logging.debug(\"################ PARSING DRAW ##################\")\n\n month = \"({})\".format('|'.join(RE_MONTHS))\n\n patterns = (\n ('surface', r\"Hard|Outdoor Hard|Red Clay|Green Clay|Clay|\"\n r\"Grass|Indoor Hard|Carpet|Indoor Carpet\"),\n ('date', r\"\\d{1,2}(th)? ?- ?\\d{1,2}(th)? \" + month + r\",? \\d{4}|\" +\n month + r\" \\d{1,2}(th)? ?- ?\\d{1,2}(th)?,? \\d{4}\"),\n ('year', r\"\\d{4}\"),\n ('seed', r\"(?<=\\[)\\d+(?=\\])|(?<=\\[ )\\d+(?=\\ ])\"),\n ('round', r\"(1st|2nd|3rd) Round|1/8|1/4|1/2\"),\n ('class', r\"WTA( [A-Za-z0-9]+)*|US Open|\"\n r\"French Open|Australian Open|Wimbledon\"),\n ('orderedname', r\"[A-Z][a-z]+(( |-)[A-Z][a-z]+)*\"\n r\" ([A-Z]+(( |-)[A-Z]+)*)(?= |$)\"),\n ('fullname', r\"(?:^| )[Bb][Yy][Ee](?:$| )|([A-Z]+(( |-)[A-Z]+)*,\\s\"\n r\"[A-Z][a-zA-Z]*(( |-)([A-Z][a-zA-Z]*[a-z]))*)\"),\n #('shortname', r\"[A-Z]\\. ?[A-Z]+(( |-)[A-Z]+)*\"),\n ('shortname', r\"[A-Z]\\. ?[A-Za-z]+(( |-)[A-Za-z]+)*\"),\n ('country', r\"(?:(?!RET)[A-Z]{3}|\\([A-Z]{3}\\))(?= |$)\"),\n ('score',\n r\"([0-7][/-]?[0-7](\\(\\d+\\))?)( [0-7][/-]?[0-7](\\(\\d+\\))?){0,2}\"\n r\" ([Rr]et\\.|[Rr]et'd|[Rr]etired|[Rr]et)\"\n r\"|([0-7][/-]?[0-7](\\(\\d+\\))?)( [0-7][/-]?[0-7](\\(\\d+\\))?){1,2}\"\n r\"|([0-7]/?[0-7](\\(\\d+\\))? ){2}[\\d+]/[\\d+]\"\n r\"|(wo.|[Ww]alkover)\"),\n ('prize', r\"\\$[0-9,]+(?= |$)\"),\n ('number', r\"\\d{1,3}\\.?(?= |$)\"),\n ('city', r\"[A-Z][A-Za-z]*( [A-Z][A-Za-z]+)*,\"\n r\"( [A-Z][A-Z],)? (USA|[A-Z][a-z]*)\"),\n ('status', r\"(^|(?<=\\[|\\(| ))(Q|LL|W|WC)((?=\\]|\\)| )|$)\"),\n ('string', r\"([A-Za-z&,\\']+)( [A-Z&a-z$,]+)*\"),\n )\n \n pattern = re.compile('|'.join([\"(?P<{}>{})\".format(k, v) \n for k, v in patterns]))\n data = { k: [] for k, v in patterns}\n\n short_to_fullnames = {}\n ordered_to_fullnames = {}\n def add_to_fullname_conversion_table(fullname, x, y):\n nm = re.match('(.*), (.)', fullname)\n name = nm.group(2) + \". 
\" + nm.group(1)\n if name not in short_to_fullnames:\n short_to_fullnames[name] = []\n\n short_to_fullnames[name] += [(fullname, (x,y))]\n\n nm = re.match('(.*), (.*)', fullname)\n name = nm.group(2) + \" \" + nm.group(1)\n ordered_to_fullnames[name] = fullname\n\n\n re_skip = re.compile(r'Seeded +Players')\n # Find scores, names, etc\n y = 0\n skipping_page = False\n\n # collect the data\n width = 0\n lines = text.split('\\n');\n for line in lines:\n if skipping_page:\n if chr(12) in line:\n skipping_page = False\n else:\n continue\n\n if (re_skip.search(line)):\n # skip the seeding/info section, it's useless\n skipping_page = True\n continue;\n\n for m in pattern.finditer(line):\n for group, match in m.groupdict().items():\n if match is not None:\n match = match.strip()\n x1 = m.start(group)\n x2 = m.end(group)\n\n if x2 > width:\n width = x2\n\n data[group] += [(match, ((x1, x2), y))]\n\n if group == 'fullname' and match.upper() != \"BYE\":\n add_to_fullname_conversion_table(match, (x1, x2), y)\n\n y += 1\n\n # hack to catch country codes that got attached to fullnames\n if len(data['country']) > 0:\n cc_re = re.compile(r'^([A-Z]{3}) (.*)')\n # find known country codes\n countries = set(list(zip(*data['country']))[0])\n if len(data['fullname']) > len(data['country']):\n for n, point in data['fullname']:\n m = cc_re.match(n)\n if m and m.group(1) in countries:\n country = m.group(1)\n name = m.group(2)\n idx = data['fullname'].index((n, point))\n del data['fullname'][idx]\n (x1, x2), y = point\n data['fullname'].insert(idx, (name, ((x1 + 4), x2, y)))\n data['country'].append((country, ((x1, x1 + 3), y)))\n add_to_fullname_conversion_table(name)\n if len(data['fullname']) == len(data['country']):\n # we're done\n break\n\n # find any possible country codes\n if len(data['fullname']) > len(data['country']):\n for n, point in data['fullname']:\n m = cc_re.match(n)\n if m:\n country = m.group(1)\n name = m.group(2)\n idx = data['fullname'].index((n, point))\n del data['fullname'][idx]\n (x1, x2), y = point\n data['fullname'].insert(idx, (name, ((x1 + 4, x2), y)))\n data['country'].append((country, ((x1, x1 + 3), y)))\n add_to_fullname_conversion_table(name)\n if len(data['fullname']) == len(data['country']):\n # we're done\n break\n\n orderednames = []\n for n, point in data['orderedname']:\n try:\n n = ordered_to_fullnames[n]\n orderednames += [(n, point)]\n except KeyError:\n data['string'] += [(n, point)]\n\n data['orderedname'] = orderednames\n\n def distance(a, b):\n ax1, ax2 = a[0]\n bx1, bx2 = b[0]\n ax = (ax1 + ax2) / 2\n bx = (bx1 + bx2) / 2\n dx = float(ax - bx) / 10\n dy = float(a[1] - b[1])\n\n return math.sqrt(dx * dx + dy * dy)\n\n # assign shortnames to longnames\n # some people share a shortname, so assign to \n # the longname that is closest\n shortnames = []\n for n, point in data['shortname']:\n n = n.upper()\n if n[2] != ' ':\n short = n[0:2] + ' ' + n[2:]\n else:\n short = n\n\n try:\n shorts = short_to_fullnames[short]\n\n short = min(shorts, key=lambda s: distance(s[1], point))\n shortnames += [(short[0], point)]\n except KeyError:\n data['string'] += [(n, point)]\n\n data['shortname'] = shortnames\n\n logging.debug(pprint.pformat(data))\n\n return data, width;", "def categorize(book: TextIO) -> list:\n chunks = get_texts(book)\n texts = []\n for t in chunks:\n level = difficulty(complexity(t))\n texts.append((t, level, keywords(t)))\n return texts", "def preProcess(text):\n\ttext = text.lower() # lower case the text\n\t# Q4 replace the word with expanded 
contractions\n\tfor k,v in general_contraction.items():\n\t\tif k in text.split():\n\t\t\ttext = text.replace(k,v)\n\t# Q4 remove speacial char including all puncuattions and replace it with a space\n\ttext = re.sub('[^A-Za-z0-9]+',' ',text) \n\t# tokenise\n\ttokens = text.split()\n\t# stop word removal\n\ttokens = [w for w in tokens if w not in stopwords ]\n\t# Q4 Stemming\n\ttokens = [str(porter.stem(w)) for w in tokens]\n\t# if word is non-english return its english form # too much time-complexity\n\t# tokens = [porter.stem(w) if porter.stem(w) in set(words.words()) else w for w in tokens ]\n\t# for words having digits such as 12gb, 1st, etc expanding the token list\n\tfor k in tokens:\n\t\tif len(k) >2 and re.match(r'[0-9]+',k):\t\t\t\n\t\t\tif len(k) >2 and not k.isdigit():\n\t\t\t\tl = re.split(r'(\\d+)',k)\n\t\t\t\tl = [w for w in l if w is not '' ]\n\t\t\t\tif l and len(l) <= 3:\n\t\t\t\t\tfor i in l:\n\t\t\t\t\t\tif i in digit_contractions.keys():\n\t\t\t\t\t\t\tl = list(map(lambda b: b.replace(i,digit_contractions[i]), l))\n\t\t\t\t\ttokens.remove(k)\n\t\t\t\t\ttokens = tokens+l\n\t\t\t\telse:\n\t\t\t\t\ttokens.remove(k)\n\tfor k,v in digit_contractions.items():\n\t\tif k in tokens:\n\t\t\tif tokens[tokens.index(k)-1].isdigit():\t\n\t\t\t\ttokens = list(map(lambda b: b.replace(k,v), tokens))\n\t# remove tokens of size less than 2\n\ttokens = [t for t in tokens if len(t) > 2]\n\treturn tokens", "def _recipe_details_generator(self, converted_content, overview_recipe):\n def get_cooking_shop_strings(lines):\n ret = []\n buf = None\n is_recipe_step_area = False\n for l in lines:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n if buf:\n ret.append(buf)\n buf = l.strip()\n continue\n\n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_recipe_step_area = False\n\n if re.search(\"^材料\", l.strip()):\n title, materials = re.search(\"(材料)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + materials.strip()\n continue\n\n if re.search(\"^作り方\", l.strip()):\n is_recipe_step_area = True\n title, recipe_steps = re.search(\"(作り方)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + recipe_steps.strip()\n continue\n \n if buf:\n if is_recipe_step_area:\n if re.match(r\"^[①-⑳*]\", l.strip()):\n buf += \"\\n\" + l.strip()\n else:\n buf += l.strip()\n else:\n buf += \"\\n\" + l.strip()\n if buf:\n ret.append(buf)\n\n return ret\n \n \n for ii, l in enumerate(converted_content.splitlines()):\n if ii == 1:\n overview_recipe.cooking_name_sub = l.strip()\n continue\n \n if -1 < l.find(\"初回放送\"):\n overview_recipe.program_date = dateutil.parser.parse(\"/\".join(re.search(r\"(\\d+)\\D+(\\d+)\\D+(\\d+)\\D+\", l).groups()))\n break\n\n cooking_shop_strings = get_cooking_shop_strings(converted_content.splitlines())\n\n logger.debug(\"-\" * 20)\n logger.debug(cooking_shop_strings)\n for shop_string in cooking_shop_strings:\n recipe_shop = None\n recipe = None\n is_material_area = False\n is_recipe_step_area = False\n for l in shop_string.splitlines():\n if len(l.strip()) == 0:\n continue\n \n if is_material_area == False and is_recipe_step_area == False:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n recipe_shop = copy.deepcopy(overview_recipe)\n recipe = None\n \n m = re.search(r\"「(.*)」\", l)\n if m:\n 
recipe_shop.cooking_name_sub += \"/\" + m.group(1)\n else:\n m2 = re.search(r\"『(.*)』\", l)\n if m2:\n recipe_shop.cooking_name_sub += \"/\" + m2.group(1)\n \n continue\n \n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_material_area = False\n is_recipe_step_area = False\n if recipe:\n yield recipe\n\n if recipe_shop:\n recipe = copy.deepcopy(recipe_shop)\n else:\n recipe = copy.deepcopy(overview_recipe)\n \n if -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif re.search(r\"^(料理|万能調味料)[①-⑳]\", l.strip()):\n # https://www.nhk.or.jp/program/manpuku/recipe/dg0_200115.pdf\n # 料理①カルパッチョ\n recipe.cooking_name = l.strip()[3:].strip()\n else:\n recipe.cooking_name = l.split(None, 1)[1].strip()\n continue\n \n if re.search(\"^材料\", l.strip()):\n is_material_area = True\n is_recipe_step_area = False\n if l.strip() == \"材料\":\n continue\n \n if re.search(\"^作り方\", l.strip()):\n is_material_area = False\n is_recipe_step_area = True\n if l.strip() == \"作り方\":\n pass\n else:\n l = l.replace(\"作り方\", \"\", 1)\n # recipeがNoneの場合はエラーとして検出したい\n recipe.recipe_steps.append(RecipeText(l.strip()))\n continue\n \n \n if is_material_area:\n for material in l.strip().split(\"、\"):\n material = material.strip()\n if len(material):\n if material.startswith(\"(\"):\n recipe.materials.append(RecipeText(material))\n else:\n recipe.materials.append(RecipeText(material.replace(\"(\", \": \").replace(\")\", \"\")))\n \n if is_recipe_step_area:\n recipe.recipe_steps.append(RecipeText(l.strip()))\n if recipe:\n yield recipe", "def get_rules(data: List[str]) -> Dict[str, Dict[str, int]]:\n contains_split = re.compile(r\"bags?.?$\")\n bags = {}\n\n for line in data:\n color, contains = line.split(\" bags contain \")\n if not contains == \"no other bags.\":\n contains = [contains_split.sub(\"\", item).strip() for item in contains.split(\",\")]\n sub_bags = dict(reversed(a.split(\" \", 1)) for a in contains)\n else: # If there aren't any bags within the bag\n sub_bags = {}\n bags.update({color: sub_bags})\n\n return bags", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = 
mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def __line_parse_4(index: int, line: list, dictionary: dict, word_list: list):\n if index + 4 >= len(line):\n return\n word_1 = line[index + 4]\n word_2 = line[index + 3]\n word_3 = line[index + 2]\n word_4 = line[index + 1]\n word_5 = line[index]\n\n if word_1 == \"\" or word_2 == \"\" or word_3 == \"\" or word_4 == \"\" or word_5 == \"\":\n return\n\n if word_1 not in dictionary:\n dictionary[word_1] = {\n str(word_1 + \"_4\"): {\n\n }\n }\n if word_1 not in word_list:\n word_list.append(word_1)\n\n \"\"\"word_5 word_4 word_3 word_2 word_1\"\"\"\n if word_5 not in dictionary[word_1][str(word_1 + \"_4\")]:\n dictionary[word_1][str(word_1 + \"_4\")][word_5] = 1\n else:\n dictionary[word_1][str(word_1 + \"_4\")][word_5] = dictionary[word_1][str(word_1 + \"_4\")][word_5] + 1", "def code() -> str:\n return \"\"\"\n G91 G17\n G0 Y10 X-10\n G0 Y0 X-5\n G0 Y5 X0\n G0 Y0 X5\n G0 Y0 X-5\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G0 Y-5 X0\n G0 Y-10 X10\n G0 Y0 X-5\n G0 Y-15 X-15\n G0 Y0 X5\n G0 Y5 X0\n G0 Y0 X-5\n G0 Y-5 X0\n G0 Y5 X0\n G2 Y5 X5 J0 I5\n G0 Y0 X5\n G0 Y-5 X0\n G2 Y-5 X-5 J0 I-5\n G0 Y5 X0\n G0 Y10 X10\n G0 Y0 X-30\n G3 Y0 X-10 J0 I-5\n G3 Y0 X10 J0 I5\n\n G0 Y0 X5\n G3 Y5 X5 J5 I0\n G3 Y10 X-10 J0 I-10\n G3 Y-5 X-5 J-5 I0\n G0 Y-5 X0\n\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G3 Y-10 X-10 J-10 I0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n\n G0 Y0 X-5\n G3 Y-5 X-5 J-5 I0\n G3 Y-10 X10 J0 I10\n G3 Y5 X5 J5 I0\n G0 Y5 X0\n\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G3 Y10 X10 J10 I0\n G3 Y5 X-5 J0 I-5\n G0 Y0 X-5\n \"\"\"", "def convert_to_tenfive(ch_names=None):\n correspondance = {\n # ------------\n \"65\": \"Fpz\",\n \"1\": \"Fp1\",\n \"2\": \"Fp2\",\n # ------------\n \"66\": \"AFp1\",\n \"67\": \"AFp2\",\n \"33\": \"AF7\",\n \"34\": \"AF3\",\n \"68\": \"AFz\",\n \"36\": 
\"AF4\",\n \"38\": \"AF8\",\n # ------------\n \"69\": \"AFF2\",\n \"70\": \"AFF1\",\n \"71\": \"AFF3\",\n \"72\": \"AFF4\",\n # ------------\n \"3\": \"F7\",\n \"41\": \"F5\",\n \"4\": \"F3\",\n \"43\": \"F1\",\n \"5\": \"Fz\",\n \"46\": \"F2\",\n \"6\": \"F4\",\n \"48\": \"F6\",\n \"7\": \"F8\",\n # ------------\n \"73\": \"FFT9\",\n \"74\": \"FFT7\",\n \"75\": \"FFC5\",\n \"76\": \"FFC3\",\n \"77\": \"FFC1\",\n \"78\": \"FFC2\",\n \"79\": \"FFC4\",\n \"80\": \"FFC6\",\n \"81\": \"FFT8\",\n \"82\": \"FFT10\",\n # ------------\n \"50\": \"FT9\",\n \"51\": \"FT7\",\n \"20\": \"FC5\",\n \"35\": \"FC3\",\n \"21\": \"FC1\",\n \"83\": \"FCz\",\n \"22\": \"FC2\",\n \"39\": \"FC4\",\n \"23\": \"FC6\",\n \"40\": \"FT8\",\n \"44\": \"FT10\",\n # ------------\n \"84\": \"FTT9\",\n \"85\": \"FTT7\",\n \"86\": \"FCC5\",\n \"87\": \"FCC3\",\n \"88\": \"FCC1\",\n \"89\": \"FCC2\",\n \"90\": \"FCC4\",\n \"91\": \"FCC6\",\n \"92\": \"FTT8\",\n \"93\": \"FTT10\",\n # ------------\n \"8\": \"T7\",\n \"45\": \"C5\",\n \"9\": \"C3\",\n \"49\": \"C1\",\n \"10\": \"Cz\",\n \"42\": \"C2\",\n \"11\": \"C4\",\n \"37\": \"C6\",\n \"12\": \"T8\",\n # ------------\n \"94\": \"TTP7\",\n \"95\": \"CCP5\",\n \"96\": \"CCP3\",\n \"97\": \"CCP1\",\n \"98\": \"CCP2\",\n \"99\": \"CCP4\",\n \"100\": \"CCP6\",\n \"101\": \"TTP8\",\n # ------------\n \"47\": \"TP7\",\n \"25\": \"CP5\",\n \"52\": \"CP3\",\n \"26\": \"CP1\",\n \"53\": \"CPz\",\n \"27\": \"CP2\",\n \"54\": \"CP4\",\n \"28\": \"CP6\",\n \"55\": \"TP8\",\n # ------------\n \"102\": \"TPP9\",\n \"103\": \"TPP7\",\n \"104\": \"CPP5\",\n \"105\": \"CPP3\",\n \"106\": \"CPP1\",\n \"107\": \"CPP2\",\n \"108\": \"CPP4\",\n \"109\": \"CPP6\",\n \"110\": \"TPP8\",\n \"111\": \"TPP10\",\n # ------------\n \"13\": \"P7\",\n \"56\": \"P5\",\n \"14\": \"P3\",\n \"57\": \"P1\",\n \"15\": \"Pz\",\n \"58\": \"P2\",\n \"16\": \"P4\",\n \"59\": \"P6\",\n \"17\": \"P8\",\n \"112\": \"P9\",\n \"119\": \"P10\",\n # ------------\n \"24\": \"TP9\",\n \"29\": \"TP10\",\n # ------------\n \"113\": \"PPO5\",\n \"114\": \"PPO3\",\n \"115\": \"PPO1\",\n \"116\": \"PPO2\",\n \"117\": \"PPO4\",\n \"118\": \"PPO6\",\n # ------------\n \"31\": \"PO9\",\n \"60\": \"PO7\",\n \"61\": \"PO3\",\n \"62\": \"POz\",\n \"63\": \"PO4\",\n \"64\": \"PO8\",\n \"32\": \"PO10\",\n # ------------\n \"121\": \"POO1\",\n \"122\": \"POO2\",\n \"120\": \"POO3\",\n \"123\": \"POO4\",\n # ------------\n \"18\": \"O1\",\n \"30\": \"Oz\",\n \"19\": \"O2\",\n # ------------\n \"124\": \"OI1\",\n \"125\": \"OI2\",\n # ------------\n \"126\": \"I1\",\n \"127\": \"Iz\",\n \"128\": \"I2\",\n }\n if ch_names is None:\n ch_names = list(correspondance.keys())\n ch_names = [correspondance.get(name, name) for name in ch_names]\n return ch_names", "def make_text(chains):\n\n text = []\n nchars = 0\n\n # Starting ngram (as tuple), first word in tuple must be uppercase\n start = choice(get_uppercase(chains))\n\n # Add starting ngram to text list\n text.extend(start)\n\n # Add length of words in first bigram and two spaces to nchars\n nchars += len(start[0]) + len(start[1]) + 2\n\n while nchars < 119:\n # Choose next word randomly from list\n new_word = choice(chains[start])\n\n # add length of new word to nchars\n # add one for space between words\n nchars += len(new_word) + 1\n\n if nchars > 120:\n break\n else:\n # Add new word to text list\n text.append(new_word)\n\n # Generate tuple for next ngram\n new_key = start[1:] + (new_word,)\n\n # Break out of loop if bigram doesn't exist\n if new_key in chains:\n start = new_key\n 
else:\n break\n\n text.append(\"#hackbrightgracejan17\")\n\n # Find last sentence punctuation in text\n text_string = ' '.join(text)\n\n # period = text_string.rfind('.')\n # exclamation = text_string.rfind('!')\n # question = text_string.rfind('?')\n\n # largest = max(period, exclamation, question)\n\n # # Remove everything after the last punctuation, if there is anything\n # if len(text_string) == largest+1:\n # return text_string\n # else:\n # return text_string[:largest+1]\n\n return text_string", "def smooth_tag_model(cls, heldout_data):\n\n # bi_transition_counts = defaultdict(int)\n n = 0 # count word-tags\n e = .0001 # stopping condition\n L = [.25, .25, .25, .25] # initialize lambdas uniformly\n i = 1 # iteration\n while True:\n # E Step (Step 1)\n # Iterate through all occurring trigrams\n # in the heldout.txt data (H), i.e. minimizing\n # log likelihood\n counts = [0, 0, 0, 0]\n ratio = [0, 0, 0, 0]\n nextL = 4 * [0] # next lambda\n\n for sent in heldout_data:\n\n # Handle beginning of sentence\n t = '<s>'\n u = '<s>'\n # bi_transition_counts[t, u] += 1\n\n # if first == '<s>' and second == '<s>': # add bigram for nulls\n\n for word, tag in sent:\n v = tag\n # tri_transitions[t, u, v] += 1\n # bi_transitions[u, v] += 1\n # # if first == '<s>' and second == '<s>': # add bigram for nulls\n # # tagger._bi_transitions[('<s>', '<s>')] += 1\n # uni_transitions[v] += 1\n # cls._emissions[word, tag] += 1\n # tags.add(v)\n # words.add(word)\n # uni_words[word] += 1\n\n\n # Calculate expected counts of lambdas\n ratio = cls.calc_tag_ratio(t, u, v, L)\n\n # M-step (Step 2)\n # Calculate expected counts of lambdas, i.e. weight, taking\n # into account the number of occurrences of each trigram (cnt)\n for j in range(len(L)):\n counts[j] += ratio[j] # weight of lambda in whole equation (count)\n\n t = u\n u = v\n # n += 1\n\n # Handle end of sentence\n # tri_transitions[t, u, '</s>'] += 1\n v = '</s>'\n ratio = cls.calc_tag_ratio(t, u, v, L)\n for j in range(len(L)):\n counts[j] += ratio[j] # weight of lambda in whole equation (count)\n\n # cls._bi_transitions[first, second] += 1\n # cls._tri_transitions[second, '</s>', '</s>'] += 1\n\n\n # Update values for parameters given current distribution\n for k in range(len(L)):\n total = np.sum(counts)\n nextL[k] = counts[k] / total # next lambda\n\n # Check if lambda values have converged\n converged = True\n for l in range(len(L)):\n if np.abs(nextL[l] - L[l]) > e: # tolerance = e\n converged = False\n L = nextL\n\n # Return values if lambdas have converged\n if converged:\n break\n\n i += 1 # increment iteration counter\n\n\n return L # copy lambdas passed by reference", "def ngrams(name_string, n=3):\n\n string = re.sub(r'[,-./]|\\sBD', r'', name_string)\n n_grams = zip(*[string[i:] for i in range(n)])\n return [''.join(n_gram) for n_gram in n_grams]", "def emotion(input_list, output_dict):\n\temotion_mapping = 
{\"😀\":\"มีความสุข\",\n\t\"😃\":\"มีความสุข\",\n\t\"😄\":\"มีความสุข\",\n\t\"😁\":\"มีความสุข\",\n\t\"😆\":\"มีความสุข\",\n\t\"😅\":\"มีความสุข\",\n\t\"😂\":\"มีความสุข\",\n\t\"🤣\":\"มีความสุข\",\n\t\"😊\":\"มีความสุข\",\n\t\"😇\":\"มีความสุข\",\n\t\"🙂\":\"มีความสุข\",\n\t\"🙃\":\"มีความสุข\",\n\t\"😉\":\"มีความสุข\",\n\t\"😌\":\"มีความสุข\",\n\t\"😍\":\"มีความสุข\",\n\t\"😘\":\"มีความสุข\",\n\t\"😗\":\"มีความสุข\",\n\t\"😙\":\"มีความสุข\",\n\t\"😚\":\"มีความสุข\",\n\t\"😋\":\"มีความสุข\",\n\t\"😜\":\"มีความสุข\",\n\t\"😝\":\"มีความสุข\",\n\t\"😛\":\"มีความสุข\",\n\t\"🤗\":\"มีความสุข\",\n\t\"🤓\":\"มีความสุข\",\n\t\"😎\":\"มีความสุข\",\n\t\"🤡\":\"มีความสุข\",\n\t\"🤠\":\"มีความสุข\",\n\t\"😒\":\"ไม่มีความสุข\",\n\t\"😞\":\"ไม่มีความสุข\",\n\t\"😔\":\"ไม่มีความสุข\",\n\t\"😟\":\"ไม่มีความสุข\",\n\t\"☹\" :\"ไม่มีความสุข\",\n\t\"😕\":\"ไม่มีความสุข\",\n\t\"🙁\":\"ไม่มีความสุข\",\n\t\"😣\":\"ไม่มีความสุข\",\n\t\"😖\":\"ไม่มีความสุข\",\n\t\"😫\":\"ไม่มีความสุข\",\n\t\"😩\":\"ไม่มีความสุข\",\n\t\"😤\":\"ไม่มีความสุข\",\n\t\"😠\":\"ไม่มีความสุข\",\n\t\"😡\":\"ไม่มีความสุข\",\n\t\"😐\":\"ไม่มีความสุข\",\n\t\"😑\":\"ไม่มีความสุข\",\n\t\"😯\":\"ไม่มีความสุข\",\n\t\"😦\":\"ไม่มีความสุข\",\n\t\"😧\":\"ไม่มีความสุข\",\n\t\"😮\":\"มีความสุข\",\n\t\"😲\":\"มีความสุข\",\n\t\"😵\":\"ไม่มีความสุข\",\n\t\"😳\":\"มีความสุข\",\n\t\"😱\":\"ไม่มีความสุข\",\n\t\"😨\":\"ไม่มีความสุข\",\n\t\"😰\":\"ไม่มีความสุข\",\n\t\"😢\":\"ไม่มีความสุข\",\n\t\"😥\":\"ไม่มีความสุข\",\n\t\"🤤\":\"ไม่มีความสุข\",\n\t\"😭\":\"ไม่มีความสุข\",\n\t\"😓\":\"ไม่มีความสุข\",\n\t\"😪\":\"ไม่มีความสุข\",\n\t\"😴\":\"ไม่มีความสุข\",\n\t\"🤥\":\"ไม่มีความสุข\",\n\t\"😬\":\"มีความสุข\",\n\t\"🤢\":\"ไม่มีความสุข\",\n\t\"🤧\":\"ไม่มีความสุข\",\n\t\"😷\":\"ไม่มีความสุข\",\n\t\"🤒\":\"ไม่มีความสุข\",\n\t\"🤕\":\"ไม่มีความสุข\",\n\t\"😈\":\"ไม่มีความสุข\",\n\t\"👿\":\"ไม่มีความสุข\",\n\t\"👹\":\"ไม่มีความสุข\",\n\t\"👺\":\"ไม่มีความสุข\",\n\t\"💩\":\"ไม่มีความสุข\",\n\t\"😺\":\"มีความสุข\",\n\t\"😸\":\"มีความสุข\",\n\t\"😹\":\"มีความสุข\",\n\t\"😻\":\"มีความสุข\",\n\t\"😼\":\"มีความสุข\",\n\t\"😽\":\"มีความสุข\",\n\t\"🙀\":\"มีความสุข\",\n\t\"😿\":\"ไม่มีความสุข\",\n\t\"😾\":\"ไม่มีความสุข\",\n\t\"👏\":\"มีความสุข\",\n\t\"👍\":\"มีความสุข\",\n\t\"👎\":\"ไม่มีความสุข\",\n\t\"🖕\":\"ไม่มีความสุข\",\n\t\"❤\":\"มีความสุข\",\n\t\"💛\":\"มีความสุข\",\n\t\"💚\":\"มีความสุข\",\n\t\"💙\":\"มีความสุข\",\n\t\"💜\":\"มีความสุข\",\n\t\"🖤\":\"ไม่มีความสุข\",\n\t\"💕\":\"มีความสุข\",\n\t\"💞\":\"มีความสุข\",\n\t\"💓\":\"มีความสุข\",\n\t\"💗\":\"มีความสุข\",\n\t\"💖\":\"มีความสุข\",\n\t\"💘\":\"มีความสุข\",\n\t\"💝\":\"มีความสุข\",\n\t\"💟\":\"มีความสุข\"}\n\n\tfor emo in input_list:\n\t\tif emo in emotion_mapping:\n\t\t\tfeeling = emotion_mapping[emo]\n\t\t\tif feeling not in output_dict:\n\t\t\t\toutput_dict[feeling] = 1\n\t\t\telse:\n\t\t\t\toutput_dict[feeling] += 1\n\n\treturn output_dict", "def decode(self, input):\n S = [s[0] for s in self.ngram_counts[0].keys()] # Set of tags\n _S = [s[0] for s in self.ngram_counts[0].keys()]\n _S.append('*') # _S includes '*' tag\n X = ['*'] # X stores each sentence. 
X[0] = '*', X[i] = xi\n for l in input:\n x = l.strip()\n if x: # Word\n X.append(x)\n else: # End of a sentence\n n = len(X) - 1 # the length of the sentence\n pi = defaultdict(float) # DP table PI\n bp = {} # back pointer\n\n # Initialize DP table\n for u in _S:\n for v in _S:\n pi[tuple([0, u, v])] = 0\n pi[tuple([0, '*', '*'])] = 1\n\n # Viterbi algorithm\n for k in xrange(1, n + 1):\n for u in _S:\n for v in S: # v will not be '*' \n max_score = 0\n tag = None\n for w in _S:\n if sum([self.emission_counts[tuple([y, X[k]])] for y in S]) < 5: # If the word X[k] is rare word or unseen word in the training corpus,\n x = symbolize(X[k], self.symbolize_option) # use RARE word probability\n else:\n x = X[k]\n try:\n score = pi[tuple([k-1, w, u])] * self.q(v, w, u) * self.e(x, v)\n if max_score < score:\n max_score = score\n tag = w\n except:\n pass\n pi[tuple([k, u, v])] = max_score # Update DP table entry\n bp[tuple([k, u, v])] = tag\n\n # Find tag sequence\n Y = ['*'] # Y stores tag sequence for X. Y[0] = '*', Y[i] = yi\n Y.extend(n * [None])\n max_score = None\n tag = None\n for u in _S:\n for v in _S:\n if self.ngram_counts[1][tuple([u, v])]:\n score = pi[tuple([n, u, v])] * self.q('STOP', u, v)\n if max_score is None or max_score < score:\n max_score = score\n tag = [u, v]\n Y[n-1] = tag[0]\n Y[n] = tag[1]\n for k in xrange(n - 2, 0, -1):\n Y[k] = bp[tuple([k + 2, Y[k + 1], Y[k + 2]])]\n\n # Write result\n prev = '*'\n for k in xrange(1, n + 1):\n print X[k], Y[k], log(pi[tuple([k, prev, Y[k]])])\n prev = Y[k]\n print ''\n\n X = ['*'] # set for the next sentence", "def tag_nes(dic, max_key_len, sent):\n dic_nes = []\n for begin in range(len(sent.morps)):\n right_bound = _find_right_bound(sent.morps, begin, max_key_len)\n # find pattern and key, longest first\n for end in range(right_bound, begin, -1): # end is exclusive\n text = make_text(sent, begin, end)\n categories = []\n ptn = make_dt_ti_ptn(text)\n if ptn in dic:\n categories = dic[ptn]\n else:\n key = re.sub(r'\\s+', '', text).lower()\n if key in dic:\n categories = dic[key]\n if categories:\n dic_ne_obj = {}\n dic_ne_obj['id'] = len(dic_nes)\n dic_ne_obj['text'] = text\n dic_ne_obj['type'] = categories\n dic_ne_obj['begin'] = begin\n dic_ne_obj['end'] = end-1 # NE's end is inclusive\n dic_nes.append(dic_ne_obj)\n break\n return dic_nes", "def parse_puzzle(puzzle):\n puzzle = re.sub(\"\\sGrid \\d{2}\",\"\", sample)\n puzzle = puzzle.strip().split(\"\\n\") \n return puzzle", "def activity(name):\n\n name_split = name.split(\",\")\n if \"Irrigation\" in name and \"gal\" not in name_split[1]:\n n = name_split[0] + \",\" + name_split[1]\n else:\n n = name_split[0]\n\n if \" to \" in n:\n activity = n.split(\" to \")\n name = split_name(activity[0])\n produced = name[0]\n consumed = capitalize_first_letter(activity[1])\n elif \" from \" in n:\n if \")\" in n:\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n produced_split = close_paren_split[1].split(\" from \")\n produced = capitalize_first_letter(produced_split[1].strip())\n consumed = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n else:\n activity = n.split(\" from \")\n name = split_name(activity[0])\n produced = capitalize_first_letter(activity[1])\n consumed = name[0].strip()\n elif \"consumptive\" in n:\n if \")\" in n:\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = 
open_paren_split[1].split(\")\")\n produced = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n consumed = None\n else:\n split_case = split_name(n)\n consumed = None\n produced = capitalize_first_letter(split_case[0])\n elif \")\" in n:\n produced = None\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n consumed = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n elif \"total deliveries\" in n:\n split_case = split_name(n)\n consumed = None\n produced = capitalize_first_letter(split_case[0])\n elif \"Self-supplied\" in n:\n split_case = split_name(n)\n produced = None\n consumed = capitalize_first_letter(split_case[1])\n else:\n split_case = split_name(n)\n produced = None\n consumed = capitalize_first_letter(split_case[0])\n return pd.Series([produced, consumed])", "def extract_information(preprocessed_sentences):\n parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))", "def model(nr, nw, model_num):\r\n\r\n\treader = csv.reader(open(sys.argv[nr], \"r\"), delimiter='\\t') \r\n\tex = np.array(list(reader))\r\n\r\n\tlabel = np.zeros(len(ex))\r\n\r\n\twriter = open(sys.argv[nw], \"w\")\r\n\r\n\tif(model_num==1):\r\n\r\n\t\t# model - 1 = indicating which word occurs atleast once in the review from the dictionary\r\n\r\n\t\tfor i in range(len(ex)):\r\n\r\n\t\t\t# noting down the label assigned to each movie from review\r\n\t\t\tlabel[i] = ex[i][0]\r\n\r\n\t\t\twriter.write(str(int(label[i])))\r\n\t\t\t\r\n\t\t\tA = ex[i][1].split()\r\n\t\t\t\r\n\t\t\t# remove repetitive\r\n\t\t\tx = {} #empty dictionary to keep a track of repetitive words\r\n\r\n\t\t\tfor a in 
A:\t\t\t\t\r\n\t\t\t\tif(a in dict and dict[a] not in x):\t\r\n\t\t\t\t\t# adding a label 1 to words which are occuring in the review\r\n\t\t\t\t \tx[dict[a]]=1\r\n\t\t\t\t \twriter.write(\"\\t\")\r\n\t\t\t\t \twriter.write(dict[a]+\":1\")\r\n\t\t\t\t \t\r\n\t\t\twriter.write(\"\\n\")\r\n\r\n\telif(model_num==2):\r\n\r\n\t\t\"\"\"model - 2 = keeping a count of all the words in review and removing the words which are \r\n\t\toccuring more than 4 times as they may be just punctuation\"\"\"\r\n\r\n\t\tfor i in range(len(ex)):\r\n\t\t\t# noting down the label assigned to each movie from review\r\n\t\t\tlabel[i] = ex[i][0]\r\n\r\n\t\t\twriter.write(str(int(label[i])))\r\n\t\t\t\r\n\t\t\tA = ex[i][1].split()\r\n\t\t\t\r\n\t\t\tx = {} #dictionary to keep track of words occuring in review\r\n\r\n\t\t\tfor a in A:\r\n\t\t\t\t\r\n\t\t\t\tif(a in dict):\r\n\t\t\t\t\t# if word already there, add a count or else add it to the dictionary\r\n\r\n\t\t\t \t\tif(dict[a] in x):\r\n\t\t\t \t\t\tx[dict[a]]=x[dict[a]]+1\r\n\r\n\t\t\t \t\telif(dict[a] not in x):\r\n\t\t\t \t\t\tx[dict[a]]=1\r\n\t\t\t\r\n\t\t\t# to remove words which are occuring more than 4 times\r\n\t\t\ty = {}\r\n\t\t\tfor a in A:\r\n\t\t\t\tif(a in dict and x[dict[a]] < 4 and dict[a] not in y):\t\r\n\t\t\t\t\ty[dict[a]]=1\r\n\t\t\t\t\twriter.write(\"\\t\")\r\n\t\t\t\t\twriter.write(dict[a]+\":1\")\r\n\t\t\t\t\t\r\n\t\t\twriter.write(\"\\n\")", "def one_v_all(description):\n label = [0, 0, 0]\n for raw in description.split(\"|\")[4:]:\n line = raw.strip()\n if \"midbrain\" in line:\n label[0] = 1\n if \"forebrain\" in line:\n label[1] = 1\n if \"hindbrain\" in line:\n label[2] = 1\n return label", "def getstates(s) -> \"[str]\":\n pass", "def startrek_characters():\n pdf = pd.DataFrame({\n 'name': [\n 'JONATHAN ARCHER',\n 'Michael Burnham',\n 'Chakotay ',\n ' Data ',\n 'the Doctor',\n 'philippa georgiou',\n 'Jean--Luc PICARD',\n 'Christopher pike '\n ],\n 'rank': [\n 'Captain',\n 'Comander',\n 'Comander',\n 'LT Commander',\n 'None',\n 'Capitain',\n 'Captain',\n 'CAPTAIN',\n ]\n })\n return pdf", "def main():\n\tnuc = \"ATGAAGACCATCATTGCTTTGAGCTACATTTTCTGTCTGGCTCTCGGCCAAGACCTTCCAGGAAATGACAACAGCACAGCAACGCTGTGCCTGGGACATCATGCGGTGCCAAACGGAACACTAGTGAAAACAATCACAGATGATCAGATTGAAGTGACTAATGCTACTGAGCTAGTTCAGAGCTCCTCAACGGGGAAAATATGCAACAATCCTCATCGAATCCTTGATGGAATAGACTGCACACTGATAGATGCTCTATTGGGGGACCCTCATTGTGATGTTTTTCAAAATGAGACATGGGACCTTTTCGTTGAACGCAGCAAAGCTTTCAGCAACTGTTACCCTTATGATGTGCCAGATTATGCCTCCCTTAGGTCACTAGTTGCCTCGTCAGGCACTCTGGAGTTTATCACTGAGGGTTTCACTTGGACTGGGGTCACTCAGAATGGGGGAAGCAATGCTTGCAAAAGGGGACCTGGTAGCGGTTTTTTCAGTAGACTGAACTGGTTGACCAAATCAGGAAGCACATATCCAGTGCTGAACGTGACTATGCCAAACAATGACAATTTTGACAAACTATACATTTGGGGGGTTCACCACCCGAGCACGAACCAAGAACAAACCAGCCTGTATGTTCAAGCATCAGGGAGAGTCACAGTCTCTACCAGAAGAAGCCAGCAAACTATAATCCCGAATATCTGGTCCAGACCCTGGGTAAGGGGTCTGTCTAGTAGAATAAGCATCTATTGGACAATAGTTAAGCCGGGAGACGTACTGGTAATTAATAGTAATGGGAACCTAATCGCTCCTCGGGGTTATTTCAAAATGCGCACTGGGAAAAGCTCAATAATGAGGTCAGATGCACCTATTGATACCTGTATTTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAGCCCTTTCAAAACGTAAACAAGATCACATATGGAGCATGCCCCAAGTATGTTAAGCAAAACACC\"\n\taa = translate(nuc[48:])\n\tep = epitope_sites(aa)\n\tne = nonepitope_sites(aa)\n\trb = receptor_binding_sites(aa)\n\tprint \"nuc: \" + nuc\n\tprint \"aa: \" + aa\n\tprint \"ep: \" + ep\n\tprint \"ne: \" + ne\n\tprint \"rb: \" + rb", "def parse_text(text, cmudict):\n text_seq = []\n\n # Normalize the text\n text = tokenize_text(clean_text(text))\n\n # Get the sequence of phonemes for words in the text while explicitly marking word boundaries. 
Incase of OOV words\n # backoff to using character sequence\n for word in text:\n if word.upper() in cmudict:\n text_seq.append(\" \".join([\"@\" + s for s in cmudict[word.upper()].split(\" \")]))\n else:\n text_seq.append(\" \".join(char for char in word))\n\n text_seq.append(_wb)\n\n text_seq = [word.split(\" \") for word in text_seq]\n text_seq = [char for word in text_seq for char in word]\n\n # Insert _bos and _eos symbols\n text_seq.insert(0, _bos)\n text_seq.append(_eos)\n\n return text_seq", "def gen_passage(ngrams, start=None, min_length=100, max_sentence_length=10):\n counter = 0\n strn =\"\"\n while counter < max_sentence_length:\n if start == None:\n start = str(random.choice((list(ngrams.keys()))))\n k = random.choice(ngrams[start])\n strn += str.capitalize(start) +\" \" + \" \".join(k)+\" \"\n #last token/word of selected sequence is the new start token IFF it is a KEY!\n for i in range(min_length):\n start = k[-1]\n if start not in ngrams.keys() and start:\n if \".\" in start:\n start = None\n break\n else:\n strn+=\". \"\n start = None\n #make sure this completely breaks out...\n break\n else:\n k = random.choice(ngrams[start])\n strn+= \" \".join(k)\n counter+=1\n print(strn)\n return strn", "def tokenize_and_split_bis(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n i = -1\n document = 0\n terms = 0\n new_document = True\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n document += 1\n new_document = True\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = i\n w.append(dic[word])\n list3.append(1)\n list4.append(1)\n new_document = False\n terms += 1\n else : \n i = i - 1\n w.append(dic[word])\n list4[dic[word]] += 1\n terms += 1\n if new_document: \n list3[dic[word]] += 1\n new_document = False\n \n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n\n moy = 0\n len_dic = len(dic.keys())\n list5 = [0 for x in range(len_dic)]\n for key in dic.keys():\n if list4[dic[key]] > 0:\n tf = list4[dic[key]] / terms\n idf = math.log(document / list3[dic[key]])\n tfIdf = tf * idf\n list5[dic[key]] = tfIdf\n # print(\"the word \" + str(key) + \" appairs \" + str(list4[dic[key]]) + \" times.\")\n # print(\"his frequency is \" + str(list4[dic[key]] / terms) )\n # print(\"the word \" + str(key) + \" appairs \" + str(list3[dic[key]]) + \" times in each document.\")\n # print(\"his frequency is \" + str(idf))\n # print(\"utility \" + str(tfIdf))\n moy += tfIdf\n \n moy = moy / len_dic \n # print(moy)\n dic_bis = {}\n i = -1\n for key in dic.keys():\n value = list5[dic[key]]\n # print(str(value))\n if (value > oracle * moy):\n i += 1\n dic_bis[key] = i\n # else:\n # print(\"not pass \" + key + \" \" + str(value))\n \n \n # print(dic_bis == dic)\n # print(dic)\n return dic_bis,list1,list2", "def pirate():\n \n from random import choice\n d = {\n \"sir\": \"matey\", \"hotel\": \"fleabag inn\", \"student\": \"swabbie\",\n \"boy\": \"matey\", \"girl\": \"wench\", \"professor\": \"foul blaggart\",\n \"restaurant\": \"galley\", \"your\": \"ye\", \"excuse\": \"arr\", \"you\": \"ye\",\n \"students\": \"swabbies\", \"are\": \"be\", \"lawyer\": \"foul blaggart\",\n \"the\": \"th'\", \"restroom\": \"head\", \"my\": \"me\", \"hello\": \"avast\",\n \"is\": \"be\", \"man\": \"scurvy dog\", \"hey\": \"avast\", \"pirate\": \"scurvy pirate\",\n \"idiot\": \"flunder\", \"young\": \"ye\", 
\"the\": \"t'\", \"suck\": \"blow down\",\n \"fall\": \"hade\", \"happens\": \"be happening\", \"death\": \"Davy Jones' treasure chest\",\n \"always\": \"ever\", \"you're\": \"ye're\", \"girlfriend\": \"Lassie-Lucy\",\n \"with\": \"wit'\", \"everyone\": \"evr'un\"\n }\n #~ sentence = raw_input(\"What be yer sentence, ye old landlubber? \")\n sentences = [\"If the world didn't suck we'd all fall off\",\n \"What happens if you get scared half to death twice?\",\n \"Save water and shower with your girlfriend\",\n \"Always remember you're unique, just like everyone else\"]\n \n start = [\"Yarr, \", \"Yarr! \", \"Yarr-ha-harr! \", \"Skuttle me Skippers! \",\n \"Shiver me timbers! \", \"Ahoy, me hearties! \", \"Dogs ahoy! \",\n \"Hoist the mizzen!!! \", \"Hoist the colors!! \" ]\n end = [\". ARRRGHHHH!\", \". Avast ye varmint!\", \". Yarr?\",\n \". Savvyy?\", \". Yarr!\", \". Aye!\",\n \", ye bilge rat!\", \", ye mangy dog!\", \", ye scallywag!\"]\n\n s = choice(sentences)\n print s\n print choice(start) +\\\n ' '.join([d[i] if i in d else i for i in s.lower().split()]).capitalize()\\\n + choice(end)" ]
[ "0.54976827", "0.54719514", "0.5322297", "0.52816135", "0.52341515", "0.5196226", "0.51849", "0.51808786", "0.51729864", "0.5135752", "0.51132756", "0.50834715", "0.50587803", "0.502308", "0.5018544", "0.5004243", "0.49756864", "0.49671763", "0.4965958", "0.49378312", "0.4929289", "0.4924898", "0.49173275", "0.4915775", "0.4914407", "0.49131304", "0.49016747", "0.48949507", "0.48862708", "0.48650777", "0.48584884", "0.48480204", "0.4840348", "0.4834977", "0.48239902", "0.48194337", "0.48183274", "0.48110002", "0.4810799", "0.47993883", "0.4791383", "0.4789993", "0.47867966", "0.47732708", "0.47688818", "0.4765088", "0.47577107", "0.4756685", "0.4753702", "0.4753531", "0.47494417", "0.47473362", "0.47467518", "0.47466898", "0.47453815", "0.47430617", "0.4739271", "0.4738134", "0.47380012", "0.47238702", "0.47201926", "0.47127786", "0.47111592", "0.47111034", "0.47092763", "0.4702679", "0.47014967", "0.4700882", "0.46968803", "0.4693685", "0.46923476", "0.46895167", "0.46875405", "0.46744388", "0.4673905", "0.46649185", "0.4664565", "0.46641567", "0.46638897", "0.46631783", "0.46600002", "0.4657272", "0.4655251", "0.4654581", "0.46536398", "0.46524283", "0.46494177", "0.4647162", "0.46458334", "0.46377486", "0.46269214", "0.462652", "0.4622248", "0.46156982", "0.4612328", "0.46108297", "0.46011442", "0.45996112", "0.4598099", "0.45974723", "0.45970443" ]
0.0
-1
Rerun CLI command inside a to-be-created conda environment.
def _rerun_in_conda(conda_env_path):
    conda_env_name = _get_or_create_conda_env(conda_env_path)
    activate_path = _get_conda_bin_executable("activate")
    commands = []
    commands.append("source {} {}".format(activate_path, conda_env_name))
    safe_argv = [shlex_quote(arg) for arg in sys.argv]
    commands.append(" ".join(safe_argv) + " --no-conda")
    commandline = " && ".join(commands)
    eprint("=== Running command '{}'".format(commandline))
    child = subprocess.Popen(["bash", "-c", commandline], close_fds=True)
    exit_code = child.wait()
    return exit_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepConda(commands_list, envName = envName):\n commands_list.append('module load conda2')\n commands_list.append('source deactivate') # Removes any pre-existing conda environments\n commands_list.append('source activate {eName}'.format(eName = envName))", "def time_conda_run():\n from conda.testing.helpers import run_inprocess_conda_command\n\n run_inprocess_conda_command(\n \"conda run -n base python -V\",\n disallow_stderr=False,\n )", "def _relaunch_in_controlled_env(argv):\n relfilepath_self = __file__ if __file__ else argv[0]\n cmd_argv = argv[1:]\n dirpath_cwd = os.getcwd()\n dirpath_lwc_root = os.path.dirname(os.path.realpath(relfilepath_self))\n pid = '{pid}'.format(pid = os.getpid())\n cmd_b64 = base64.b64encode(' '.join(argv))\n arglist = ['-b', 'da', '_in_controlled_env',\n dirpath_lwc_root,\n dirpath_cwd,\n pid,\n cmd_b64] + cmd_argv\n sys.path.insert(0, _dirpath_internal(dirpath_lwc_root))\n import da.lwc.run\n return da.lwc.run.python3(arglist, dirpath_lwc_root = dirpath_lwc_root)", "def main(): # pragma: no cover\n # Check if conda is available\n if not is_conda_available():\n path = os.path.abspath(os.path.dirname(sys.argv[0]))\n # print(path, len(sys.argv))\n msg = '''#\n# Please activate the conda root enviroment properly before running the\n# `anaconda-navigator` command.\n'''\n win_msg = '''#\n# To activate the environment please open a Windows Command Prompt and run:\n#\n# {0}\\\\activate root\n'''.format(path)\n\n unix_msg = '''#\n# To activate the environment please open a terminal and run:\n#\n# source {0}/activate root\n'''.format(path)\n\n more_info = '''#\n# For more information please see the documentation at:\n#\n# https://docs.anaconda.com/anaconda/navigator/\n#'''\n if os.name == 'nt':\n print_msg = '{}{}{}'.format(msg, win_msg, more_info)\n else:\n print_msg = '{}{}{}'.format(msg, unix_msg, more_info)\n\n print(print_msg)\n\n return 1\n\n # Parse CLI arguments\n options = parse_arguments()\n\n # Return information on version\n if options.version:\n print(__version__)\n sys.exit(0)\n\n # Reset Navigator conifg\n if options.reset:\n print('\\nAnaconda Navigator configuration reset...\\n\\n')\n from anaconda_navigator.config import CONF_PATH\n if os.path.isdir(CONF_PATH):\n try:\n shutil.rmtree(CONF_PATH)\n print('Anaconda Navigator configuration reset successful!\\n')\n sys.exit(0)\n except Exception as e:\n print('Anaconda Navigator configuration reset failed!!!\\n')\n print(e)\n sys.exit(1)\n\n if options.removelock:\n print('\\nRemoving Anaconda Navigator lock...\\n\\n')\n from anaconda_navigator.config import LOCKFILE\n try:\n os.remove(LOCKFILE)\n print('Anaconda Navigator lock removal successful!\\n')\n sys.exit(0)\n except Exception as e:\n print('Anaconda Navigator lock removal failed!!!\\n')\n print(e)\n sys.exit(1)\n\n # Clean old style logs\n from anaconda_navigator.utils.logs import clean_logs\n clean_logs()\n\n # Import app\n from anaconda_navigator.app.start import start_app\n return exception_handler(start_app, options)", "def run(self, commands: list[str]):\n conda_env_path = self.env_fs.getsyspath(self.name)\n create_script = f\"\"\"\\\neval \"$({self.env_exe} shell.posix hook)\"\nconda activate {conda_env_path}\n{\" \".join(commands)}\n\"\"\"\n with NamedTemporaryFile(mode=\"w\", delete=False) as script_file:\n script_file.write(create_script)\n self.run_script_subprocess(\n script_file.name, capture_output=False, debug_mode=get_debug_mode()\n )", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml 
stop')\n run('fig -f prod.yml up -d')", "def cli(self, env):\n raise NotImplementedError", "def update_envrionment():\n l.critical(\"Try getting Git remote repo...\")\n try:\n import git\n repo = git.Repo()\n o = repo.remotes.origin\n l.info(o.fetch())\n l.info(o.pull())\n except Exception as err:\n l.error(\n \"An error happened while updating COMET source code.\", exc_info=True\n )\n\n l.critical(\"Checking conda environment requirements...\")\n try:\n osType = sys.platform\n if \"win\" in osType.lower():\n version = \"COMET/resources/requirements_Winx86.yml\"\n elif \"linux\" in osType.lower():\n version = \"COMET/resources/requirements_LINUX_x86_64.yml\"\n else:\n version = \"COMET/resources/requirements_MacOS.yml\"\n os.system(\n \"conda env update --prefix ./env --file {} --prune\".format(version)\n )\n except Exception as err:\n l.error(\n \"An error happened while updating COMET environment.\", exc_info=True\n )\n\n l.critical(\"Please restart COMET for the updates to have an effect!\")", "def _conda_activate_cmd(self,pkgs):\n # Make environment name from packages\n env_name = make_conda_env_name(*pkgs)\n # Set up conda wrapper\n if not self._conda_env_dir:\n logger.warning(\"conda environment directory not specified, \"\n \"unable to acquire conda environment '%s'\" %\n env_name)\n return None\n env_dir = os.path.abspath(self._conda_env_dir)\n conda = CondaWrapper(env_dir=env_dir)\n try:\n conda.create_env(env_name,*pkgs)\n conda_env = os.path.join(env_dir,env_name)\n # Script fragment to activate the environment\n return conda.activate_env_cmd(conda_env)\n except CondaWrapperError as ex:\n # Failed to acquire the environment\n logger.warning(\"failed to acquire conda environment \"\n \"'%s': %s\" % (env_name,ex))\n return None", "def _main(argv):\n _in_controlled_env = (2 <= len(argv)) and ('_in_controlled_env' == argv[1])\n\n if not _in_controlled_env:\n return _relaunch_in_controlled_env(argv)\n\n else:\n return _cmd(argv)", "def reconfigure(client, instance_name, command):\n\n # 'command' has 3 parts in a list (1 Command and 2 ARGs)\n exec_Id = client.exec_create(container=instance_name, cmd=command)\n\n exec_start_resp = client.exec_start(exec_Id, stream=True)\n\n # Using a 'single' generator response to solve issue of 'start_exec' returning control after 6 minutes\n for response in exec_start_resp:\n dlog.info(\"Reconfig Script execution response: {:}\".format(response))\n exec_start_resp.close()\n break", "def execute(helper, config, args):\n env_config = parse_env_config(config, args.environment)\n cname_prefix = env_config.get('cname_prefix', None)\n env_name = args.environment\n\n # change version\n if args.version_label:\n helper.deploy_version(env_name, args.version_label)\n if not args.dont_wait:\n helper.wait_for_environments(env_name, status='Ready', version_label=args.version_label)\n\n # update it\n env = parse_env_config(config, env_name)\n option_settings = parse_option_settings(env.get('option_settings', {}))\n helper.update_environment(env_name,\n description=env.get('description', None),\n option_settings=option_settings,\n tier_type=env.get('tier_type'),\n tier_name=env.get('tier_name'),\n tier_version=env.get('tier_version'))\n\n # wait\n if not args.dont_wait:\n helper.wait_for_environments(env_name, health='Green', status='Ready', version_label=args.version_label)\n\n # delete unused\n helper.delete_unused_versions(versions_to_keep=int( get(config, 'app.versions_to_keep', 10) ))", "def run_cmd_env(command):\n # Con virtualenv\n run(\"source 
%(ve_dir)s/bin/activate; %(cmd)s\" % {\n 've_dir': virtualenv_dir,\n 'cmd': command,\n })", "async def restart(ctx):\n dev = ctx.message.author\n if bot.botdev_role in dev.roles or bot.owner_role in dev.roles:\n await ctx.send(\"`Restarting, please wait...`\")\n execv(\"python3 SchmuckBot\", argv)", "def env(parser, args):\n action = subcommand_functions[args.env_command]\n action(args)", "def check_rerun(\n project_dir: Path,\n command: Dict[str, Any],\n *,\n check_spacy_version: bool = True,\n check_spacy_commit: bool = False,\n) -> bool:\n # Always rerun if no-skip is set\n if command.get(\"no_skip\", False):\n return True\n lock_path = project_dir / PROJECT_LOCK\n if not lock_path.exists(): # We don't have a lockfile, run command\n return True\n data = srsly.read_yaml(lock_path)\n if command[\"name\"] not in data: # We don't have info about this command\n return True\n entry = data[command[\"name\"]]\n # Always run commands with no outputs (otherwise they'd always be skipped)\n if not entry.get(\"outs\", []):\n return True\n # Always rerun if spaCy version or commit hash changed\n spacy_v = entry.get(\"spacy_version\")\n commit = entry.get(\"spacy_git_version\")\n if check_spacy_version and not is_minor_version_match(spacy_v, about.__version__):\n info = f\"({spacy_v} in {PROJECT_LOCK}, {about.__version__} current)\"\n msg.info(f\"Re-running '{command['name']}': spaCy minor version changed {info}\")\n return True\n if check_spacy_commit and commit != GIT_VERSION:\n info = f\"({commit} in {PROJECT_LOCK}, {GIT_VERSION} current)\"\n msg.info(f\"Re-running '{command['name']}': spaCy commit changed {info}\")\n return True\n # If the entry in the lockfile matches the lockfile entry that would be\n # generated from the current command, we don't rerun because it means that\n # all inputs/outputs, hashes and scripts are the same and nothing changed\n lock_entry = get_lock_entry(project_dir, command)\n exclude = [\"spacy_version\", \"spacy_git_version\"]\n return get_hash(lock_entry, exclude=exclude) != get_hash(entry, exclude=exclude)", "def command_refresh_repo(self):\n repoinit.refresh(*self.args())", "def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))", "def resetApp(self):\n python = sys.executable\n os.execl(python, python, * sys.argv)", "def run_venv(command, **kwargs):\n run(\"source %s/bin/activate\" % env.venv + \" && \" + command, **kwargs)", "def run_ciao(cmd, clean =0):\n if clean == 1:\n acmd = '/usr/bin/env PERL5LIB=\"\" source /home/mta/bin/reset_param ;' + cmd\n else:\n acmd = '/usr/bin/env PERL5LIB=\"\" LD_LIBRARY_PATH=\"\" ' + cmd\n \n try:\n bash(acmd, env=ciaoenv)\n except:\n try:\n bash(acmd, env=ciaoenv)\n except:\n pass", "def repl_restart(restart: bool = True) -> None:", "def run_sagemaker_pytest_cmd(image):\n pytest_command, path, tag = generate_sagemaker_pytest_cmd(image)\n\n context = Context()\n with context.cd(path):\n context.run(f\"virtualenv {tag}\")\n with context.prefix(f\"source {tag}/bin/activate\"):\n context.run(\"pip install -r requirements.txt\", warn=True)\n context.run(pytest_command)", "def reload_config():\n subprocess.run([SUPERVISOR_CMD, \"reload\"])", "def update(self, env):\n del env\n return", "def conda_create_environment(name, python='3'):\n conda = '{0}/bin/conda'.format(utils.home('apps', 'miniconda'))\n\n run('{conda} create --name {name} python={python} --yes'.format(\n name=name,\n conda=conda,\n python=python))", "def restart_program():\r\n\tpython = 
sys.executable\r\n\tos.execl(python, python, * sys.argv)\r\n\troot = tk.Tk()", "def rerun_calibrator(base_dir):\n import infer_sequence\n # base_dir = 'data/batch4'\n collection_dir = 'calib_data'\n calib = Calibrator().init_from_data(os.path.join(base_dir, collection_dir))\n # calib.gen_calib_parameters()\n calib.show_images()", "def cli(ctx):\n if not ctx.invoked_subcommand:\n auto()", "def restart_supervisor():\n\n require('environment', provided_by=env.environments)\n supervisor.supervisor_command('restart %(environment)s:*' % env)", "def main():\n parser = argparse.ArgumentParser(\n description=\"Relocate a virtual environment.\"\n )\n parser.add_argument(\n \"--source\", help=\"The existing virtual environment.\", required=True\n )\n parser.add_argument(\n \"--destination\",\n help=\"The location for which to configure the virtual environment.\",\n required=True,\n )\n parser.add_argument(\n \"--move\",\n help=\"Move the virtual environment to the destination.\",\n default=False,\n action=\"store_true\",\n )\n\n args = parser.parse_args()\n relocate(args.source, args.destination, args.move)", "def remove(self):\n if self.exists():\n try:\n utils.run_in_bash(\n f'{CONDA_BIN} env remove -q -y -n {self.name}')\n except CalledProcessError as err:\n err_message = err.output.strip().decode('ascii')\n if 'CondaEnvironmentError:' in err_message:\n inform.info('deactivating and retry')\n utils.run_in_bash(\n 'source deactivate && '\n f'{CONDA_BIN} env remove -q -y -n {self.name}')\n else:\n inform.error('Couldn\\'t remove environment. '\n 'Following error occured:')\n print(err_message)\n inform.critical()", "def change_env(args):\n if len(args) != 2:\n raise Exception(\"syco chagne-env [env]\")\n\n env = args[1]\n\n app.print_verbose(\"Change to env \" + env)\n x(\"rm %spasswordstore \" % (SYCO_ETC_PATH))\n x(\"ln -s %spasswordstore.%s %spasswordstore\" % (\n SYCO_ETC_PATH, env, SYCO_ETC_PATH)\n )\n\n if os.access(app.SYCO_USR_PATH, os.F_OK):\n for plugin in os.listdir(app.SYCO_USR_PATH):\n plugin_path = os.path.abspath(app.SYCO_USR_PATH + plugin + \"/etc/\")\n\n x(\"rm %s/install.cfg \" % (plugin_path))\n x(\"ln -s %s/install-%s.cfg %s/install.cfg\" % (plugin_path, env, plugin_path))", "def _restart_environment_episode(self, force_environment_reset=False) -> None:\n raise NotImplementedError(\"\")", "def test_conda_path(mock_env, expected_conda, expected_activate, monkeypatch):\n monkeypatch.delenvs([CONDA_EXE, MLFLOW_CONDA_HOME.name], raising=False)\n monkeypatch.setenvs(mock_env)\n assert mlflow.utils.conda.get_conda_bin_executable(\"conda\") == expected_conda\n assert mlflow.utils.conda.get_conda_bin_executable(\"activate\") == expected_activate", "def test_integration_run_non_existing_command(\n mocker, monkeypatch, set_environment_variables\n):\n logger_info = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"info\")\n monkeypatch.setenv(\"COLUMNS\", \"1000\")\n mocker.patch.object(DefaultApi, \"investigation_add_entries_sync\", return_value=None)\n mocker.patch.object(Runner, \"_get_playground_id\", return_value=\"pg_id\")\n result = CliRunner(mix_stderr=False,).invoke(\n main,\n [\n \"run\",\n \"-q\",\n \"!non-existing-command\",\n \"-D\",\n ],\n )\n assert 0 == result.exit_code\n assert not result.exception\n assert str_in_call_args_list(\n logger_info.call_args_list,\n \"Command did not run, make sure it was written correctly.\",\n )", "def test_interactive(self):\n self.executor.command(['python']).interactive()", "def launch(config):\n \n launch_with_configs([config])", 
"def stop_and_restart():\n logging.info(\"Restarting eduzen_bot...\\n\")\n bot.updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def add_diff_env_to_controller(differentiated_environment):\n env.host_string = ''.join(\n [pytest.symbols.tenant_name,\n '@',\n pytest.symbols.controller_ip,\n ':22'])\n\n @hosts(env.host_string)\n def setup_env_oncontroller(diff_env):\n env.password = pytest.symbols.tenant_password\n execute(lambda: run('sudo ls -la'))\n\n # Stop existing agent\n execute(lambda: run('sudo systemctl stop f5-openstack-agent'))\n # Stop neutron server / f5_plugin\n execute(lambda: run('sudo systemctl stop neutron-server'))\n # Edit agent configuration to use new environment\n sedtempl = '''sed -i \"s/^\\(environment_prefix = \\)\\(.*\\)$/\\\\1%s/\"''' +\\\n ''' /etc/neutron/services/f5/f5-openstack-agent.ini'''\n sedstring = 'sudo ' + sedtempl % diff_env\n execute(lambda: run(sedstring))\n # Add diff env to neutron_lbaas.conf and installed Python package\n add_string = 'sudo add_f5agent_environment %s' % diff_env\n execute(lambda: run(add_string))\n # Start neutron-server / f5_plugin\n execute(lambda: run('sudo systemctl start neutron-server'))\n # Start existing agent\n execute(lambda: run('source keystonerc_testlab && '\n 'sudo systemctl start f5-openstack-agent'))\n\n setup_env_oncontroller(differentiated_environment)", "def exec_anaconda():\n if PSC_PATH_PREFIX in sys.executable:\n from imp import reload\n\n fix_sys_path()\n\n reload(json)\n reload(os)\n reload(platform)\n reload(stat)\n reload(subprocess)\n reload(sys)\n return\n\n check_python_version()\n\n system = (platform.system(), platform.machine())\n if system not in SUPPORTED_SYSTEMS:\n raise Exception('Unsupported platform: %s %s' % (system))\n\n sa_scipy = '%s%s' % (PSC_PATH_PREFIX, SUPPORTED_SYSTEMS[system])\n\n sa_path = os.path.join(get_apps_path(), sa_scipy)\n if not os.path.isdir(sa_path):\n raise Exception('Failed to find Python for Scientific Computing Add-on (%s)' % sa_scipy)\n\n system_path = os.path.join(sa_path, 'bin', '%s' % (SUPPORTED_SYSTEMS[system]))\n\n if system[0] == 'Windows':\n python_path = os.path.join(system_path, 'python.exe')\n # MLA-564: Windows need the DLLs to be in the PATH\n dllpath = os.path.join(system_path, 'Library', 'bin')\n pathsep = os.pathsep if 'PATH' in os.environ else ''\n os.environ['PATH'] = os.environ.get('PATH', '') + pathsep + dllpath\n else:\n python_path = os.path.join(system_path, 'bin', 'python')\n\n # MLA-996: Unset PYTHONHOME\n # XXX: After migration to Python3 PYTHONPATH is not set anymore so this will\n # be unnecessary. SPL-170875\n os.environ.pop('PYTHONHOME', None)\n\n # Ensure that execute bit is set on <system_path>/bin/python\n if system[0] != 'Windows':\n mode = os.stat(python_path).st_mode\n os.chmod(python_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n print('INFO Running %s' % \" \".join([python_path] + sys.argv), sys.stderr)\n sys.stderr.flush()\n\n # In Quake and later PYTHONPATH is removed or not set.\n # So after shelling into PSC Python interpreter will lose\n # information about what Splunk core's Python path is. 
So we\n # stash it into an environment variable to retrieve it after\n # switching into conda.\n os.environ['SPLUNK_CORE_PYTHONPATH'] = json.dumps(sys.path)\n\n try:\n if system[0] == \"Windows\":\n os.environ['MKL_NUM_THREADS'] = '1'\n # os.exec* broken on Windows: http://bugs.python.org/issue19066\n subprocess.check_call([python_path] + sys.argv)\n os._exit(0)\n else:\n os.environ['VECLIB_MAXIMUM_THREADS'] = '1'\n os.environ['OPENBLAS_NUM_THREADS'] = '1'\n os.execl(python_path, python_path, *sys.argv)\n except Exception:\n traceback.print_exc(None, sys.stderr)\n sys.stderr.flush()\n time.sleep(0.1)\n raise RuntimeError(\n 'Error encountered while loading Python for Scientific Computing, see search.log.'\n )", "def restart_llap(self, env):\n Logger.info(\"Custom Command to retart LLAP\")\n import params\n env.set_params(params)\n\n if params.security_enabled:\n self.do_kinit()\n\n self._llap_stop(env)\n self._llap_start(env)", "def cmd_restart(self, app_name=None):\n rc = self.socket_command_with_project('restart', app_name)\n return rc", "def save(self):\n condaExecutable = self.condaExePicker.text()\n if condaExecutable != self.__condaExecutable:\n Preferences.setConda(\"CondaExecutable\", condaExecutable)\n \n import CondaInterface\n CondaInterface.resetInterface()", "def mv_conda_paths_to_front():\n sys.path = get_conda_paths() + get_non_conda_paths()", "def env_revert_setup_parser(subparser):\n subparser.add_argument(\n metavar=\"env\", dest=\"revert_env\", help=\"name or directory of the environment to activate\"\n )\n spack.cmd.common.arguments.add_common_arguments(subparser, [\"yes_to_all\"])", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def main(ctx: typer.Context):\n LOG.debug(F\"COVIDAP: executing command: {ctx.invoked_subcommand}\")", "def project_run_cli(\n # fmt: off\n ctx: typer.Context, # This is only used to read additional arguments\n subcommand: str = Arg(None, help=f\"Name of command defined in the {PROJECT_FILE}\"),\n project_dir: Path = Arg(Path.cwd(), help=\"Location of project directory. 
Defaults to current working directory.\", exists=True, file_okay=False),\n force: bool = Opt(False, \"--force\", \"-F\", help=\"Force re-running steps, even if nothing changed\"),\n dry: bool = Opt(False, \"--dry\", \"-D\", help=\"Perform a dry run and don't execute scripts\"),\n show_help: bool = Opt(False, \"--help\", help=\"Show help message and available subcommands\")\n # fmt: on\n):\n if show_help or not subcommand:\n print_run_help(project_dir, subcommand)\n else:\n overrides = parse_config_overrides(ctx.args)\n project_run(project_dir, subcommand, overrides=overrides, force=force, dry=dry)", "async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True", "def _run_env(self):\n raise NotImplementedError()", "def test_relaunch_deployment_run(self):\n pass", "def do_command(): # pragma: no cover\n args = parse_args(sys.argv[1:])\n status = run(args)\n sys.exit(status)", "def cli(ctx, cname, debug, dist, extra_sources, image, release, rm_first):\n ctx.obj = Workspace(cname, debug, dist, extra_sources, image, release)\n\n ws = ctx.obj\n if \":\" not in ws.image:\n ws.image = \"{}:{}-{}\".format(ws.image, IMAGE_VERSION, ws.dist)\n if ctx.invoked_subcommand != \"build\":\n ws.image += \"-dev\"\n\n # check for prereqs\n if shutil.which(\"docker\") is None:\n error(\"Docker not found in PATH. Please install docker.\")\n sys.exit(1)\n if shutil.which(\"git\") is None:\n error(\"Git not found in PATH. Please install git.\")\n sys.exit(1)\n\n # ensure Docker image is present\n if ctx.invoked_subcommand in [\"run\", \"build\", \"pull\", \"shell\"]:\n try:\n ws.client.images.get(ws.image)\n except docker.errors.ImageNotFound:\n info(\"Pulling image {}...\".format(ws.image))\n ws.client.images.pull(ws.image)\n\n if rm_first:\n ws.docker_remove()", "def reboot(*args):\n args = list(sys.argv) + list(args)\n if args[0] == 'python' or not args[0]:\n args[0] = BIN_PYTHON\n elif os.path.basename(sys.argv[0]) in ['lore', 'lore.exe']:\n args[0] = BIN_LORE\n try:\n os.execv(args[0], args)\n except Exception as e:\n if args[0] == BIN_LORE and args[1] == 'console' and JUPYTER_KERNEL_PATH:\n print(ansi.error() + ' Your jupyter kernel may be corrupt. 
Please remove it so lore can reinstall:\\n $ rm ' + JUPYTER_KERNEL_PATH)\n raise e", "def main():\n args = parse_command_line()\n expt_config = load_config(args.experiment_config_path)\n run_cli(RunOptions.from_dict(expt_config))", "def cd_env(folder='ds_Projects',project=None, mod=None, env=None, envname=None):\n cd = f\"cd ~/{folder}/{project}\"\n cd_ccpy = f\"cd ~/{folder}/{project}/{mod}\"\n activ_ccpy= f\"source .virtualenvs/{envname}/Scripts/activate\"\n activ_conda= f\"conda activate {envname}\"\n if env == 'conda':\n print(cd)\n print(activ_conda)\n elif env=='cc':\n print(f\"cd ~/{folder}/{project}/src/{mod}\")\n print(f'source activate {envname}')\n print(\"\\n- Based on 'standard cookiecutter'\")\n else:\n print(cd_ccpy)\n print(activ_ccpy)\n print(\"\\n- Based on 'cookiecutter-pypackage'\")", "def restart(self):\r\n if self._engine:\r\n self._engine.repl.terminate()\r\n\r\n if \"OCTAVE_EXECUTABLE\" not in os.environ and \"OCTAVE\" in os.environ:\r\n os.environ[\"OCTAVE_EXECUTABLE\"] = os.environ[\"OCTAVE\"]\r\n\r\n try:\r\n self._engine = OctaveEngine(stdin_handler=self._handle_stdin, logger=self.logger)\r\n except Exception as e:\r\n raise Oct2PyError(str(e)) from None\r\n\r\n # Add local Octave scripts.\r\n self._engine.eval('addpath(\"%s\");' % HERE.replace(osp.sep, \"/\"))", "def shell(env_name, command, shell_name, temp_env, pyname, pypath, global_packages): # no cov\n venv_dir = None\n if resolve_path(env_name) == os.getcwd():\n env_name = ''\n\n if not (env_name or temp_env):\n if is_project():\n venv_dir = os.path.join(os.getcwd(), 'venv')\n if not is_venv(venv_dir):\n echo_info('A project has been detected!')\n echo_waiting('Creating a dedicated virtual env... ', nl=False)\n create_venv(venv_dir, use_global=global_packages)\n echo_success('complete!')\n\n with venv(venv_dir):\n echo_waiting('Installing this project in the virtual env... ', nl=False)\n install_packages(['-q', '-e', '.'])\n echo_success('complete!')\n else:\n echo_failure('No project found.')\n sys.exit(1)\n\n if env_name and temp_env:\n echo_failure('Cannot use more than one virtual env at a time!')\n sys.exit(1)\n\n if not command and '_HATCHING_' in os.environ:\n echo_failure(\n 'Virtual environments cannot be nested, sorry! To leave '\n 'the current one type `exit` or press `Ctrl+D`.'\n )\n sys.exit(1)\n\n if temp_env:\n if pyname:\n try:\n settings = load_settings()\n except FileNotFoundError:\n echo_failure('Unable to locate config file. 
Try `hatch config --restore`.')\n sys.exit(1)\n\n pypath = settings.get('pypaths', {}).get(pyname, None)\n if not pypath:\n echo_failure('Unable to find a Python path named `{}`.'.format(pyname))\n sys.exit(1)\n\n temp_dir = TemporaryDirectory()\n env_name = get_random_venv_name()\n venv_dir = os.path.join(temp_dir.name, env_name)\n echo_waiting('Creating a temporary virtual env named `{}`...'.format(env_name))\n create_venv(venv_dir, pypath=pypath, use_global=global_packages, verbose=True)\n else:\n temp_dir = None\n venv_dir = venv_dir or os.path.join(get_venv_dir(), env_name)\n if not os.path.exists(venv_dir):\n echo_failure('Virtual env named `{}` does not exist.'.format(env_name))\n sys.exit(1)\n\n result = None\n\n try:\n if command:\n with venv(venv_dir):\n echo_waiting('Running `{}` in {}...'.format(\n ' '.join(c if len(c.split()) == 1 else '\"{}\"'.format(c) for c in command),\n '`{}`'.format(env_name) if env_name else \"this project's env\"\n ))\n result = subprocess.run(command, shell=NEED_SUBPROCESS_SHELL).returncode\n else:\n with venv(venv_dir) as exe_dir:\n result = run_shell(exe_dir, shell_name)\n finally:\n result = 1 if result is None else result\n if temp_dir is not None:\n temp_dir.cleanup()\n\n sys.exit(result)", "def run() -> ():\n if len(sys.argv) > 1:\n show_help()\n errs = get_cargo_input()\n main_stack = make_proj_stack(errs)\n while len(main_stack) > 0:\n file_stack = make_file_stack(main_stack)\n overwrite(file_stack)\n\n # FIXME", "def exec_python_rc(*args, **kwargs):\n cmdargs, kwargs = __wrap_python(args, kwargs)\n return exec_command_rc(*cmdargs, **kwargs)", "def _create_dials_env_script():\n import libtbx.load_env\n import os\n\n filename = abs(libtbx.env.build_path.dirname() / \"dials\")\n if not os.path.exists(filename):\n return\n\n if os.name == \"nt\":\n return # here be dragons\n\n script = \"\"\"\n#!/bin/bash\n\nif [ -n \"${LIBTBX_BUILD_RELOCATION_HINT}\" ]; then\n # possibly used for some logic in the installer\n LIBTBX_BUILD=\"${LIBTBX_BUILD_RELOCATION_HINT}\"\n LIBTBX_BUILD_RELOCATION_HINT=\n export LIBTBX_BUILD_RELOCATION_HINT\nelif [ -n \"$BASH_SOURCE\" ]; then\n LIBTBX_BUILD=\"$(dirname -- \"${BASH_SOURCE[0]}\")/build\"\nelse\n LIBTBX_BUILD=\"%s\"\nfi\n\n# make path absolute and resolve symlinks\nLIBTBX_BUILD=$(cd -P -- \"${LIBTBX_BUILD}\" && pwd -P)\n\n# enable conda environment\nsource ${LIBTBX_BUILD}/../conda_base/etc/profile.d/conda.sh\nconda activate $(dirname -- \"${LIBTBX_BUILD}\")/conda_base\n\n# prepend cctbx /build/bin directory to PATH\nPATH=\"${LIBTBX_BUILD}/bin:${PATH}\"\nexport PATH\n\n# enable DIALS command line completion\n[ -n \"$BASH_VERSION\" ] && {\n source $(libtbx.find_in_repositories dials/util/autocomplete.sh) && \\\n source ${LIBTBX_BUILD}/dials/autocomplete/bash.sh || \\\n echo dials command line completion not available\n}\n\nunset LIBTBX_BUILD\n\"\"\".lstrip()\n with open(filename, \"w\") as fh:\n fh.write(script % abs(libtbx.env.build_path))\n mode = os.stat(filename).st_mode\n mode |= (mode & 0o444) >> 2 # copy R bits to X\n os.chmod(filename, mode)\n\n for clobberfile in (\n \"setpaths_all.csh\",\n \"setpaths_all.sh\",\n \"setpaths.csh\",\n \"setpaths_debug.csh\",\n \"setpaths_debug.sh\",\n \"setpaths.sh\",\n ):\n with open(abs(libtbx.env.build_path / clobberfile), \"w\") as fh:\n fh.write(\"echo '%s'\\n\" % (\"*\" * 74))\n fh.write(\"echo The script to set up the DIALS environment has changed\\n\")\n fh.write(\"echo Please source or run '%s' instead\\n\" % filename)\n fh.write(\"echo '%s'\\n\" % (\"*\" * 74))", 
"def run(crate, executable, keep_wd, dry_run):\n run_crate(crate, executable=executable, keep_wd=keep_wd, dry_run=dry_run)", "def RestartApp(argv_extra):\n\n p = psutil.Process(os.getpid())\n for handler in p.open_files() + p.connections():\n if handler.fd != -1:\n os.close(handler.fd)\n\n exe = sys.executable\n argv = list(sys.argv)\n argv.append(argv_extra)\n\n os.execl(exe, exe, *argv)", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def restart(reason, *args, **kwargs):\n logging.info(\"Restarting: %s\" % reason)\n os.execv(sys.argv[0], sys.argv)", "def reload_test(test_name):\n sudo(\"restart %s\" % test_name)", "def arun(ctx, user_cmd):\n connecter = ScalingoInterface(ctx.obj)\n connecter.detached = True\n connecter.run(user_cmd)", "def rerun(self, i):\n\n i['rerun']=True\n\n return self.run(i)", "def setup_env(mocker, request):\n env_dir = USER_ENVS_DIR / ENV_NAME\n mocker.patch(\n \"conda_env_tracker.history.debug.get_pip_version\",\n mocker.Mock(return_value=\"18.1\"),\n )\n mocker.patch(\"conda_env_tracker.env.get_all_existing_environment\")\n mocker.patch(\"conda_env_tracker.main._ask_user_to_sync\")\n mocker.patch(\"conda_env_tracker.gateways.conda.run_command\")\n initial_conda_packages = {\"pandas\": Package(\"pandas\", \"pandas\", \"0.23\", \"py_36\")}\n get_package_mock = mocker.patch(\n \"conda_env_tracker.env.get_dependencies\",\n mocker.Mock(return_value={\"conda\": initial_conda_packages, \"pip\": {}}),\n )\n condarc_channels = [\"conda-forge\", \"main\"]\n mocker.patch(\n \"conda_env_tracker.env.get_conda_channels\",\n mocker.Mock(return_value=condarc_channels),\n )\n if \"channels\" in request.param[\"input\"]:\n channels = request.param[\"input\"][\"channels\"]\n else:\n channels = condarc_channels\n id_mock = mocker.patch(\"conda_env_tracker.history.history.uuid4\")\n id_mock.return_value = \"my_unique_id\"\n env = main.create(name=ENV_NAME, **request.param[\"input\"])\n env_io = EnvIO(env_directory=env_dir)\n yield {\n \"channels\": channels,\n \"env\": env,\n \"env_io\": env_io,\n \"expected\": request.param[\"expected\"],\n \"get_package_mock\": get_package_mock,\n \"initial_conda_packages\": initial_conda_packages,\n \"id\": id_mock,\n }\n if env_dir.exists():\n shutil.rmtree(env_dir)", "def exec_init_cmd(self):\n\n sys.argv = ['-c']\n self.push(self.rc.c)", "async def reacrole(self, ctx: commands.Context):\n pass", "def _with_deploy_env(commands=[]):\n with cd(env.directory):\n for command in commands:\n run(command)", "def run_shell(kit):\n context = {\n 'kit': kit,\n }\n try:\n import IPython\n except ImportError:\n interact(local=context)\n else:\n interactive_shell = IPython.frontend.terminal.embed.InteractiveShellEmbed()\n interactive_shell(local_ns=context)", "def cli(ctx, confirm):\n builder_data.register_context(ctx, auto_reset=False)\n\n if confirm != True:\n raise cli_exception.CliException(\"You must use the --confirm option\")\n\n builder_data.reset()", "def test_with_correct_env(self, monkeypatch, capsys, tmpdir, data):\n path = str(tmpdir.join(\"naedw.txt\"))\n with open(path, mode=\"w\"):\n pass\n monkeypatch.setenv(\"ORION_RESULTS_PATH\", path)\n reloaded_client = reload(cli)\n\n assert reloaded_client.IS_ORION_ON is True\n assert reloaded_client.RESULTS_FILENAME == path\n assert reloaded_client._HAS_REPORTED_RESULTS is False\n\n reloaded_client.report_results(data)\n out, err = capsys.readouterr()\n assert reloaded_client._HAS_REPORTED_RESULTS is True\n assert out == \"\"\n assert err == 
\"\"\n\n with open(path) as results_file:\n res = json.load(results_file)\n assert res == data", "def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))", "def restart_celery():\n puts(yellow(\"Restart celery worker\"))\n with prefix('source %s' % in_rwd('bin/activate')):\n sudo('supervisorctl restart celery-worker', user=env.app_user)\n time.sleep(1)\n sudo('supervisorctl status', user=env.app_user)", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def install_cli(version, environment, tester_conf):\n # Work with multiple scenarios in the same feature, rather than\n # failing due to already being installed\n if not environment.cli_installed:\n environment.cfy.pip_install(version=version)\n environment.cli_installed = True\n # Make sure we use profiles in the test dir only\n os.environ['CFY_WORKDIR'] = environment.workdir", "def runCommand(configbase, robase, argv):\n (options, args) = parseCommandArgs(argv)\n status = 1\n if options:\n status = run(configbase, options, args)\n return status", "def runCommand(command):\n None", "def test_check_cmd_no_environment(env_name: str, context: dict, caplog):\n caplog.set_level(logging.DEBUG)\n # GIVEN a environment that does not exist\n delete_conda_env(Process(\"conda\"), env_name)\n assert conda_env_exists(env_name) is False\n # GIVEN a cli runner\n runner = CliRunner()\n # GIVEN a context with basic information\n\n # WHEN running the command to check if deployment is possible\n runner.invoke(check_cmd, [], obj=context)\n\n # THEN assert that it communicates that the environment does not exist\n assert \"Please use 'shipping provision' to create valid conda environment\" in caplog.text", "def main(ctx):\n\n print(\"Mode:\", ctx.invoked_subcommand)", "def _reset(self, env_id: np.ndarray) -> None:", "def run_exec_command(command):\n if not is_debug_environment():\n import cli\n return cli.cli(command)", "def main():\n try:\n merge_envs(parse_args())\n except MergeError:\n return 1", "def activate(self, config):\n new_workspace = ClonedWorkspace(config.cloning_config)\n\n self.cleanup()\n\n self._config = config\n self._workspace = new_workspace", "def _interactively_fix_missing_variables(project, result):\n if project.problems:\n return False\n\n if not console_utils.stdin_is_interactive():\n return False\n\n # We don't ask the user to manually enter CONDA_PREFIX\n # (CondaEnvRequirement) because it's a bizarre/confusing\n # thing to ask.\n can_ask_about = [\n status for status in result.statuses if (not status and isinstance(status.requirement, EnvVarRequirement)\n and not isinstance(status.requirement, CondaEnvRequirement))\n ]\n\n if can_ask_about:\n print(\"(Use Ctrl+C to quit.)\")\n\n start_over = False\n values = dict()\n for status in can_ask_about:\n reply = console_utils.console_input(\"Value for \" + status.requirement.env_var + \": \",\n encrypted=status.requirement.encrypted)\n if reply is None:\n return False # EOF\n reply = reply.strip()\n if reply == '':\n start_over = True\n break\n values[status.requirement.env_var] = reply\n\n if len(values) > 0:\n status = project_ops.set_variables(project, result.env_spec_name, values.items(), result)\n if status:\n return True\n else:\n console_utils.print_status_errors(status)\n return False\n else:\n return start_over", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def reset_project(ctx, path):\n with ctx.cd(path):\n ctx.run(\"rm -rf 
project.state repos\")\n ctx.run(\"newt -v upgrade\")", "def jupyter_run(config, notebook=None,\n prepare_only=False,\n param=None,\n param_raw=None):\n # TODO implements parameters passing\n if notebook:\n app_config = AiscalatorConfig(config=config,\n step_selection=notebook)\n else:\n app_config = AiscalatorConfig(config=config)\n return command.jupyter_run(app_config, prepare_only=prepare_only,\n param=param, param_raw=param_raw)", "def interactive(parsed_args, headers, results, force=False):\n context = _read_current(parsed_args)\n interactive = context['interactive'] or force\n\n if interactive:\n print('\\nContainer registry access:')\n print('--------------------------')\n\n for iv in VARS:\n prompt_name = iv.replace('_', ' ').title()\n key_name = ENV_PREFIX + iv\n header_name = iv.lower()\n\n if interactive:\n if settings.redact.key_is_private(key_name):\n is_secret = True\n else:\n is_secret = False\n value = prompt(prompt_name, context[key_name], secret=is_secret)\n else:\n value = context[key_name]\n\n if value is not None and value != '':\n settings_set(key_name, value)\n\n headers.append(header_name)\n results.append(settings.redact.auto_redact(header_name, value))\n\n return (headers, results)", "def resetTool(*args, **kwargs)->None:\n pass", "def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def anaconda(channel, token):\n\n pkg_location, pkg_name = get_package_info()\n pkg_dir, _ = os.path.split(pkg_location)\n pkg_root = pkg_dir.replace('linux-64', '')\n\n osx_location = pkg_location.replace('linux-64', 'osx-64')\n\n cmd = 'anaconda -t {token} upload -u nsidc {location} -c {channel} --force && '\n cmd += 'conda convert {location} -p osx-64 -o {root} &&'\n cmd += 'anaconda -t {token} upload -u nsidc {osx_location} -c {channel} --force'\n local(cmd.format(location=pkg_location,\n name=pkg_name,\n channel=channel,\n root=pkg_root,\n token=token,\n osx_location=osx_location))", "def update_freshclam(module, freshclam_binary):\n rc_code, out, err = module.run_command(\"%s\" % (freshclam_binary))\n return rc_code, out, err", "def reset(args):\n if os.path.exists(args.config):\n os.remove(args.config)\n return" ]
[ "0.6140316", "0.5963909", "0.57024294", "0.56684333", "0.5625119", "0.56162804", "0.5252688", "0.5224873", "0.5202257", "0.51971525", "0.51945513", "0.5131335", "0.5123417", "0.51196927", "0.5108993", "0.50988483", "0.5078373", "0.50722355", "0.50060016", "0.49738103", "0.4968838", "0.49574235", "0.49450293", "0.49130514", "0.49050978", "0.49033588", "0.48745516", "0.486378", "0.48633996", "0.4859364", "0.48531502", "0.4838259", "0.48157212", "0.4802765", "0.4788432", "0.47714758", "0.47686052", "0.47559255", "0.47468817", "0.4736154", "0.4735234", "0.4727922", "0.4725398", "0.471072", "0.47107038", "0.4707723", "0.47035447", "0.46885994", "0.46688625", "0.46664062", "0.46574652", "0.46516427", "0.46492955", "0.4642071", "0.4639023", "0.46306393", "0.462334", "0.46101448", "0.4607627", "0.46072406", "0.46049625", "0.45991853", "0.4595827", "0.45942166", "0.45879897", "0.45673203", "0.45543253", "0.45518482", "0.4550502", "0.4550454", "0.45490175", "0.45469347", "0.45393714", "0.45390496", "0.4536049", "0.45354822", "0.4529563", "0.4527391", "0.45243347", "0.45111045", "0.4499624", "0.44946948", "0.44924927", "0.44902343", "0.4486564", "0.44860673", "0.44780782", "0.44777694", "0.44656512", "0.44655606", "0.44636533", "0.44555712", "0.44548717", "0.44544256", "0.44512728", "0.4445679", "0.4444491", "0.44441706", "0.44435197", "0.44408986" ]
document_score: 0.71491045
document_rank: 0
Serve Python models locally. To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI environment variable to the URL of the desired server.
def commands(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serve(model_path, run_id, port, host, no_conda):\n if run_id:\n model_path = _get_model_log_dir(model_path, run_id)\n\n model_env_file = _load_model_env(model_path)\n if not no_conda and model_env_file is not None:\n conda_env_path = os.path.join(model_path, model_env_file)\n return _rerun_in_conda(conda_env_path)\n\n app = scoring_server.init(load_pyfunc(model_path))\n app.run(port=port, host=host)", "def serve(model_path, host, port, debug):\n\n app = Scorer(__name__, model_path=model_path)\n app.run(host=host, port=port, debug=debug)", "def serve(self):\n\n with self.graph.as_default():\n\n if self.builder == None:\n self.builder = tf.saved_model.builder.SavedModelBuilder(self.path + '/build/')\n\n # Generate softmax output.\n prediction = tf.nn.softmax(self.output, name='predict_probability')\n prediction_adv = tf.nn.softmax(self.output_adv, name='prediction_probability_adv')\n \n\n # Build `SignatureDef`.\n # See https://www.tensorflow.org/serving/signature_defs .\n inputs = {k.name: tf.saved_model.utils.build_tensor_info(k) for k in self.eval_config}\n inputs[SIGNATURE_INPUT] = tf.saved_model.utils.build_tensor_info(self.feature_holder)\n\n outputs = {SIGNATURE_OUTPUT: tf.saved_model.utils.build_tensor_info(prediction), SIGNATURE_OUTPUT_ADV: tf.saved_model.utils.build_tensor_info(prediction_adv)}\n\n signature = tf.saved_model.signature_def_utils.build_signature_def(inputs, outputs, SIGNATURE_METHOD_NAME)\n self.builder.add_meta_graph_and_variables(self.session, tags=[tf.saved_model.tag_constants.SERVING], signature_def_map={SIGNATURE_KEY: signature})\n self.builder.save()", "def run_local_api():\n\tcfg = settings.LocalConfig()\n\tapp = make_app(blueprints.api_server, settings.LocalConfig)\n\tapp.run(host = cfg.SERVERNAME, port = cfg.API_PORT, debug = True)", "def server(self, host=None, port=None):\n self._prepare_models()\n\n if not host:\n host = config.HOST\n\n if not port:\n port = config.PORT\n\n web.app.run(host=host, port=port)", "def flask_server(real_model):\n def run_app(port):\n if not real_model:\n app.config['FAKE_MODEL'] = True\n app.run(port=port, use_reloader=False)\n\n server_process = Process(target=run_app, args=(PORT, ))\n server_process.start()\n\n # Give 2 secs for the Flask server to start up\n time.sleep(2)\n\n yield f'http://localhost:{PORT}'\n\n server_process.terminate()", "def preload_model(\n mlflow_tracking_uri: str,\n experiment_name: str,\n run_id: str,\n):\n\n mlflow.set_tracking_uri(mlflow_tracking_uri)\n mlflow.set_experiment(experiment_name)\n experiment_details = mlflow.get_experiment_by_name(experiment_name)\n\n mlflow.end_run()\n mlflow.start_run(run_id=run_id)\n\n # pull model from tracking uri\n artifact_loc = (\n str(experiment_details.artifact_location)\n .replace(\"file:\", \"\")\n .replace(\"///\", \"\")\n )\n loc_prefix = \"\"\n if \"P1-AnalyzeTrades\" not in os.getcwd():\n loc_prefix = r\"P1-AnalyzeTrades/\"\n\n metrics, params, tags = parse_mlflow_info(mlflow.get_run(run_id))\n\n model_type = get_model_type(tags)\n\n if model_type == \"sklearn\":\n try: # first try local path]\n mdl = pickle.load(\n open(f\"{artifact_loc}/{run_id}/artifacts/model/model.pkl\", \"rb\")\n )\n except: # then try repo specific path for finalized cases\n mdl = pickle.load(\n open(f\"{loc_prefix}mlruns/0/{run_id}/artifacts/model/model.pkl\", \"rb\")\n )\n else:\n # for h2o models\n h2o.init()\n try:\n logged_model = f\"runs:/{run_id}/model\"\n # logged_model = f'mlruns/0/{run_id}/artifacts/model'\n mdl = mlflow.pyfunc.load_model(logged_model)\n\n # 
mojo deprecated\n # mdl = h2o.import_mojo(f'{artifact_loc}/{run_id}/artifacts/')\n except:\n logged_model = f\"{loc_prefix}mlruns/0/{run_id}/artifacts/model\"\n mdl = mlflow.pyfunc.load_model(logged_model)\n\n mlflow.end_run()\n\n # load cat dict, if available\n cat_dict = {}\n cat_dict_loc = f\"{artifact_loc}/{run_id}/artifacts/cat_dict.pkl\"\n if os.path.exists(cat_dict_loc):\n cat_dict = pickle.load(open(cat_dict_loc, \"rb\"))\n else: # then try repo specific path for finalized cases\n cat_dict_loc = f\"{loc_prefix}mlruns/0/{run_id}/artifacts/cat_dict.pkl\"\n if os.path.exists(cat_dict_loc):\n cat_dict = pickle.load(open(cat_dict_loc, \"rb\"))\n\n return mdl, cat_dict", "def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break", "def create_local_deployment(model_uri: Text) -> Tuple[sp.Popen, int]:\n\n conf = Config()\n port = get_free_tcp_port()\n log_path = os.path.join(conf.deployments_logs_dir, model_uri.replace('/','_') + '.log')\n process = sp.Popen(\n [\n f'mlflow models serve --no-conda -m '\n f'{model_uri} '\n f'--host 0.0.0.0 --port {port} --workers {conf.get(\"DEPLOY_SERVER_WORKERS\")} '\n f'2>&1 | tee -a {log_path}'\n ],\n shell=True\n )\n\n return process, port", "def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )", "def run_task_in_model_specific_flask_server(start):\n global GLOBAL_FLASK_SERVER_PROCESS\n global LAST_GLOBAL_QUEUE_NAME\n model_specific_queue_name = processing.get_model_specific_queue_name(start.operation, start.arguments)\n 
trained_model = TrainedModel.objects.get(pk=processing.get_model_pk_from_args(start.operation,start.arguments))\n new_envs = os.environ.copy()\n for k in {'PYTORCH_MODE','CAFFE_MODE','MXNET_MODE'}:\n if k in new_envs:\n del new_envs[k]\n if trained_model.mode == TrainedModel.PYTORCH:\n new_envs['PYTORCH_MODE'] = '1'\n elif trained_model.mode == TrainedModel.CAFFE:\n new_envs['CAFFE_MODE'] = '1'\n elif trained_model.mode == TrainedModel.MXNET:\n new_envs['MXNET_MODE'] = '1'\n if GLOBAL_FLASK_SERVER_PROCESS is None or LAST_GLOBAL_QUEUE_NAME != model_specific_queue_name:\n if GLOBAL_FLASK_SERVER_PROCESS:\n GLOBAL_FLASK_SERVER_PROCESS.terminate()\n os.remove('flask.pid')\n elif os.path.isfile('flask.pid'):\n try:\n os.kill(int(file('flask.pid').read()),signal.SIGTERM)\n except:\n pass\n GLOBAL_FLASK_SERVER_PROCESS = subprocess.Popen(['python', 'scripts/run_flask.py',\n start.operation, str(start.pk)],env=new_envs)\n LAST_GLOBAL_QUEUE_NAME = model_specific_queue_name\n max_attempts = 15\n while max_attempts:\n try:\n r = requests.get('http://localhost:{port}/'.format(port=settings.GLOBAL_MODEL_FLASK_SERVER_PORT))\n if r.ok:\n break\n except:\n max_attempts -= 1\n time.sleep(4)\n r = requests.get('http://localhost:{port}/{pk}/'.format(port=settings.GLOBAL_MODEL_FLASK_SERVER_PORT,pk=start.pk))\n if not r.ok:\n raise ValueError(\"Coud not process\")\n return True", "async def serve_web(self):\n interface = \"0.0.0.0\" if settings.PUBLIC_ACCESS else \"127.0.0.1\"\n port = settings.WEB_PORT\n self.logger.info(f\"web: starting the server on {interface}:{port}...\")\n await self.runner.setup()\n site = aioweb.TCPSite(self.runner, interface, port)\n await site.start()\n self.preparing_task = None", "def runserver():\n local_addr = \"0.0.0.0:8000\"\n local(\"{} exec web python3 manage.py runserver {} {}\".format(\n dc, local_addr, settings))", "def log_model_without_starting_new_run():\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )", "def local_main():\n uvicorn.run(app, host=\"0.0.0.0\", port=5000)", "def web():\n from mephisto.client.server import app\n\n app.run(debug=False)", "def run_models(request):\n job_form_data = request.session['job_form_data']\n job_wrapper = JobWrapper(job_form_data)\n job_wrapper.create_data_file()\n print job_wrapper.job_form_data\n # Must run emits to generate emis_co2.dat - this step is requried to\n # run the models and it's a lot simpler to have it run form here than\n # from a job manager script\n cmd = \"/var/opt/IMOGEN/EMITS/emits\"\n subprocess.call(cmd, shell=True)\n print \"Ran {0} program\".format(cmd)\n # Now submit the models via the job manager\n jr = DRMAAJobRunner()\n return jr.queue_job(job_wrapper)", "def lms(options):\r\n settings = getattr(options, 'settings', None)\r\n port = getattr(options, 'port', None)\r\n fast = getattr(options, 'fast', False)\r\n run_server('lms', settings=settings, port=port, skip_assets=fast)", "def serve() -> None: # pragma: no cover-behave\n logging.getLogger().setLevel(logging.INFO)\n database = init_database()\n init_bottle(database)\n server_port = os.environ.get(\"SERVER_PORT\", \"5001\")\n bottle.run(server=\"gevent\", host=\"0.0.0.0\", port=server_port, reloader=True, log=logging.getLogger()) # nosec", "def run(args: argparse.Namespace) -> None:\n import rasa\n\n args.endpoints = rasa.cli.utils.get_validated_path(\n args.endpoints, \"endpoints\", DEFAULT_ENDPOINTS_PATH, True\n )\n args.credentials = rasa.cli.utils.get_validated_path(\n args.credentials, \"credentials\", DEFAULT_CREDENTIALS_PATH, True\n )\n\n if args.enable_api:\n if not args.remote_storage:\n args.model = _validate_model_path(args.model, \"model\", DEFAULT_MODELS_PATH)\n rasa.run(**vars(args))\n return\n\n # if the API is not enable you cannot start without a model\n # make sure either a model server, a remote storage, or a local model is\n # configured\n\n import rasa.model\n from rasa.core.utils import AvailableEndpoints\n\n # start server if remote storage is configured\n if args.remote_storage is not None:\n rasa.run(**vars(args))\n return\n\n # start server if model server is configured\n endpoints = AvailableEndpoints.read_endpoints(args.endpoints)\n model_server = endpoints.model if endpoints and endpoints.model else None\n if model_server is not None:\n rasa.run(**vars(args))\n return\n\n # start server if local model found\n args.model = _validate_model_path(args.model, \"model\", DEFAULT_MODELS_PATH)\n local_model_set = True\n try:\n rasa.model.get_local_model(args.model)\n except ModelNotFound:\n local_model_set = False\n\n if local_model_set:\n rasa.run(**vars(args))\n return\n\n rasa.shared.utils.cli.print_error(\n f\"No model found. You have three options to provide a model:\\n\"\n f\"1. Configure a model server in the endpoint configuration and provide \"\n f\"the configuration via '--endpoints'.\\n\"\n f\"2. Specify a remote storage via '--remote-storage' to load the model \"\n f\"from.\\n\"\n f\"3. 
Train a model before running the server using `rasa train` and \"\n f\"use '--model' to provide the model path.\\n\"\n f\"For more information check {DOCS_BASE_URL}/model-storage.\"\n )", "def load_extension(nb_server_app):\n\n mlflowHandlers = [\n (\"/mlflow/serve\", MLFlowModelServeHandler),\n (\"/mlflow/run\", MLFlowModelRunHandler),\n (\"/mlflow/build\", MLFlowModelBuildHandler),\n (\"/mlflow/test\", MLFlowModelTestHandler),\n (\"/mlflow/gitclone\", MLFlowGitCloneHandler)\n ]\n web_app = nb_server_app.web_app\n host_pattern = '.*$'\n base_url = web_app.settings['base_url']\n global_base_url = nb_server_app.connection_url\n print(global_base_url)\n # route_pattern = url_path_join(web_app.settings['base_url'], '/mlflow/serve')\n\n handlers = [(url_path_join(base_url, handler[0]), handler[1]) for handler in mlflowHandlers]\n web_app.add_handlers(host_pattern, handlers)", "def run(\n trained_model: Ridge,\n mlflow: mlflow,\n model_name: str = \"diabetes\",\n app_logger: AppLogger = get_disabled_logger(),\n parent_tracer: Tracer = None,\n) -> ModelVersion:\n logger = logging.getLogger(__name__)\n try:\n component_name = \"Diabetes_Publish_Model\"\n\n # mlflow tracking\n mlflow_run = mlflow.active_run()\n mlflow_run_id = mlflow_run.info.run_id\n mlflow_experiment_id = mlflow_run.info.experiment_id\n\n logger = app_logger.get_logger(\n component_name=component_name,\n custom_dimensions={\n \"mlflow_run_id\": mlflow_run_id,\n \"mlflow_experiment_id\": mlflow_experiment_id,\n },\n )\n tracer = app_logger.get_tracer(\n component_name=component_name, parent_tracer=parent_tracer\n )\n\n logger.info(\"Running MLOps publish model\")\n\n temp_model_dir = tempfile.mkdtemp()\n model_path = os.path.join(temp_model_dir, model_name)\n with tracer.span(\"save_model\"):\n mlflow.sklearn.save_model(trained_model, model_path)\n mlflow.log_artifact(model_path)\n model_uri = \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=model_name\n )\n\n logger.info(\"Publishing trained model into mlflow model registry\")\n with tracer.span(\"register_model\"):\n model_details = mlflow.register_model(model_uri=model_uri, name=model_name)\n model_version = model_details.version\n\n mlflow.log_param(\"model_version\", model_version)\n mlflow.log_param(\"model_name\", model_name)\n\n logger.info(f\"published model name: {model_name}, version: {model_version}\")\n logger.info(\"Completed MLOps publish model\")\n\n return model_details\n except Exception as exp:\n logger.error(\"an exception occurred in publish model\")\n raise Exception(\"an exception occurred in publish model\") from exp", "def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)", "def run_model(request):\n\n params = unpack_post_parameters(request.POST)\n\n config = []\n df = pd.DataFrame(params['mid_atlantic_mortality'],\n columns=params['mid_atlantic_active_areas'])\n config.append(df)\n df = pd.DataFrame(params['georges_bank_mortality'],\n columns=params['georges_bank_active_areas'])\n config.append(df)\n\n # Invoke the SAMS model wrapper, which in turn runs the model.\n with tempfile.TemporaryDirectory() as tdir:\n s = SamsWrapper(outdir=tdir, numruns=100, startyear=params['start_year'],\n access_area_management=config,\n open_area_f=params['open_area_f_mortality'])\n s.run()\n\n create_web_outputs(params, s)\n\n return render(request, 'results.html')", "def load_model(app: FastAPI) -> None:\n\n logging.info(\"Starting up the application\")\n model_path 
= DATA_MODEL_PATH\n\n if model_path.exists():\n model = FraudDetection(model_path)\n app.state.model = model\n logging.info(f\"Loaded model {model_path}\")\n else:\n app.state.model = FraudDetection()\n logging.warning(f\"No existing model found in {model_path}\")", "def run(self):\n self.__server.serve_forever()", "def deploymodel():\n appliances = request.json.get('appliances')\n print(\"APPLIANCES : \" + str(appliances))\n if len(appliances) != 0:\n\n # debug\n logging.info(\"--- Start LSTM Forecast API ---\")\n logging.info(\"Load model ...\")\n\n global lstm_models\n lstm_models = {}\n\n for appliance in appliances:\n logging.info(\"Initialize model for appliance: \" + appliance)\n lstm_models[appliance] = LSTMSeries(\n opts, appliance + '_train_values',\n appliance+'_model_1', appliance+'_scaler')\n lstm_models[appliance].init_state(appliance+'_train_normalized')\n logging.info(\"Start web service ...\")\n else:\n print(\"Please provide some appliance \"\n \"for which models shall be started\")\n return jsonify({'currently-deployed-models': str(appliances)}), 200", "def run():\n print('*-----------------------------------*')\n print('Running main.py ...')\n model = MLPModel(CFG, name='tfds_tryout')\n print('* Model defined')\n model.load_data(method='tfds')\n print('* Data Loaded')\n print(model.datasetinfo)\n model.build()\n model.train()\n model.evaluate()\n model.save()", "def serve(ssl='n', port='5000'):\n if not 'project_path' in env:\n _setup_env()\n\n opts = ' -p '+port\n if do(ssl):\n opts += ' -s'\n\n with lcd(join(env.project_path)):\n if exists(join(env.project_path, 'manage.py')):\n local('python manage.py runserver')\n elif _config and 'deploy' in _config:\n if int(port) < 1024:\n local('sudo python website/app.py'+opts)\n else:\n local('python website/app.py'+opts)\n else:\n if int(port) < 1024:\n local('sudo python api.py'+opts)\n else:\n local('python api.py'+opts)", "def _setup_hosting_endpoint(self, model_id, wait, **kwargs):\n # this should only be called once per experiment\n model_record = self.model_db_client.get_model_record(self.experiment_id, model_id)\n\n # create resource for firehost stream if not running in 'local' mode\n environ_vars = self._get_hosting_environ_vars(model_id)\n if not self.local_mode:\n stream_name = self.experiment_id\n s3_prefix = f\"{self.experiment_id}/inference_data\"\n self.resource_manager.create_firehose_stream_if_not_exists(stream_name, s3_prefix)\n environ_vars[\"FIREHOSE_STREAM\"] = stream_name\n\n sagemaker_model = sagemaker.model.Model(\n image_uri=self.image,\n role=self.resource_manager.iam_role_arn,\n name=model_id,\n model_data=model_record[\"s3_model_output_path\"],\n sagemaker_session=self.sagemaker_session,\n env=environ_vars,\n **kwargs,\n )\n\n hosting_instance_count = self.resource_manager.hosting_fleet_config.get(\"instance_count\", 1)\n hosting_instance_type = self.resource_manager.hosting_fleet_config.get(\n \"instance_type\", \"local\"\n )\n\n try:\n sagemaker_model.deploy(\n initial_instance_count=hosting_instance_count,\n instance_type=hosting_instance_type,\n endpoint_name=self.experiment_id,\n wait=wait,\n )\n except Exception as e:\n logger.error(f\"Failed to deploy experiment {self.experiment_id}: \" + str(e))\n raise UnhandledWorkflowException(\n \"Some error occurred while setting up hosting endpoint. 
\"\n \"Please check SageMaker console for more information.\"\n )", "def train_model():\n # Decode the request\n data = request.data.decode(\"utf-8\")\n\n # Write data from the request in a local csv file\n train_csv = \"train_local.csv\"\n f = open(train_csv, \"w\", encoding=\"utf-8\")\n f.write(data)\n f.close()\n\n # Load the train csv file as a DataFrame\n train_df = pd.read_csv(train_csv)\n\n # Train model\n model.train_model(train_df)\n\n return jsonify({\"success\": \"The model was trained sucessfully\"})", "def download_model():\n logging.info(\"[genreml] Downloading model...\")\n with urllib.request.urlopen(config.FMAModelConfig.FMA_MODEL_URL) as f:\n data = f.read()\n open(config.FMAModelConfig.FMA_MODEL_PATH, 'wb').write(data)\n logging.info(\"[genreml] Model download complete\")", "def runserver():\n\tapp.run(host = '0.0.0.0', port = 5000)", "def main(_):\n if not FLAGS.model_output_dir:\n raise ValueError(\n \"Undefined model output directory. Perhaps you forgot to set the --model_output_dir flag?\")\n \n if FLAGS.predict_input_file:\n decode()\n else:\n train()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def startserver(path):\n global urlpath\n urlpath = path\n app.run(debug=True, host='0.0.0.0', port=4444)", "def run_server(self, _):\n if not ENABLE_SERVER:\n logger.info('server not enabled, exit')\n return\n app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)", "def to_mlflow(\n self,\n tracking_uri: Optional[str] = None,\n experiment_id: Optional[int] = None,\n run_name: str = \"log_biometext_model\",\n input_example: Optional[Dict] = None,\n conda_env: Optional[Dict] = None,\n ) -> str:\n if tracking_uri:\n mlflow.set_tracking_uri(tracking_uri)\n\n # This conda environment is only needed when serving the model later on with `mlflow models serve`\n conda_env = conda_env or {\n \"name\": \"mlflow-dev\",\n \"channels\": [\"defaults\", \"conda-forge\"],\n \"dependencies\": [\n \"python=3.7.9\",\n \"pip>=20.3.0\",\n {\"pip\": [\"mlflow\", f\"biome-text=={__version__}\"]},\n ],\n }\n\n with tempfile.TemporaryDirectory() as tmpdir_name:\n file_path = Path(self.save(directory=tmpdir_name))\n\n with mlflow.start_run(\n experiment_id=experiment_id, run_name=run_name\n ) as run:\n mlflow.log_artifact(str(file_path), \"biometext_pipeline\")\n mlflow.pyfunc.log_model(\n artifact_path=\"mlflow_model\",\n python_model=BiomeTextModel(),\n artifacts={\n BiomeTextModel.ARTIFACT_CONTEXT: mlflow.get_artifact_uri(\n f\"biometext_pipeline/{file_path.name}\"\n )\n },\n input_example=input_example,\n conda_env=conda_env,\n )\n model_uri = os.path.join(run.info.artifact_uri, \"mlflow_model\")\n\n return model_uri", "def train_model():\n if (not request.json \n or not 'data-format' in request.json\n or not 'ML-model' in request.json\n or not 'source-data-directory' in request.json\n or not 'source-data-repo' in request.json):\n abort(400)\n\n data_format = request.json['data-format']\n ML_model = request.json['ML-model']\n source_data_directory = request.json['source-data-directory']\n source_data_repo = request.json['source-data-repo']\n \n start = time.time()\n\n do_train(model_name=ML_model, \n repo_url=source_data_repo, \n repo_data_directory=source_data_directory, \n data_format=data_format)\n \n end = time.time()\n\n runningtime = (end - start)\n\n return f'Model {ML_model} trained using source data in directory {source_data_directory}, running time {runningtime} seconds', 201", "def runserver():\n\tlocal(\"revel run 
github.com/FreeFlightSim/fg-navdb\")", "def web(host: str, port: str, loglevel: str) -> None:\n uvicorn.run(\"source.apps.web:App\", host=host, port=port, log_level=loglevel)", "def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])", "def runserver(ctx):\n run(\"./manage.py runserver\", pty=True)", "def serve(port):\n app.run(host='0.0.0.0', port=port, debug=True)", "def run(self):\n self.app.run(host=\"0.0.0.0\")", "def StaticServe(base_path):\n def get_file(path):\n fullpath = base_path + path\n try:\n mime, encoding = mimetypes.guess_type(fullpath)\n return open(fullpath, 'rb'), mime or 'application/octet-stream'\n except IOError:\n raise DataNotFound(\"File does not exist\")\n\n class StaticServe(GiottoProgram):\n controllers = ['http-get']\n model = [get_file]\n view = FileView()\n\n return StaticServe()", "def server_it():\n\n app = flask.Flask(__name__, static_url_path='/static')\n\n app.route('/')(serve_index)\n\n\n @app.route('/brain/data/<path:path>')\n def serve_brain_data(path):\n data_dir = 'generated/data'\n return flask.send_from_directory(data_dir, path)\n\n @app.route('/brain/<path:path>')\n def serve_roygbiv_html(path):\n try:\n return flask.send_from_directory('brain', path)\n except Exception as e:\n import roygbiv\n viz_dir = os.path.join(os.path.dirname(roygbiv.__file__), 'web')\n return flask.send_from_directory(viz_dir, path)\n\n # GWAS app\n @app.route('/gwas/data/<path:path>')\n def serve_gwas_data(path):\n data_dir = 'generated/data'\n return flask.send_from_directory(data_dir, path)\n\n @app.route('/gwas/')\n @app.route('/gwas/index.html')\n def serve_default():\n import ping.viz\n viz_dir = os.path.dirname(ping.viz.__file__)\n man_dir = os.path.join(viz_dir, 'manhattan')\n return flask.send_from_directory(man_dir, 'manhattan.html')\n\n @app.route('/gwas/<path:path>')\n def serve_gwas_html(path):\n import ping.viz\n viz_dir = os.path.dirname(ping.viz.__file__)\n man_dir = os.path.join(viz_dir, 'manhattan')\n return flask.send_from_directory(man_dir, path)\n\n # Scatter app\n @app.route('/plots/<path:path>')\n def serve_plot(path):\n return flask.send_from_directory('generated/plots', path)\n @app.route('/2015/<path:path>')\n def serve_old(path):\n return flask.send_from_directory('2015', path)\n app.debug = True\n app.run()", "def serve():\n parser = OptionParser()\n \n # Port to to run grpc\n parser.add_option(\"--sitl\", action=\"store\", type=\"string\", dest=\"sitlOption\", default=None,\n help=\"Connect to a sitl simulator for RF8 rather than a real drone\")\n \n # Specifies lat and long for HOME position when SITL is being used.\n parser.add_option(\"--lat\", action=\"store\", type=\"float\", dest=\"lat\", default=None)\n parser.add_option(\"--lon\", action=\"store\", type=\"float\", dest=\"lon\", default=None)\n \n (options, args) = parser.parse_args()\n \n # Cheap and dirty position recording\n #pathFile = open('out.txt', 'w')\n \n vehicle = connect_to_drone(options.sitlOption, options.lat, options.lon)\n print \"Set default/target airspeed to 5\"\n vehicle.airspeed = 5\n \n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n droneconnect_pb2_grpc.add_DroneConnectServicer_to_server(DroneConnect(vehicle), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n\t\n try:\n while(True):\n time.sleep(_ONE_DAY_IN_SECONDS)\n #vehicle.executeGuidedMission()\n except KeyboardInterrupt:\n print (\"Stopping grpc server\")\n #Close vehicle object before exiting script\n print (\"Closing connection 
to drone.\")\n vehicle.close()\n #pathFile.close()\n server.stop(0)", "def start(self):\n self.serve_forever()", "def start(self):\n self.serve_forever()", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def serve_local_folder(source=None, port=\"8280\"):\n package(source=source)\n with lcd(\"%(toolsdir)s/rinfomain\" % env):\n local(\"groovy serve_folder.groovy %s/_build/%s/rinfo-admin %s\" % (env.projectroot, env.target, port) )", "def main():\n host = ''\n port = 8088\n HTTPServer((host, port), HandleRequests).serve_forever()", "def serve(kwargs, host, port, verbose):\n setup_logging(verbose)\n from werkzeug.serving import run_simple\n from . import create_app\n\n app = create_app(kwargs['db_path'], kwargs['plugins'])\n run_simple(host, port, app)", "def start_video_server():\r\n video_source_dir = '{}/data/video'.format(settings.TEST_ROOT)\r\n video_server = VideoSourceHttpService(port_num=settings.VIDEO_SOURCE_PORT)\r\n video_server.config['root_dir'] = video_source_dir\r\n setattr(world, 'video_source', video_server)", "def run_server(system, settings=None, port=None, skip_assets=False):\r\n if system not in ['lms', 'studio']:\r\n print(\"System must be either lms or studio\", file=sys.stderr)\r\n exit(1)\r\n\r\n if not skip_assets:\r\n # Local dev settings use staticfiles to serve assets, so we can skip the collecstatic step\r\n args = [system, '--settings={}'.format(settings), '--skip-collect', '--watch']\r\n call_task('pavelib.assets.update_assets', args=args)\r\n\r\n if port is None:\r\n port = DEFAULT_PORT[system]\r\n\r\n if settings is None:\r\n settings = DEFAULT_SETTINGS\r\n\r\n run_process(django_cmd(\r\n system, settings, 'runserver', '--traceback',\r\n '--pythonpath=.', '0.0.0.0:{}'.format(port)))", "def runtime(self, flask_instance: Flask, mode: int = ENABLE_CHERRYPY):\n # Start SLM discovery and Datastream registration\n try:\n self.ogc_datastream_registration(URL_SLM_CLOUD)\n except AttributeError as ae:\n logging.critical(ae)\n logging.info(\"Site name: \" + self._site_name)\n logging.info(\"Tenant ID :\" + self._tenant_id)\n logging.info(\"Site ID :\" + self._site_id)\n return\n\n # Start thread pool for Observations\n self._start_thread_pool(SCRALSoundLevelMeter.SLMThread)\n\n # starting REST web server\n super().runtime(flask_instance, mode)", "def serve_frozen():\n do('export FLASK_CONFIG=config/dev.py && %s/bin/python manage.py serve_frozen' % venv_path)", "def localhost():\n env.run = local\n env.cd = lcd\n env.deployment = 'local'", "def main(model_folder, override=False):\n model_description_file = os.path.join(model_folder, \"info.yml\")\n # Read the model description file\n with open(model_description_file) as ymlfile:\n model_description = yaml.safe_load(ymlfile)\n\n project_root = utils.get_project_root()\n # Read the feature description file\n feature_folder = os.path.join(project_root, model_description[\"data-source\"])\n with open(os.path.join(feature_folder, \"info.yml\")) as ymlfile:\n feature_description = yaml.safe_load(ymlfile)\n # Get a list of all used features\n feature_list = features.get_features(feature_description[\"features\"])\n # Get the dimension of the feature vector\n input_features = sum(n.get_dimension() for n in feature_list)\n logger.info(\"Number of features: %i\", input_features)\n\n # Analyze model\n logger.info(model_description[\"model\"])\n if model_description[\"model\"][\"type\"] != \"mlp\":\n return\n create_model(\n model_folder,\n model_description[\"model\"][\"type\"],\n 
model_description[\"model\"][\"topology\"],\n override,\n )\n utils.create_run_logfile(model_folder)", "def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())", "def run_fn(fn_args: TrainerFnArgs):\n\n # Training set size\n TRAIN_SIZE = get_dataset_size(fn_args.train_files)\n NUM_STEPS = TRAIN_SIZE / BATCH_SIZE # number of steps per epoch for which to train model\n \n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n \n train_dataset = _input_fn(fn_args.train_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n eval_dataset = _input_fn(fn_args.eval_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n\n model = _build_keras_model(\n tf_transform_output,\n hidden_units=[HIDDEN_UNITS_1, HIDDEN_UNITS_2, HIDDEN_UNITS_3],\n learning_rate=LEARNING_RATE)\n\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, update_freq='batch')\n \n model.fit(\n train_dataset,\n epochs=NUM_EPOCHS, \n steps_per_epoch=NUM_STEPS,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default':\n _get_serve_tf_examples_fn(model,\n LABEL_COLUMN,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def run_model(project=None, model=None, raw=None, dyr=None, xls=None, path=None, server='tcp://127.0.0.1:5678'):\n ret = 0\n if (not project) or (not model):\n logging.error('RT-LAB project or model undefined.')\n sys.exit(-1)\n if (not raw) and (not xls):\n logging.error('PSS/E raw file or ePHASORsim Excel file undefined.')\n sys.exit(-1)\n if not dyr:\n logging.debug('PSS/E dyr file not specified')\n\n sim = SimControl(project, model, path)\n\n simulink = os.path.join(path,project, 'simulink')\n models = os.path.join(path,project, 'models')\n if not os.path.isdir(simulink):\n logging.error('No <{}> directory found.'.format(simulink))\n if not os.path.isdir(models):\n logging.error('No <{}> directory found.'.format(models))\n sys.exit(1)\n else:\n logging.info('Using <{}> directory'.format(models))\n modelPath = models\n else:\n logging.info('Using <{}> directory'.format(simulink))\n modelPath = simulink\n\n\n sim_data = LTBSetup(raw=raw, dyr=dyr, xls=xls, path=modelPath, model=model, simObject=sim)\n\n streaming = Streaming(name='sim', server=server, ltb_data=sim_data)\n\n sim.open()\n sim.load()\n\n sim_data.get_sysparam()\n sim_data.get_varheader_idxvgs()\n sim.set_settings(sim_data.Settings)\n # sim_data.Idxvgs['Line'].update(sim.add_branch_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Line']))\n # sim_data.Idxvgs['Bus'].update(sim.add_bus_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Bus']))\n streaming.send_init()\n logging.debug('Varheader, SysParam and Idxvgs sent.')\n sleep(0.5)\n\n sim.start()\n\n streaming.run()", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def train():\n dataset = mgr.current_dataset()\n if dataset is None or not mgr.data_path.exists():\n return redirect(url_for('main.datasets'))\n # Generate 
session ID and obtain JavaScript from Bokeh server\n session_id = generate_session_id()\n script = server_session(url='http://bokeh:5006/train/',\n session_id=session_id)\n # Replace Docker alias in URL with localhost\n script = script.replace(\"http://bokeh:5006\", \"http://localhost:5006\")\n # Use the script in the rendered page\n return render_template(\"train.html\", script=script)", "def run_model (arguments):\n if arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def main():\n run = Run.get_context()\n try:\n work_space = run.experiment.workspace\n except AttributeError:\n interactive_auth = InteractiveLoginAuthentication(\n tenant_id=os.getenv(\"TENANT_ID\")\n )\n work_space = Workspace.from_config(auth=interactive_auth)\n environment = work_space.environments[\"train_lstm\"]\n model = Model(work_space, \"currency\")\n service_name = \"currency-service\"\n inference_config = InferenceConfig(\n entry_script=\"predict_currency.py\", environment=environment\n )\n aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)\n scaler = Model(work_space, name=\"scaler\", version=1)\n service = Model.deploy(\n workspace=work_space,\n name=service_name,\n models=[model, scaler],\n inference_config=inference_config,\n deployment_config=aci_config,\n overwrite=True,\n )\n service.wait_for_deployment(show_output=True)\n print(service.get_logs())\n print(service.scoring_uri)", "def expose_models(app, HOST=\"localhost\", PORT=5000, API_PREFIX=\"/api\"):\n\n api = SAFRSAPI(app, host=HOST, port=PORT)\n api.expose_object(models.Category)\n api.expose_object(models.CustomerCustomerDemo)\n api.expose_object(models.OrderDetail)\n api.expose_object(models.Order)\n api.expose_object(models.Customer)\n api.expose_object(models.CustomerDemographic)\n api.expose_object(models.EmployeeAudit)\n api.expose_object(models.EmployeeTerritory)\n api.expose_object(models.Employee)\n api.expose_object(models.Product)\n api.expose_object(models.Region)\n api.expose_object(models.Shipper)\n api.expose_object(models.Supplier)\n api.expose_object(models.Territory)\n return api", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit 
model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def run():\n\n @werkzeug.serving.run_with_reloader\n def runDebugServer():\n try:\n app.debug = True\n dapp = DebuggedApplication(app, evalex=True)\n appsocket.serve_forever()\n\n except Exception, err:\n app.logger.error(\"Error\", exc_info=True)\n\n runDebugServer()", "def run_simple_server(tb_app):\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()", "def main_training():\n if request.method == 'GET':\n print(\"Working directory: \", path_creator())\n train_knn_model_params=[config_gettype('train_knn_model','FRS.ini',param) for param in inspect.getfullargspec(train_knn_model)[0]]\n train_knn_model(*train_knn_model_params)\n return_text=\"FRS_training_model.py completed\"\n return jsonify(return_text)\n else:\n return_text1 = \"Опа\"\n return jsonify(return_text1)", "def run():\n app.run(debug=True, port=5001)", "def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)", "def simple_serve(self, host='0.0.0.0', port=8000):\n from wsgiref.simple_server import make_server\n srv = make_server(host, port, self)\n print \"Development server is running at http://%s:%d/\" % (\n host, port\n )\n print \"Quit the server with CONTROL-C\"\n srv.serve_forever()", "def entry(self):\n if not os.path.isfile('model'):\n train()\n schedule.every(0.01).seconds.do(predict, self)\n while True:\n schedule.run_pending()", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def run(self, host=\"0.0.0.0\", port=8080):\n self.app.run(host=host, port=port, debug=True, use_reloader=False,\n use_evalex=False)", "def web():\n env['remote_port'] = env['port_map']['8000']\n\n sys.stdout.write('Launching browser on remote port %(remote_port)s\\n' % env)\n\n run('open http://%(relay_server)s:%(remote_port)s' % env)", "def load_trainer(self):\n super().load_trainer()\n\n logging.info(\"[Server #%d] Loading a pre-trained model.\", os.getpid())\n self.trainer.load_model()", "def run_local(ctx, config):\n ctx.obj['NAME'] = config.name\n ctx.invoke(local,\n inputs_fpath=config.inputs_fpath,\n offshore_sites=config.offshore_sites,\n input_layers=config.input_layers,\n out_dir=config.dirout,\n tm_dset=config.tm_dset,\n log_dir=config.log_directory,\n verbose=config.log_level)", "def web():\n import web\n web.app.run(host='0.0.0.0', port=5000, debug=True)", "def web():\n import web\n web.app.run(host='0.0.0.0', port=5000, debug=True)", "def run(site):\n\n # Make sure there is a site by this name.\n 
Yesod.check_for_site(site)\n\n # Now run the site on the development server.\n Yesod.run(site)", "def load_model(self, directory):\n with tf.Session() as sess:\n tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], directory)\n self.model = tf.contrib.predictor.from_saved_model(directory)", "def load_model(self, fl_ctx: FLContext) -> ModelLearnable:\n if os.path.exists(self.save_path):\n self.logger.info(\"Loading server model\")\n model = load(self.save_path)\n else:\n self.logger.info(f\"Initialization, sending global settings: {self.initial_params}\")\n model = self.initial_params\n model_learnable = make_model_learnable(weights=model, meta_props=dict())\n\n return model_learnable", "def standalone():\n parser = argparse.ArgumentParser(\n description=\"Start Argo Workflow API Dispatch Server\"\n )\n parser.add_argument(\n \"-p\", \"--port\", help=\"Server listening port\", type=int, default=8080\n )\n parser.add_argument(\n \"-b\", \"--bind-address\", help=\"Server bind address\", default=\"127.0.0.1\"\n )\n parser.add_argument(\n \"-m\",\n \"--mock\",\n \"--mock-authentication\",\n action=\"store_true\",\n help=\"Do not require a JWT; mock out authentication\",\n )\n parser.add_argument(\n \"--no-verify-signature\",\n action=\"store_true\",\n help=\"Do not verify JWT signature\",\n )\n parser.add_argument(\n \"--no-verify-audience\",\n action=\"store_true\",\n help=\"Do not verify JWT audience\",\n )\n args = parser.parse_args()\n mock = args.mock\n v_s = True\n v_a = True\n if args.no_verify_signature:\n v_s = False\n if args.no_verify_audience:\n v_a = False\n server = Server(_mock=mock, verify_signature=v_s, verify_audience=v_a)\n httpd = WSGIServer(server.app, host=args.bind_address, port=args.port)\n httpd.start()", "def local_video(**kwargs):\n output_dir = run_video_preprocess(\n video_file=input_video,\n roi_locations=kwargs[\"roi_locations\"],\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n database=False\n )\n\n run_analysis_pipeline(\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n json_filepath=output_dir,\n )", "def start():\n from paste.deploy import loadapp, loadserver\n from moksha.config.environment import load_environment\n from moksha.config.middleware import make_app\n ini = 'config:' + path('development.ini').abspath()\n wsgi_app = loadapp(ini)\n serve = loadserver(ini)\n serve(wsgi_app)", "def predict_model():\n # Decode the request\n data = request.data.decode(\"utf-8\")\n\n # Write data from the request in a local csv file\n test_csv = \"test_local.csv\"\n f = open(test_csv, \"w\", encoding=\"utf-8\")\n f.write(data)\n f.close()\n\n # Load the test csv file as a DataFrame\n test_df = pd.read_csv(test_csv)\n\n # Get submission DataFrame\n predictions_df = model.predict(test_df)\n\n # Send csv file as response\n res = make_response(predictions_df.to_csv(index=False))\n res.headers[\"Content-Disposition\"] = \"attachment; filename=submission.csv\"\n res.headers[\"Content-Type\"] = \"text/csv\"\n return res", "def serve(port=3000, httpdir=\"/srv/http\", channels={}, livestream={}, secret_key=\"ChangeMe\"):\n\n app = Flask(__name__)\n\n app.config['http_dir'] = httpdir\n app.config['channels'] = channels\n app.config['livestream'] = livestream\n app.config['SECRET_KEY'] = secret_key\n\n login_manager.init_app(app)\n\n app.register_blueprint(blueprint)\n\n # blueprints cannot handle 404 or 405 errors, so stick this on the\n # app directly.\n @app.errorhandler(404)\n def page_not_found(error):\n return 
send_file(in_http_dir(\"404.html\"))\n\n return app.run(port=port)", "def run():\n do('export FLASK_CONFIG=config/dev.py && %s/bin/python manage.py runserver' % venv_path)", "def run_flow(flow_id):\n if flow_id == 1:\n etl.load_images_from_urls()\n elif flow_id == 2:\n etl.find_edges_and_save()\n elif flow_id == 3:\n etl.normalize_dataset()\n elif flow_id == 4:\n classifiers.run_models_comparison()", "def run(Concordancer, port=1420, url=None, open_browser=True):\n # Allow access from frontend\n cors = CORS(allow_all_origins=True)\n\n # Falcon server\n app = falcon.API(middleware=[cors.middleware])\n serv = ConcordancerBackend(Concordancer)\n app.add_route('/query', serv)\n app.add_route('/export', serv, suffix='export')\n\n print(f\"Initializing server...\")\n httpd = simple_server.make_server('localhost', port, app)\n print(f\"Start serving at http://localhost:{port}\")\n if url is None:\n url = query_interface_path()\n if open_browser:\n webbrowser.open(url)\n httpd.serve_forever()", "def set_vanilla_model(self):\n logging.debug(\"Setting vanilla model\")\n # Build model\n\n ## Embedding Layer\n word_embedding_layer = self.embed_word()\n pos_embedding_layer = self.embed_pos()\n\n ## Deep layers\n latent_layers = self.stack_latent_layers(self.num_of_latent_layers)\n\n ## Dropout\n dropout = Dropout(self.pred_dropout)\n\n ## Prediction\n predict_layer = self.predict_classes()\n\n ## Prepare input features, and indicate how to embed them\n inputs_and_embeddings = [(Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"word_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"predicate_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"postags_inputs\"),\n pos_embedding_layer),\n ]\n\n ## Concat all inputs and run on deep network\n output = predict_layer(dropout(latent_layers(merge([embed(inp)\n for inp, embed in inputs_and_embeddings],\n mode = \"concat\",\n concat_axis = -1))))\n\n # Build model\n self.model = Model(input = map(itemgetter(0), inputs_and_embeddings),\n output = [output])\n\n # Loss\n self.model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n self.model.summary()\n\n # Save model json to file\n self.save_model_to_file(os.path.join(self.model_dir, \"model.json\"))", "def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())", "def main():\n cwd = os.getcwd() # static files\n port = int(os.environ.get(\"PORT\", 8000))\n path = os.path.join(cwd, \"paper\") # Path to dl file\n\n app = Application([\n (r'/', GraphView),\n # Static files, repeat for other file names\n (r'/(.*\\.js)', StaticFileHandler, {\"path\": cwd} ),\n (r'/download/(barojas_v193\\.pdf)', StaticFileHandler, {'path': path} ), # Static serving file\n ])\n http_server = HTTPServer(app)\n http_server.listen(port)\n print('RI5C is listening on port:%i' % port)\n IOLoop.current().start()", "def runserver(args):\n elmrapp.run()\n return \"\"", "def deploy(app_name, model_path, run_id, mlflow_home):\n mlflow.azureml.deploy(app_name=app_name, model_path=model_path, run_id=run_id,\n mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None)", "def pretrained(name=\"tfhub_use\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(UniversalSentenceEncoder, name, lang, remote_loc)", "def run_server() -> Generator[DevServer, None, None]:\n server 
= DevServer()\n server.start()\n\n with requests.Session() as session:\n while True:\n try:\n session.get(\"http://localhost:32950\")\n except requests.exceptions.ConnectionError:\n pass\n else:\n break\n\n yield server\n server.stop()" ]
[ "0.6560286", "0.64194596", "0.6404588", "0.587721", "0.5865771", "0.58165324", "0.5640516", "0.5599409", "0.5582179", "0.5575687", "0.55372185", "0.54902416", "0.5471806", "0.54470295", "0.5435611", "0.543411", "0.543088", "0.5413148", "0.54089355", "0.53982294", "0.53869677", "0.532483", "0.5311424", "0.5270371", "0.52685183", "0.52444416", "0.5233685", "0.5228077", "0.52184486", "0.5214481", "0.52110106", "0.5195256", "0.5162161", "0.51336294", "0.51320356", "0.5126767", "0.5123389", "0.51022553", "0.5092668", "0.50925434", "0.50796", "0.5076065", "0.50668424", "0.5058893", "0.50576836", "0.5051816", "0.50409985", "0.5036506", "0.50320566", "0.50320566", "0.50146633", "0.5002037", "0.49935114", "0.4989113", "0.4983865", "0.49621892", "0.49480766", "0.49338126", "0.49305686", "0.49272248", "0.49224737", "0.4919705", "0.49147576", "0.49080312", "0.49076068", "0.49073142", "0.49063924", "0.49063027", "0.4903486", "0.4898567", "0.48947075", "0.48944533", "0.4894047", "0.48924267", "0.48893186", "0.48883593", "0.48742932", "0.48709917", "0.48701924", "0.4869655", "0.48600158", "0.48459953", "0.48459953", "0.48443687", "0.48362008", "0.4826512", "0.48235777", "0.4821556", "0.4818153", "0.4817411", "0.4816249", "0.48117396", "0.4807974", "0.47993457", "0.4798275", "0.47932753", "0.4792777", "0.47881734", "0.4786959", "0.47824246", "0.47743145" ]
0.0
-1
Serve a PythonFunction model saved with MLflow. If a ``run_id`` is specified, ``model_path`` is treated as an artifact path within that run; otherwise it is treated as a local path.
def serve(model_path, run_id, port, host, no_conda):
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)

    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)

    app = scoring_server.init(load_pyfunc(model_path))
    app.run(port=port, host=host)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_fn(fn_args: TrainerFnArgs):\n\n # Training set size\n TRAIN_SIZE = get_dataset_size(fn_args.train_files)\n NUM_STEPS = TRAIN_SIZE / BATCH_SIZE # number of steps per epoch for which to train model\n \n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n \n train_dataset = _input_fn(fn_args.train_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n eval_dataset = _input_fn(fn_args.eval_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n\n model = _build_keras_model(\n tf_transform_output,\n hidden_units=[HIDDEN_UNITS_1, HIDDEN_UNITS_2, HIDDEN_UNITS_3],\n learning_rate=LEARNING_RATE)\n\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, update_freq='batch')\n \n model.fit(\n train_dataset,\n epochs=NUM_EPOCHS, \n steps_per_epoch=NUM_STEPS,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default':\n _get_serve_tf_examples_fn(model,\n LABEL_COLUMN,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def run(\n trained_model: Ridge,\n mlflow: mlflow,\n model_name: str = \"diabetes\",\n app_logger: AppLogger = get_disabled_logger(),\n parent_tracer: Tracer = None,\n) -> ModelVersion:\n logger = logging.getLogger(__name__)\n try:\n component_name = \"Diabetes_Publish_Model\"\n\n # mlflow tracking\n mlflow_run = mlflow.active_run()\n mlflow_run_id = mlflow_run.info.run_id\n mlflow_experiment_id = mlflow_run.info.experiment_id\n\n logger = app_logger.get_logger(\n component_name=component_name,\n custom_dimensions={\n \"mlflow_run_id\": mlflow_run_id,\n \"mlflow_experiment_id\": mlflow_experiment_id,\n },\n )\n tracer = app_logger.get_tracer(\n component_name=component_name, parent_tracer=parent_tracer\n )\n\n logger.info(\"Running MLOps publish model\")\n\n temp_model_dir = tempfile.mkdtemp()\n model_path = os.path.join(temp_model_dir, model_name)\n with tracer.span(\"save_model\"):\n mlflow.sklearn.save_model(trained_model, model_path)\n mlflow.log_artifact(model_path)\n model_uri = \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=model_name\n )\n\n logger.info(\"Publishing trained model into mlflow model registry\")\n with tracer.span(\"register_model\"):\n model_details = mlflow.register_model(model_uri=model_uri, name=model_name)\n model_version = model_details.version\n\n mlflow.log_param(\"model_version\", model_version)\n mlflow.log_param(\"model_name\", model_name)\n\n logger.info(f\"published model name: {model_name}, version: {model_version}\")\n logger.info(\"Completed MLOps publish model\")\n\n return model_details\n except Exception as exp:\n logger.error(\"an exception occurred in publish model\")\n raise Exception(\"an exception occurred in publish model\") from exp", "def export(output, model_path, run_id, mlflow_home):\n mlflow.azureml.export(output=output, model_path=model_path, run_id=run_id,\n mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None)", "def serve(self):\n\n with self.graph.as_default():\n\n if self.builder == None:\n self.builder = tf.saved_model.builder.SavedModelBuilder(self.path + '/build/')\n\n # Generate softmax output.\n prediction = tf.nn.softmax(self.output, name='predict_probability')\n prediction_adv = 
tf.nn.softmax(self.output_adv, name='prediction_probability_adv')\n \n\n # Build `SignatureDef`.\n # See https://www.tensorflow.org/serving/signature_defs .\n inputs = {k.name: tf.saved_model.utils.build_tensor_info(k) for k in self.eval_config}\n inputs[SIGNATURE_INPUT] = tf.saved_model.utils.build_tensor_info(self.feature_holder)\n\n outputs = {SIGNATURE_OUTPUT: tf.saved_model.utils.build_tensor_info(prediction), SIGNATURE_OUTPUT_ADV: tf.saved_model.utils.build_tensor_info(prediction_adv)}\n\n signature = tf.saved_model.signature_def_utils.build_signature_def(inputs, outputs, SIGNATURE_METHOD_NAME)\n self.builder.add_meta_graph_and_variables(self.session, tags=[tf.saved_model.tag_constants.SERVING], signature_def_map={SIGNATURE_KEY: signature})\n self.builder.save()", "def export_model(model, path=None, input_shape=(1, 3, 64, 64)):\n path = get_model_path() if path is None else path\n model = deepcopy(model).cpu().eval()\n if not isinstance(model, torch.jit.ScriptModule):\n assert input_shape is not None, \"`input_shape` must be provided since model is not a \" \\\n \"`ScriptModule`.\"\n traced_model = trace(model, torch.zeros(*input_shape))\n else:\n traced_model = model\n torch.jit.save(traced_model, path)\n return path", "def serve(model_path, host, port, debug):\n\n app = Scorer(__name__, model_path=model_path)\n app.run(host=host, port=port, debug=debug)", "def deploy(app_name, model_path, run_id, mlflow_home):\n mlflow.azureml.deploy(app_name=app_name, model_path=model_path, run_id=run_id,\n mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None)", "def _get_serve_tf_examples_fn(model, tf_transform_output):\n\n model.tft_layer = tf_transform_output.transform_features_layer()\n\n @tf.function\n def serve_tf_examples_fn(serialized_tf_examples):\n \"\"\"Returns the output to be used in the serving signature.\"\"\"\n feature_spec = tf_transform_output.raw_feature_spec()\n parsed_features = tf.io.parse_example(\n serialized_tf_examples, feature_spec)\n transformed_features = create_training_data(\n model.tft_layer(parsed_features))\n return model(transformed_features)\n\n return serve_tf_examples_fn", "def _get_serve_tf_examples_fn(model, tf_transform_output):\n\n model.tft_layer = tf_transform_output.transform_features_layer()\n\n @tf.function\n def serve_tf_examples_fn(serialized_tf_examples):\n \"\"\"Returns the output to be used in the serving signature.\"\"\"\n feature_spec = tf_transform_output.raw_feature_spec()\n feature_spec.pop('income_bracket')\n parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n transformed_features = model.tft_layer(parsed_features)\n return model(transformed_features)\n\n return serve_tf_examples_fn", "def to_mlflow(\n self,\n tracking_uri: Optional[str] = None,\n experiment_id: Optional[int] = None,\n run_name: str = \"log_biometext_model\",\n input_example: Optional[Dict] = None,\n conda_env: Optional[Dict] = None,\n ) -> str:\n if tracking_uri:\n mlflow.set_tracking_uri(tracking_uri)\n\n # This conda environment is only needed when serving the model later on with `mlflow models serve`\n conda_env = conda_env or {\n \"name\": \"mlflow-dev\",\n \"channels\": [\"defaults\", \"conda-forge\"],\n \"dependencies\": [\n \"python=3.7.9\",\n \"pip>=20.3.0\",\n {\"pip\": [\"mlflow\", f\"biome-text=={__version__}\"]},\n ],\n }\n\n with tempfile.TemporaryDirectory() as tmpdir_name:\n file_path = Path(self.save(directory=tmpdir_name))\n\n with mlflow.start_run(\n experiment_id=experiment_id, run_name=run_name\n ) as run:\n 
mlflow.log_artifact(str(file_path), \"biometext_pipeline\")\n mlflow.pyfunc.log_model(\n artifact_path=\"mlflow_model\",\n python_model=BiomeTextModel(),\n artifacts={\n BiomeTextModel.ARTIFACT_CONTEXT: mlflow.get_artifact_uri(\n f\"biometext_pipeline/{file_path.name}\"\n )\n },\n input_example=input_example,\n conda_env=conda_env,\n )\n model_uri = os.path.join(run.info.artifact_uri, \"mlflow_model\")\n\n return model_uri", "def model_fn(model_dir):\n \n sym, arg_params, aux_params = mx.model.load_checkpoint('%s/102flowers' % model_dir, 0)\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)\n mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))], label_shapes=mod._label_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=True)\n return mod", "def save_model(\n sktime_model,\n path,\n conda_env=None,\n code_paths=None,\n mlflow_model=None,\n signature=None,\n input_example=None,\n pip_requirements=None,\n extra_pip_requirements=None,\n serialization_format=SERIALIZATION_FORMAT_PICKLE,\n): # TODO: can we specify a type for fitted instance of sktime model below? # noqa: E501\n _check_soft_dependencies(\"mlflow\", severity=\"error\")\n from mlflow.exceptions import MlflowException\n from mlflow.models import Model\n from mlflow.models.model import MLMODEL_FILE_NAME\n from mlflow.models.utils import _save_example\n from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\n from mlflow.utils.environment import (\n _CONDA_ENV_FILE_NAME,\n _CONSTRAINTS_FILE_NAME,\n _PYTHON_ENV_FILE_NAME,\n _REQUIREMENTS_FILE_NAME,\n _process_conda_env,\n _process_pip_requirements,\n _PythonEnv,\n _validate_env_arguments,\n )\n from mlflow.utils.file_utils import write_to\n from mlflow.utils.model_utils import (\n _validate_and_copy_code_paths,\n _validate_and_prepare_target_save_path,\n )\n\n _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)\n\n if serialization_format not in SUPPORTED_SERIALIZATION_FORMATS:\n raise MlflowException(\n message=(\n \"Unrecognized serialization format: {serialization_format}. 
\"\n \"Please specify one of the following supported formats: \"\n \"{supported_formats}.\".format(\n serialization_format=serialization_format,\n supported_formats=SUPPORTED_SERIALIZATION_FORMATS,\n )\n ),\n error_code=INVALID_PARAMETER_VALUE,\n )\n\n _validate_and_prepare_target_save_path(path)\n code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)\n\n if mlflow_model is None:\n mlflow_model = Model()\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n\n model_data_subpath = \"model.pkl\"\n model_data_path = os.path.join(path, model_data_subpath)\n _save_model(\n sktime_model, model_data_path, serialization_format=serialization_format\n )\n\n pyfunc.add_to_model(\n mlflow_model,\n loader_module=\"sktime.utils.mlflow_sktime\",\n model_path=model_data_subpath,\n conda_env=_CONDA_ENV_FILE_NAME,\n python_env=_PYTHON_ENV_FILE_NAME,\n code=code_dir_subpath,\n )\n\n mlflow_model.add_flavor(\n FLAVOR_NAME,\n pickled_model=model_data_subpath,\n sktime_version=sktime.__version__,\n serialization_format=serialization_format,\n code=code_dir_subpath,\n )\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n\n if conda_env is None:\n if pip_requirements is None:\n include_cloudpickle = (\n serialization_format == SERIALIZATION_FORMAT_CLOUDPICKLE\n )\n default_reqs = get_default_pip_requirements(include_cloudpickle)\n default_reqs = sorted(default_reqs)\n else:\n default_reqs = None\n conda_env, pip_requirements, pip_constraints = _process_pip_requirements(\n default_reqs, pip_requirements, extra_pip_requirements\n )\n else:\n conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)\n\n with open(os.path.join(path, _CONDA_ENV_FILE_NAME), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n if pip_constraints:\n write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), \"\\n\".join(pip_constraints))\n\n write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), \"\\n\".join(pip_requirements))\n\n _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))", "def _get_serve_tf_examples_fn(model, label_column, tf_transform_output):\n\n model.tft_layer = tf_transform_output.transform_features_layer()\n\n @tf.function\n def serve_tf_examples_fn(serialized_tf_examples):\n \"\"\"Returns the output to be used in the serving signature.\"\"\"\n feature_spec = tf_transform_output.raw_feature_spec()\n feature_spec.pop(label_column)\n parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n transformed_features = model.tft_layer(parsed_features)\n return model(transformed_features)\n\n return serve_tf_examples_fn", "def _load_pyfunc(path):\n import tensorflow\n\n (\n tf_saved_model_dir,\n tf_meta_graph_tags,\n tf_signature_def_key,\n ) = _get_and_parse_flavor_configuration(model_path=path)\n\n loaded_model = tensorflow.saved_model.load( # pylint: disable=no-value-for-parameter\n export_dir=tf_saved_model_dir, tags=tf_meta_graph_tags\n )\n return _TF2Wrapper(model=loaded_model, infer=loaded_model.signatures[tf_signature_def_key])", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n print(f\"Parameters {fn_args}\")\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n batch_size=fn_args.train_batches)\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n batch_size=fn_args.eval_batches)\n\n # mirrored_strategy = 
tf.distribute.MirroredStrategy()\n # with mirrored_strategy.scope():\n model = encoder_decoder_model.build_keras_model(\n timesteps=fn_args.timesteps,\n number_features=fn_args.number_features,\n outer_units=fn_args.outer_units,\n inner_units=fn_args.inner_units)\n\n steps_per_epoch = fn_args.training_example_count / fn_args.train_batches\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard()\n\n model.fit(\n train_dataset,\n epochs=int(fn_args.train_steps / steps_per_epoch),\n steps_per_epoch=steps_per_epoch,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default': _get_serve_tf_examples_fn(\n model, tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n\n model.save(\n fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def _load_pyfunc(path): # noqa: E501\n _check_soft_dependencies(\"mlflow\", severity=\"error\")\n from mlflow.exceptions import MlflowException\n from mlflow.utils.model_utils import _get_flavor_configuration\n\n if os.path.isfile(path):\n serialization_format = SERIALIZATION_FORMAT_PICKLE\n _logger.warning(\n \"Loading procedure in older versions of MLflow using pickle.load()\"\n )\n else:\n try:\n sktime_flavor_conf = _get_flavor_configuration(\n model_path=path, flavor_name=FLAVOR_NAME\n )\n serialization_format = sktime_flavor_conf.get(\n \"serialization_format\", SERIALIZATION_FORMAT_PICKLE\n )\n except MlflowException:\n _logger.warning(\n \"Could not find sktime flavor configuration during model \"\n \"loading process. Assuming 'pickle' serialization format.\"\n )\n serialization_format = SERIALIZATION_FORMAT_PICKLE\n\n pyfunc_flavor_conf = _get_flavor_configuration(\n model_path=path, flavor_name=pyfunc.FLAVOR_NAME\n )\n path = os.path.join(path, pyfunc_flavor_conf[\"model_path\"])\n\n return _SktimeModelWrapper(\n _load_model(path, serialization_format=serialization_format)\n )", "def save_model(script_name, feature_set, model_fname):\n import requests\n import json\n from urllib.parse import urljoin\n\n model_payload = {\n \"model\": {\n \"name\": script_name,\n \"model\": {\n \"type\": \"model/ranklib\",\n \"definition\": {\n }\n }\n }\n }\n\n with open(model_fname) as modelFile:\n model_content = modelFile.read()\n path = \"_ltr/_featureset/%s/_createmodel\" % feature_set\n full_path = urljoin(ES_HOST, path)\n print(\"full_path\", full_path)\n model_payload['model']['model']['definition'] = model_content\n Logger.logger.info(\"POST %s\" % full_path)\n head = {'Content-Type': 'application/json'}\n resp = requests.post(full_path, data=json.dumps(model_payload), auth = HTTPBasicAuth(ES_User,ES_Passw),headers=head,verify=False)\n Logger.logger.info(resp.status_code)\n if resp.status_code >= 300:\n Logger.logger.error(resp.text)", "def _write_v2_saved_model(tf_function: function.Function, name: str,\n saved_model_dir: str) -> function.ConcreteFunction:\n module = tf.Module()\n\n resource_tracker = tracking.ResourceTracker()\n created_variables = []\n\n def _variable_creator(next_creator, **kwargs):\n var = next_creator(**kwargs)\n created_variables.append(var)\n return var\n\n # TODO(b/164921571): Handle generic Trackable objects.\n # Trace `tf_function` to gather any resources in it using the\n # resource_tracker. 
These are then assigned to `module.resources` and tracked\n # before exporting to SavedModel.\n with tracking.resource_tracker_scope(\n resource_tracker), tf.variable_creator_scope(_variable_creator):\n concrete_fn = tf_function.get_concrete_function()\n\n # Prior to 2020/10/08, saving a tf.function with a concrete function signature\n # would ensure that the function was not re-traced in a round-trip to a\n # SavedModel. Since this is no longer the case, we save the concrete function\n # directly.\n if tf.compat.forward_compatible(2020, 10, 8):\n pruned_function = _optimize_concrete_function(concrete_fn)\n module.pruned_variables = pruned_function.variables\n setattr(module, name, pruned_function)\n else:\n setattr(module, name, tf_function)\n\n # Any variables created need to be explicitly tracked.\n module.created_variables = created_variables\n # Resources need to be explicitly tracked.\n module.resources = resource_tracker.resources\n # TODO(b/158011374) - Stop explicitly tracking initializers. Tracking the\n # table should be sufficient.\n initializers = []\n for resource in module.resources:\n if isinstance(resource, lookup_ops.InitializableLookupTableBase):\n initializers.append(resource._initializer) # pylint: disable=protected-access\n module.initializers = initializers\n module.assets = [\n common_types.Asset(asset_filepath) for asset_filepath in\n concrete_fn.graph.get_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)\n ]\n tf.saved_model.save(module, saved_model_dir)\n return concrete_fn", "def save_model(self, directory):\n def serving_input_fn():\n label_ids = tf.placeholder(tf.int32, [None], name='label_ids')\n input_ids = tf.placeholder(tf.int32, [None, self.max_seq_len], name='input_ids')\n input_mask = tf.placeholder(tf.int32, [None, self.max_seq_len], name='input_mask')\n segment_ids = tf.placeholder(tf.int32, [None, self.max_seq_len], name='segment_ids')\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'label_ids': label_ids,\n 'input_ids': input_ids,\n 'input_mask': input_mask,\n 'segment_ids': segment_ids,\n })()\n return input_fn\n\n self.model._export_to_tpu = False # this is important\n self.model.export_savedmodel(directory, serving_input_fn)", "def predict(model_path, run_id, input_path, output_path, no_conda):\n if run_id:\n model_path = _get_model_log_dir(model_path, run_id)\n\n model_env_file = _load_model_env(model_path)\n if not no_conda and model_env_file is not None:\n conda_env_path = os.path.join(model_path, model_env_file)\n return _rerun_in_conda(conda_env_path)\n\n model = load_pyfunc(model_path)\n df = pandas.read_csv(input_path)\n result = model.predict(df)\n out_stream = sys.stdout\n if output_path:\n out_stream = open(output_path, 'w')\n pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)", "def export_model(model, model_type, export_dir, model_column_fn):\n wide_columns, deep_columns = model_column_fn()\n if model_type == 'wide':\n columns = wide_columns\n elif model_type == 'deep':\n columns = deep_columns\n else:\n columns = wide_columns + deep_columns\n feature_spec = tf.feature_column.make_parse_example_spec(columns)\n example_input_fn = (\n tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))\n model.export_savedmodel(export_dir, example_input_fn,\n strip_default_attrs=True)", "def model_fn(model_dir):\n\n net = gluon.nn.SymbolBlock.imports('%s/model.json' % model_dir,\n ['data'], \n param_file='%s/model.params' % model_dir,\n ctx=mx.cpu())\n\n return net", "def 
upload(model_file):\n model_endpoint = os.path.join(SKETCHFAB_API_URL, 'models')\n\n # Optional parameters\n name = 'vid2meshtest'\n description = 'Uploaded by 4D-Modeling:'\n tags = ['4D-modeling']\n\n data = {\n 'name': name,\n 'description': description,\n 'tags': tags\n }\n\n f = open(model_file, 'rb')\n\n files = {'modelFile': f}\n\n print\n 'Uploading ...'\n\n try:\n r = requests.post(\n model_endpoint, **_get_request_payload(\n data, files=files))\n except requests.exceptions.RequestException as e:\n print\n u'An error occured: {}'.format(e)\n return\n finally:\n f.close()\n\n if r.status_code != requests.codes.created:\n print\n u'Upload failed with error: {}'.format(r.json())\n return\n\n # Should be https://api.sketchfab.com/v3/models/XXXX\n model_url = r.headers['Location']\n print\n 'Upload successful. Your model is being processed.'\n print\n 'Once the processing is done, the model will be available at: {}'.format(\n model_url)\n\n return model_url", "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # 
Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def _get_serve_image_fn(model):\n\n @tf.function\n def serve_image_fn(image_tensor):\n \"\"\"Returns the output to be used in the serving signature.\n \n Args:\n image_tensor: A tensor represeting input image. The image should have 3\n channels.\n \n Returns:\n The model's predicton on input image tensor\n \"\"\"\n return model(image_tensor)\n\n return serve_image_fn", "def save_model(self, model_path: str):", "def load_pyfunc(path, run_id=None, suppress_warnings=False):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf = _load_model_conf(path)\n model_py_version = conf.get(PY_VERSION)\n if not suppress_warnings:\n _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)\n if CODE in conf and conf[CODE]:\n code_path = os.path.join(path, conf[CODE])\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path\n return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)", "def get_function(model_or_function, preprocess_function=None):\n from dianna.utils.onnx_runner import SimpleModelRunner # pylint: disable=import-outside-toplevel\n\n if isinstance(model_or_function, Path):\n model_or_function = str(model_or_function)\n\n if isinstance(model_or_function, (str, bytes, Path)):\n runner = SimpleModelRunner(model_or_function,\n preprocess_function=preprocess_function)\n elif callable(model_or_function):\n if preprocess_function is None:\n runner = model_or_function\n else:\n\n def runner(input_data):\n return model_or_function(preprocess_function(input_data))\n else:\n raise TypeError(\n 'model_or_function argument must be string (path to model), '\n 'bytes (serialized onnx model), or function')\n return runner", "def _make_serving_input_fn(tft_working_dir):\n\n def input_fn():\n \"\"\"Serving input function that reads raw data and applies transforms.\"\"\"\n raw_placeholder_spec = RAW_DATA_METADATA.schema.as_batched_placeholders()\n # remove label key that is not going to be available at seving\n raw_placeholder_spec.pop(LABEL_KEY)\n\n # we are defining the feature_column (raw_featutes) and the tensor\n # (receiver_tensors) for the raw data\n raw_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(\n raw_placeholder_spec)\n raw_features, receiver_tensors , _ = raw_input_fn()\n\n # we are tranforming the raw_features with the graph written by\n # preprocess.py to 
transform_fn_io.TRANSFORM_FN_DIR and that was used to\n # write the tf records. This helps avoiding training/serving skew\n\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n os.path.join(tft_working_dir, transform_fn_io.TRANSFORM_FN_DIR),\n raw_features))\n\n return tf.estimator.export.ServingInputReceiver(\n transformed_features, receiver_tensors)\n\n return input_fn", "def log_model_without_starting_new_run():\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )", "def to_savedmodel(keras_model, export_path):\n try:\n builder = saved_model_builder.SavedModelBuilder(export_path)\n signature = predict_signature_def(\n inputs={'input': keras_model.inputs[0]},\n outputs={'output': keras_model.outputs[0]}\n )\n\n with K.get_session() as sess:\n builder.add_meta_graph_and_variables(\n sess=sess,\n tags=[tag_constants.SERVING],\n signature_def_map={\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature\n }\n )\n builder.save()\n except Exception:\n error_client = error_reporting.Client()\n error_client.report_exception()", "def main(opt):\n weights_path: Path = Path(\"../weights\")\n model_path = weights_path / opt.model_path\n if not model_path.exists():\n raise ValueError(f\"Invalid model path: {model_path}\")\n\n print(f\"Loading keras model: '{model_path}'\")\n keras_model = SqueezeNet()\n keras_model.load_weights(model_path)\n output_file = get_tf_filename(str(model_path))\n keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=output_file)\n print(\"MODEL SAVED\")", "def to_savedmodel(model, export_path):\n if export_path.startswith('gs://'):\n _save_oncloud(model, export_path)\n else:\n ### Allow overwriting of export_path if it already exists by removing it first..\n if file_io.file_exists(export_path):\n file_io.delete_recursively(export_path)\n\n builder = saved_model_builder.SavedModelBuilder(export_path)\n\n signature = predict_signature_def(\n inputs={'input': model.inputs[0]}, outputs={'income': model.outputs[0]})\n\n with K.get_session() as sess:\n builder.add_meta_graph_and_variables(\n sess=sess,\n tags=[tag_constants.SERVING],\n signature_def_map={\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature\n })\n builder.save()", "def preload_model(\n mlflow_tracking_uri: str,\n experiment_name: str,\n run_id: str,\n):\n\n mlflow.set_tracking_uri(mlflow_tracking_uri)\n mlflow.set_experiment(experiment_name)\n experiment_details = mlflow.get_experiment_by_name(experiment_name)\n\n mlflow.end_run()\n mlflow.start_run(run_id=run_id)\n\n # pull model from tracking uri\n artifact_loc = (\n str(experiment_details.artifact_location)\n .replace(\"file:\", \"\")\n .replace(\"///\", \"\")\n )\n loc_prefix = \"\"\n if 
\"P1-AnalyzeTrades\" not in os.getcwd():\n loc_prefix = r\"P1-AnalyzeTrades/\"\n\n metrics, params, tags = parse_mlflow_info(mlflow.get_run(run_id))\n\n model_type = get_model_type(tags)\n\n if model_type == \"sklearn\":\n try: # first try local path]\n mdl = pickle.load(\n open(f\"{artifact_loc}/{run_id}/artifacts/model/model.pkl\", \"rb\")\n )\n except: # then try repo specific path for finalized cases\n mdl = pickle.load(\n open(f\"{loc_prefix}mlruns/0/{run_id}/artifacts/model/model.pkl\", \"rb\")\n )\n else:\n # for h2o models\n h2o.init()\n try:\n logged_model = f\"runs:/{run_id}/model\"\n # logged_model = f'mlruns/0/{run_id}/artifacts/model'\n mdl = mlflow.pyfunc.load_model(logged_model)\n\n # mojo deprecated\n # mdl = h2o.import_mojo(f'{artifact_loc}/{run_id}/artifacts/')\n except:\n logged_model = f\"{loc_prefix}mlruns/0/{run_id}/artifacts/model\"\n mdl = mlflow.pyfunc.load_model(logged_model)\n\n mlflow.end_run()\n\n # load cat dict, if available\n cat_dict = {}\n cat_dict_loc = f\"{artifact_loc}/{run_id}/artifacts/cat_dict.pkl\"\n if os.path.exists(cat_dict_loc):\n cat_dict = pickle.load(open(cat_dict_loc, \"rb\"))\n else: # then try repo specific path for finalized cases\n cat_dict_loc = f\"{loc_prefix}mlruns/0/{run_id}/artifacts/cat_dict.pkl\"\n if os.path.exists(cat_dict_loc):\n cat_dict = pickle.load(open(cat_dict_loc, \"rb\"))\n\n return mdl, cat_dict", "def export(self,\n output_dir: Text,\n tflite_path: Text = None,\n tensorrt: Text = None):\n signitures = self.signitures\n signature_def_map = {\n 'serving_default':\n tf.saved_model.predict_signature_def(\n {signitures['image_arrays'].name: signitures['image_arrays']},\n {signitures['prediction'].name: signitures['prediction']}),\n }\n b = tf.saved_model.Builder(output_dir)\n b.add_meta_graph_and_variables(\n self.sess,\n tags=['serve'],\n signature_def_map=signature_def_map,\n assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),\n clear_devices=True)\n b.save()\n logging.info('Model saved at %s', output_dir)\n\n # also save freeze pb file.\n graphdef = self.freeze()\n pb_path = os.path.join(output_dir, self.model_name + '_frozen.pb')\n tf.io.gfile.GFile(pb_path, 'wb').write(graphdef.SerializeToString())\n logging.info('Frozen graph saved at %s', pb_path)\n\n if tflite_path:\n height, width = utils.parse_image_size(self.params['image_size'])\n input_name = signitures['image_arrays'].op.name\n input_shapes = {input_name: [None, height, width, 3]}\n converter = tf.lite.TFLiteConverter.from_saved_model(\n output_dir,\n input_arrays=[input_name],\n input_shapes=input_shapes,\n output_arrays=[signitures['prediction'].op.name])\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]\n tflite_model = converter.convert()\n\n tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)\n logging.info('TFLite is saved at %s', tflite_path)\n\n if tensorrt:\n from tensorflow.python.compiler.tensorrt import trt # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\n trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())\n trt.create_inference_graph(\n None,\n None,\n precision_mode=tensorrt,\n input_saved_model_dir=output_dir,\n output_saved_model_dir=trt_path,\n session_config=sess_config)\n logging.info('TensorRT model is saved at %s', trt_path)", "def save_model(program, model_path):\n fluid.save(program, model_path)\n logger.info(\"Already save model in {}\".format(model_path))", "def save_model(model, 
model_filepath):", "def run_model(project=None, model=None, raw=None, dyr=None, xls=None, path=None, server='tcp://127.0.0.1:5678'):\n ret = 0\n if (not project) or (not model):\n logging.error('RT-LAB project or model undefined.')\n sys.exit(-1)\n if (not raw) and (not xls):\n logging.error('PSS/E raw file or ePHASORsim Excel file undefined.')\n sys.exit(-1)\n if not dyr:\n logging.debug('PSS/E dyr file not specified')\n\n sim = SimControl(project, model, path)\n\n simulink = os.path.join(path,project, 'simulink')\n models = os.path.join(path,project, 'models')\n if not os.path.isdir(simulink):\n logging.error('No <{}> directory found.'.format(simulink))\n if not os.path.isdir(models):\n logging.error('No <{}> directory found.'.format(models))\n sys.exit(1)\n else:\n logging.info('Using <{}> directory'.format(models))\n modelPath = models\n else:\n logging.info('Using <{}> directory'.format(simulink))\n modelPath = simulink\n\n\n sim_data = LTBSetup(raw=raw, dyr=dyr, xls=xls, path=modelPath, model=model, simObject=sim)\n\n streaming = Streaming(name='sim', server=server, ltb_data=sim_data)\n\n sim.open()\n sim.load()\n\n sim_data.get_sysparam()\n sim_data.get_varheader_idxvgs()\n sim.set_settings(sim_data.Settings)\n # sim_data.Idxvgs['Line'].update(sim.add_branch_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Line']))\n # sim_data.Idxvgs['Bus'].update(sim.add_bus_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Bus']))\n streaming.send_init()\n logging.debug('Varheader, SysParam and Idxvgs sent.')\n sleep(0.5)\n\n sim.start()\n\n streaming.run()", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n 
name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def spark_udf(spark, path, run_id=None, result_type=\"double\"):\n\n # Scope Spark import to this method so users don't need pyspark to use non-Spark-related\n # functionality.\n from mlflow.pyfunc.spark_model_cache import SparkModelCache\n from pyspark.sql.functions import pandas_udf\n\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n\n archive_path = SparkModelCache.add_local_model(spark, path)\n\n def predict(*args):\n model = SparkModelCache.get_or_load(archive_path)\n schema = {str(i): arg for i, arg in enumerate(args)}\n # Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)\n columns = [str(i) for i, _ in enumerate(args)]\n pdf = pandas.DataFrame(schema, columns=columns)\n result = model.predict(pdf)\n return pandas.Series(result)\n\n return pandas_udf(predict, result_type)", "def run_flow(flow_id):\n if flow_id == 1:\n etl.load_images_from_urls()\n elif flow_id == 2:\n etl.find_edges_and_save()\n elif flow_id == 3:\n etl.normalize_dataset()\n elif flow_id == 4:\n classifiers.run_models_comparison()", "def generate_model_fn(mode_feature_cols_map):\n def model_fn(features, labels, mode, params=None, config=None):\n if params is None:\n params = tf.contrib.training.HParams(learning_rate=0.01)\n\n # Extract the id tensor from the input features if it exists in the\n # feature_columns\n id_tensor = None\n if 'id' in features:\n id_tensor = features.pop('id')\n\n # Feature columns for given mode\n feature_cols = mode_feature_cols_map[mode]\n\n # Tensor of logits formed from input features\n logits = tf.feature_column.linear_model(features, feature_cols)\n\n # Apply the logistic function to the logits defined above\n # This is our classifier\n logistic = tf.sigmoid(logits, name='logistic')\n\n classifier_output = {\n 'clicked': logistic\n }\n\n if id_tensor is not None:\n classifier_output['id'] = tf.identity(id_tensor)\n\n loss = None\n train_op = None\n\n if mode in (MODES.TRAIN, MODES.EVAL):\n loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=labels, name='loss')\n )\n\n if mode == MODES.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n train_op = tf.train.GradientDescentOptimizer(\n learning_rate=params.learning_rate\n ).minimize(loss, global_step=global_step)\n\n eval_metric_ops = None\n\n if mode == MODES.EVAL:\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(labels, logistic)}\n\n # Define serving signatures\n prediction_output = tf.estimator.export.PredictOutput(\n classifier_output)\n\n export_outputs = {\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n prediction_output\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=classifier_output,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs\n )\n\n return model_fn", "def embed_functions(path_to_output, path_to_models, path_to_histories=None, autoencoder=False,\n dimension=256, epochs=150, batch_size=2048):\n\n # Get functional signatures\n output_tensor = np.load(path_to_output, mmap_mode='r')\n\n # Number of unique functions\n num_fun = output_tensor.shape[-1]\n\n # Define model parameters\n if autoencoder:\n output_tensor = output_tensor/ np.max(output_tensor, axis=-1)\n input_tensor = output_tensor\n activation = 'sigmoid'\n loss = 'mse'\n else:\n input_tensor = np.identity(num_fun)\n 
activation = 'softmax'\n loss = 'categorical_crossentropy'\n\n # Embedding model\n model_input = Input(shape=(num_fun,), name='input')\n embedding = Dense(dimension, kernel_initializer=he_uniform(), activation='tanh', name='embedding')(model_input)\n model_output = Dense(num_fun, kernel_initializer=he_uniform(), activation=activation, name='output')(embedding)\n model = Model(model_input, model_output, name='encoder_decoder')\n model.summary()\n\n # Compile model\n model.compile(optimizer=RMSprop(decay=1e-8), loss=loss, metrics=['accuracy'])\n\n # Train model\n history = model.fit(input_tensor, output_tensor, epochs=epochs, batch_size=batch_size, shuffle=True)\n\n # Save history\n if path_to_histories is not None:\n with open(os.path.join(path_to_histories, 'embedding_model_history.pickle'), 'wb') as dictionary:\n pickle.dump(history.history, dictionary, protocol=pickle.HIGHEST_PROTOCOL)\n\n # Save trained model\n model.save(os.path.join(path_to_models, 'embedding_model.h5'))", "def save_model(self, path):\n # Save server model\n self.server_model.set_params(self.model)\n model_sess = self.server_model.sess\n return self.server_model.saver.save(model_sess, path)", "def run_model(model):\n\n model.create_initialised_input()\n\n model.run_from_buffer()\n\n output = model.output_parse()\n return output", "def save_model(\n tf_saved_model_dir,\n tf_meta_graph_tags,\n tf_signature_def_key,\n path,\n mlflow_model=None,\n conda_env=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n pip_requirements=None,\n extra_pip_requirements=None,\n):\n _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)\n\n _logger.info(\n \"Validating the specified TensorFlow model by attempting to load it in a new TensorFlow\"\n \" graph...\"\n )\n _validate_saved_model(\n tf_saved_model_dir=tf_saved_model_dir,\n tf_meta_graph_tags=tf_meta_graph_tags,\n tf_signature_def_key=tf_signature_def_key,\n )\n _logger.info(\"Validation succeeded!\")\n\n if os.path.exists(path):\n raise MlflowException(\"Path '{}' already exists\".format(path), DIRECTORY_NOT_EMPTY)\n os.makedirs(path)\n if mlflow_model is None:\n mlflow_model = Model()\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n root_relative_path = _copy_file_or_tree(src=tf_saved_model_dir, dst=path, dst_dir=None)\n model_dir_subpath = \"tfmodel\"\n model_dir_path = os.path.join(path, model_dir_subpath)\n shutil.move(os.path.join(path, root_relative_path), model_dir_path)\n\n flavor_conf = dict(\n saved_model_dir=model_dir_subpath,\n meta_graph_tags=tf_meta_graph_tags,\n signature_def_key=tf_signature_def_key,\n )\n mlflow_model.add_flavor(FLAVOR_NAME, **flavor_conf)\n pyfunc.add_to_model(mlflow_model, loader_module=\"mlflow.tensorflow\", env=_CONDA_ENV_FILE_NAME)\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n\n if conda_env is None:\n if pip_requirements is None:\n default_reqs = get_default_pip_requirements()\n # To ensure `_load_pyfunc` can successfully load the model during the dependency\n # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.\n inferred_reqs = mlflow.models.infer_pip_requirements(\n path,\n FLAVOR_NAME,\n fallback=default_reqs,\n )\n default_reqs = sorted(set(inferred_reqs).union(default_reqs))\n else:\n default_reqs = None\n conda_env, pip_requirements, pip_constraints = _process_pip_requirements(\n default_reqs,\n pip_requirements,\n 
extra_pip_requirements,\n )\n else:\n conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)\n\n with open(os.path.join(path, _CONDA_ENV_FILE_NAME), \"w\") as f:\n yaml.safe_dump(conda_env, stream=f, default_flow_style=False)\n\n # Save `constraints.txt` if necessary\n if pip_constraints:\n write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), \"\\n\".join(pip_constraints))\n\n # Save `requirements.txt`\n write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), \"\\n\".join(pip_requirements))", "def save_model_to_file(self, fn):\n js = json.loads(self.model.to_json())\n\n # Add this model's params\n js[\"rnn\"] = self.to_json()\n with open(fn, 'w') as fout:\n json.dump(js, fout)\n with open(os.path.join(self.model_dir, \"model.summary\"),'w') as summaryFile:\n self.model.summary(print_fn = lambda s: print(s, file=summaryFile))", "def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function", "def save_model(self,\n file_path: str = \"models/world_model.h5\"\n ):\n self.model.save(file_path)", "def run_model(training_data_path, test_data_path, results_path, similarity_score, save_model, new_model_running,\n algorithm_path, threshold, features_list, target_features_list, train_scaler_path, target_scaler_path,\n event):\n\n clear_session()\n\n grid_dictionary = get_lstm_grid_params()\n # Choose between new model creation flow and load existing model flow\n if new_model_running:\n window_size, encoding_dimension, activation, loss, optimizer, threshold, epochs = get_lstm_new_model_parameters()\n else:\n lstm = load_model(algorithm_path)\n window_size = lstm.get_input_shape_at(0)[1]\n X_train_scaler = pickle.load(open(train_scaler_path, 'rb'))\n Y_train_scaler = pickle.load(open(target_scaler_path, 'rb'))\n X_train = None\n Y_train = None\n\n FLIGHT_ROUTES = get_subdirectories(test_data_path)\n\n current_time = get_current_time()\n\n current_time_path = os.path.join(*[str(results_path), 'lstm', str(current_time)])\n create_directories(f\"{current_time_path}\")\n\n # Create sub directories for each similarity function\n for similarity in similarity_score:\n similarity_path = os.path.join(*[str(current_time_path), str(similarity)])\n create_directories(f\"{similarity_path}\")\n\n # Train the model for each flight route\n for flight_route in FLIGHT_ROUTES:\n\n # Execute training for new model flow\n if new_model_running:\n lstm, X_train_scaler, Y_train_scaler, X_train, Y_train = execute_train(flight_route,\n training_data_path=training_data_path,\n results_path=f\"{current_time_path}\",\n window_size=window_size,\n encoding_dimension=encoding_dimension,\n activation=activation,\n loss=loss,\n optimizer=optimizer,\n add_plots=True,\n features_list=features_list,\n epochs=epochs,\n target_features_list=target_features_list,\n event=event)\n\n # Get results for each similarity function\n for similarity in similarity_score:\n current_results_path = os.path.join(*[str(current_time_path), str(similarity), str(flight_route)])\n create_directories(f\"{current_results_path}\")\n tpr_scores, fpr_scores, acc_scores, delay_scores, routes_duration, attack_duration, auc_scores, best_params = execute_predict(\n flight_route,\n test_data_path=test_data_path,\n similarity_score=similarity,\n window_size=window_size,\n threshold=threshold,\n lstm=lstm,\n X_train_scaler=X_train_scaler,\n results_path=current_results_path,\n add_plots=True,\n 
run_new_model=new_model_running,\n X_train=X_train,\n features_list=features_list,\n target_features_list=target_features_list,\n save_model=save_model,\n Y_train_scaler=Y_train_scaler,\n Y_train=Y_train,\n event=event,\n grid_dictionary=grid_dictionary\n )\n\n df = pd.DataFrame(tpr_scores)\n tpr_path = os.path.join(*[str(current_results_path), str(flight_route) + '_tpr.csv'])\n df.to_csv(f\"{tpr_path}\", index=False)\n\n df = pd.DataFrame(fpr_scores)\n fpr_path = os.path.join(*[str(current_results_path), str(flight_route) + '_fpr.csv'])\n df.to_csv(f\"{fpr_path}\", index=False)\n\n df = pd.DataFrame(acc_scores)\n acc_path = os.path.join(*[str(current_results_path), str(flight_route) + '_acc.csv'])\n df.to_csv(f\"{acc_path}\", index=False)\n\n df = pd.DataFrame(delay_scores)\n delay_path = os.path.join(*[str(current_results_path), str(flight_route) + '_delay.csv'])\n df.to_csv(f\"{delay_path}\", index=False)\n\n df = pd.DataFrame(auc_scores)\n auc_path = os.path.join(*[str(current_results_path), str(flight_route) + '_auc.csv'])\n df.to_csv(f\"{auc_path}\", index=False)\n\n df = pd.DataFrame(best_params)\n best_params_path = os.path.join(*[str(current_results_path), str(flight_route) + '_params.csv'])\n df.to_csv(f\"{best_params_path}\", index=False)\n\n algorithm_name = \"LSTM\"\n\n # Report results for training data to csv files\n for similarity in similarity_score:\n report_similarity_path = os.path.join(*[str(results_path), 'lstm', str(current_time), str(similarity)])\n report_results(f\"{report_similarity_path}\",\n test_data_path,\n FLIGHT_ROUTES,\n algorithm_name,\n similarity,\n routes_duration,\n attack_duration)", "def model_fn(model_dir):\n with open(os.path.join(model_dir, 'model.pkl'), 'rb') as pickle_file:\n model = pickle.load(pickle_file)\n return model", "def download_model_from_url(\n url: str = typer.Argument(..., help='The link to a model'),\n path: Path = typer.Argument(..., file_okay=True, help='The saved path and file name.')\n):\n\n from modelci.hub.registrar import download_model_from_url\n\n download_model_from_url(url, path)\n typer.echo(f'{path} model downloaded successfully.')", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def save(model: nn.Module, path):\n save_model(model, path)", "def save_model(self, fpath):\n self._make_model_folder(fpath)\n self.model.save(os.path.join(fpath, U.MODEL_NAME), save_format=\"h5\")\n return", "def save_model(self, model_path):\n MODEL_FILE = 'simple_model.dat'\n\n model = {\n 'm': self.m.tolist(),\n 'b': self.b,\n 'FEAST_CORE_URL': FEAST_CORE_URL,\n 'FEAST_SERVING_URL': FEAST_SERVING_URL,\n 'ENTITY_ID': ENTITY_ID,\n }\n \n logging.info('Saving model to %s', model_path)\n\n with open(model_path, 'w+') as f:\n json.dump(model, f)", "def save_model_to_file(self, fn):\n js = json.loads(self.model.to_json())\n\n # Add this model's params\n js[\"rnn\"] = self.to_json()\n with open(fn, 'w') as fout:\n json.dump(js, fout)", "def save_model(model, output):\n\n # model.save(os.path.join(output))\n tf.saved_model.save(model, os.path.join(output, \"1\"))\n\n # tf.saved_model.save(model, os.path.join(output, \"1\"))\n print(\"Model successfully saved at: {}\".format(output))", "def 
train(self, model_id: str, service_config=None):\n payload = dict()\n if service_config is not None:\n payload['serviceConfig'] = service_config\n success, response = self._client_api.gen_request(req_type=\"post\",\n path=f\"/ml/models/{model_id}/train\",\n json_req=payload)\n if not success:\n raise exceptions.PlatformException(response)\n return entities.Execution.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project)", "def serve_image_fn(image_tensor):\n return model(image_tensor)", "def save(path_to_model):\n pass", "def save_model(model, model_path) -> None:\n try:\n createFolder(model_path)\n exportJSON(model.to_json(), model_path+'/model.json')\n model.save_weights(model_path+'/model_weights.h5')\n except Exception as error:\n print(f\"Error: save_model(model, {model_path}) -> {error}\")", "def run_model (arguments):\n if arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n 
sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, 
os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def tflite_function(self):\n with self.enable_tflite_mode():\n return self.serve_function()", "def evaluate(self, model_id: str, dataset_id: str, filters: entities.Filters = None, service_config=None):\n\n payload = {'input': {'datasetId': dataset_id}}\n if service_config is not None:\n payload['config'] = {'serviceConfig': service_config}\n if filters is None:\n filters = entities.Filters()\n if filters is not None:\n payload['input']['datasetQuery'] = filters.prepare()\n success, response = self._client_api.gen_request(req_type=\"post\",\n path=f\"/ml/models/{model_id}/evaluate\",\n json_req=payload)\n if not success:\n raise exceptions.PlatformException(response)\n return entities.Execution.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project)", "def save_model(model, model_filepath):\n\n logging.info(\"run save_model\")\n\n # save model with jolib library\n joblib.dump(model, model_filepath)", "def fsl_run_level_wf(\n model,\n step,\n bids_dir,\n output_dir,\n work_dir,\n subject_id,\n database_path,\n smoothing_fwhm=None,\n smoothing_level=None,\n smoothing_type=None,\n use_rapidart=False,\n detrend_poly=None,\n align_volumes=None,\n smooth_autocorrelations=False,\n despike=False,\n name=\"fsl_run_level_wf\",\n):\n bids_dir = Path(bids_dir)\n work_dir = Path(work_dir)\n workflow = pe.Workflow(name=name)\n\n level = step[\"Level\"]\n\n dimensionality = 3 # Nipype FSL.SUSAN Default\n if smoothing_type == \"inp\":\n dimensionality = 2\n\n workflow.__desc__ = \"\"\n (work_dir / model[\"Name\"]).mkdir(exist_ok=True)\n\n include_entities = {}\n if \"Input\" in model:\n if \"Include\" in model[\"Input\"]:\n include_entities = model[\"Input\"][\"Include\"]\n include_entities.update({\"subject\": subject_id})\n\n getter = pe.Node(\n BIDSGet(\n database_path=database_path,\n fixed_entities=include_entities,\n align_volumes=align_volumes,\n ),\n name=\"func_select\",\n )\n\n get_info = pe.MapNode(\n GetRunModelInfo(model=step, detrend_poly=detrend_poly),\n iterfield=[\"metadata_file\", \"regressor_file\", \"events_file\", \"entities\"],\n name=f\"get_{level}_info\",\n )\n\n despiker = pe.MapNode(\n afni.Despike(outputtype=\"NIFTI_GZ\"), iterfield=[\"in_file\"], name=\"despiker\",\n )\n\n realign_runs = pe.MapNode(\n fsl.MCFLIRT(output_type=\"NIFTI_GZ\", interpolation=\"sinc\"),\n iterfield=[\"in_file\", \"ref_file\"],\n name=\"func_realign\",\n )\n\n wrangle_volumes = pe.MapNode(\n IdentityInterface(fields=[\"functional_file\"]),\n iterfield=[\"functional_file\"],\n name=\"wrangle_volumes\",\n )\n\n specify_model = pe.MapNode(\n modelgen.SpecifyModel(high_pass_filter_cutoff=-1.0, input_units=\"secs\"),\n iterfield=[\"functional_runs\", \"subject_info\", \"time_repetition\"],\n name=f\"model_{level}_specify\",\n )\n\n 
fit_model = pe.MapNode(\n IdentityInterface(\n fields=[\"session_info\", \"interscan_interval\", \"contrasts\", \"functional_data\"],\n mandatory_inputs=True,\n ),\n iterfield=[\"functional_data\", \"session_info\", \"interscan_interval\", \"contrasts\"],\n name=f\"model_{level}_fit\",\n )\n\n first_level_design = pe.MapNode(\n fsl.Level1Design(bases={\"dgamma\": {\"derivs\": False}}, model_serial_correlations=False,),\n iterfield=[\"session_info\", \"interscan_interval\", \"contrasts\"],\n name=f\"model_{level}_design\",\n )\n\n generate_model = pe.MapNode(\n fsl.FEATModel(output_type=\"NIFTI_GZ\"),\n iterfield=[\"fsf_file\", \"ev_files\"],\n name=f\"model_{level}_generate\",\n )\n\n estimate_model = pe.MapNode(\n fsl.FILMGLS(\n threshold=0.0, # smooth_autocorr=True\n output_type=\"NIFTI_GZ\",\n results_dir=\"results\",\n smooth_autocorr=False,\n autocorr_noestimate=True,\n ),\n iterfield=[\"design_file\", \"in_file\", \"tcon_file\"],\n name=f\"model_{level}_estimate\",\n )\n\n if smooth_autocorrelations:\n first_level_design.inputs.model_serial_correlations = True\n estimate_model.inputs.smooth_autocorr = True\n estimate_model.inputs.autocorr_noestimate = False\n\n calculate_p = pe.MapNode(\n fsl.ImageMaths(output_type=\"NIFTI_GZ\", op_string=\"-ztop\", suffix=\"_pval\"),\n iterfield=[\"in_file\"],\n name=f\"model_{level}_caculate_p\",\n )\n\n image_pattern = (\n \"[sub-{subject}/][ses-{session}/]\"\n \"[sub-{subject}_][ses-{session}_]\"\n \"task-{task}_[acq-{acquisition}_]\"\n \"[rec-{reconstruction}_][run-{run}_]\"\n \"[echo-{echo}_][space-{space}_]contrast-{contrast}_\"\n \"stat-{stat<effect|variance|z|p|t|F>}_statmap.nii.gz\"\n )\n\n run_rapidart = pe.MapNode(\n ra.ArtifactDetect(\n use_differences=[True, False],\n use_norm=True,\n zintensity_threshold=3,\n norm_threshold=1,\n bound_by_brainmask=True,\n mask_type=\"file\",\n parameter_source=\"FSL\",\n ),\n iterfield=[\"realignment_parameters\", \"realigned_files\", \"mask_file\"],\n name=\"rapidart_run\",\n )\n\n reshape_rapidart = pe.MapNode(\n Function(\n input_names=[\"run_info\", \"functional_file\", \"outlier_file\", \"contrast_entities\"],\n output_names=[\"run_info\", \"contrast_entities\"],\n function=utils.reshape_ra,\n ),\n iterfield=[\"run_info\", \"functional_file\", \"outlier_file\", \"contrast_entities\"],\n name=\"reshape_rapidart\",\n )\n\n mean_img = pe.MapNode(\n fsl.ImageMaths(output_type=\"NIFTI_GZ\", op_string=\"-Tmean\", suffix=\"_mean\"),\n iterfield=[\"in_file\", \"mask_file\"],\n name=\"smooth_susan_avgimg\",\n )\n\n median_img = pe.MapNode(\n fsl.ImageStats(output_type=\"NIFTI_GZ\", op_string=\"-k %s -p 50\"),\n iterfield=[\"in_file\", \"mask_file\"],\n name=\"smooth_susan_medimg\",\n )\n\n merge = pe.Node(Merge(2, axis=\"hstack\"), name=\"smooth_merge\")\n\n run_susan = pe.MapNode(\n fsl.SUSAN(output_type=\"NIFTI_GZ\"),\n iterfield=[\"in_file\", \"brightness_threshold\", \"usans\"],\n name=\"smooth_susan\",\n )\n\n mask_functional = pe.MapNode(\n ApplyMask(), iterfield=[\"in_file\", \"mask_file\"], name=\"mask_functional\"\n )\n\n # Exists solely to correct undesirable behavior of FSL\n # that results in loss of constant columns\n correct_matrices = pe.MapNode(\n Function(\n input_names=[\"design_matrix\"],\n output_names=[\"design_matrix\"],\n function=utils.correct_matrix,\n ),\n iterfield=[\"design_matrix\"],\n run_without_submitting=True,\n name=f\"correct_{level}_matrices\",\n )\n\n collate = pe.Node(\n MergeAll(\n fields=[\n \"effect_maps\",\n \"variance_maps\",\n \"zscore_maps\",\n 
\"pvalue_maps\",\n \"tstat_maps\",\n \"contrast_metadata\",\n ],\n check_lengths=True,\n ),\n name=f\"collate_{level}\",\n )\n\n collate_outputs = pe.Node(\n CollateWithMetadata(\n fields=[\"effect_maps\", \"variance_maps\", \"zscore_maps\", \"pvalue_maps\", \"tstat_maps\"],\n field_to_metadata_map={\n \"effect_maps\": {\"stat\": \"effect\"},\n \"variance_maps\": {\"stat\": \"variance\"},\n \"zscore_maps\": {\"stat\": \"z\"},\n \"pvalue_maps\": {\"stat\": \"p\"},\n \"tstat_maps\": {\"stat\": \"t\"},\n },\n ),\n name=f\"collate_{level}_outputs\",\n )\n\n plot_matrices = pe.MapNode(\n PlotMatrices(output_dir=output_dir, database_path=database_path),\n iterfield=[\"mat_file\", \"con_file\", \"entities\", \"run_info\"],\n run_without_submitting=True,\n name=f\"plot_{level}_matrices\",\n )\n\n ds_contrast_maps = pe.MapNode(\n BIDSDataSink(base_directory=output_dir, path_patterns=image_pattern),\n iterfield=[\"entities\", \"in_file\"],\n run_without_submitting=True,\n name=f\"ds_{level}_contrast_maps\",\n )\n\n wrangle_outputs = pe.Node(\n IdentityInterface(fields=[\"contrast_metadata\", \"contrast_maps\"]),\n name=f\"wrangle_{level}_outputs\",\n )\n\n # Setup connections among nodes\n workflow.connect(\n [\n (\n getter,\n get_info,\n [\n (\"metadata_files\", \"metadata_file\"),\n (\"events_files\", \"events_file\"),\n (\"regressor_files\", \"regressor_file\"),\n (\"entities\", \"entities\"),\n ],\n )\n ]\n )\n\n if align_volumes and despike:\n workflow.connect(\n [\n (getter, despiker, [(\"functional_files\", \"in_file\")]),\n (despiker, realign_runs, [(\"out_file\", \"in_file\")]),\n (getter, realign_runs, [(\"reference_files\", \"ref_file\")]),\n (realign_runs, wrangle_volumes, [(\"out_file\", \"functional_file\")],),\n ]\n )\n elif align_volumes and not despike:\n workflow.connect(\n [\n (\n getter,\n realign_runs,\n [(\"functional_files\", \"in_file\"), (\"reference_files\", \"ref_file\")],\n ),\n (realign_runs, wrangle_volumes, [(\"out_file\", \"functional_file\")],),\n ]\n )\n elif despike:\n workflow.connect(\n [\n (getter, despiker, [(\"functional_files\", \"in_file\")]),\n (despiker, wrangle_volumes, [(\"out_file\", \"functional_file\")]),\n ]\n )\n else:\n workflow.connect([(getter, wrangle_volumes, [(\"functional_files\", \"functional_file\")])])\n\n if use_rapidart:\n workflow.connect(\n [\n (get_info, run_rapidart, [(\"motion_parameters\", \"realignment_parameters\")]),\n (getter, run_rapidart, [(\"mask_files\", \"mask_file\")]),\n (wrangle_volumes, run_rapidart, [(\"functional_file\", \"realigned_files\")],),\n (run_rapidart, reshape_rapidart, [(\"outlier_files\", \"outlier_file\")],),\n (\n get_info,\n reshape_rapidart,\n [(\"run_info\", \"run_info\"), (\"contrast_entities\", \"contrast_entities\")],\n ),\n (wrangle_volumes, reshape_rapidart, [(\"functional_file\", \"functional_file\")]),\n (reshape_rapidart, specify_model, [(\"run_info\", \"subject_info\")],),\n (reshape_rapidart, plot_matrices, [(\"run_info\", \"run_info\")]),\n (reshape_rapidart, collate, [(\"contrast_entities\", \"contrast_metadata\")]),\n ]\n )\n else:\n workflow.connect(\n [\n (get_info, specify_model, [(\"run_info\", \"subject_info\")]),\n (get_info, plot_matrices, [(\"run_info\", \"run_info\")]),\n (get_info, collate, [(\"contrast_entities\", \"contrast_metadata\")],),\n ]\n )\n\n if smoothing_level == \"l1\" or smoothing_level == \"run\":\n run_susan.inputs.fwhm = smoothing_fwhm\n run_susan.inputs.dimension = dimensionality\n estimate_model.inputs.mask_size = smoothing_fwhm\n workflow.connect(\n [\n 
(wrangle_volumes, mean_img, [(\"functional_file\", \"in_file\")]),\n (wrangle_volumes, median_img, [(\"functional_file\", \"in_file\")],),\n (getter, mean_img, [(\"mask_files\", \"mask_file\")]),\n (getter, median_img, [(\"mask_files\", \"mask_file\")]),\n (mean_img, merge, [(\"out_file\", \"in1\")]),\n (median_img, merge, [(\"out_stat\", \"in2\")]),\n (wrangle_volumes, run_susan, [(\"functional_file\", \"in_file\")]),\n (\n median_img,\n run_susan,\n [((\"out_stat\", utils.get_btthresh), \"brightness_threshold\",)],\n ),\n (merge, run_susan, [((\"out\", utils.get_usans), \"usans\")]),\n (getter, mask_functional, [(\"mask_files\", \"mask_file\")]),\n (run_susan, mask_functional, [(\"smoothed_file\", \"in_file\")]),\n (mask_functional, specify_model, [(\"out_file\", \"functional_runs\")],),\n (mask_functional, fit_model, [(\"out_file\", \"functional_data\")],),\n ]\n )\n\n else:\n workflow.connect(\n [\n (getter, mask_functional, [(\"mask_files\", \"mask_file\")]),\n (wrangle_volumes, mask_functional, [(\"functional_file\", \"in_file\")],),\n (mask_functional, specify_model, [(\"out_file\", \"functional_runs\")],),\n (mask_functional, fit_model, [(\"out_file\", \"functional_data\")],),\n ]\n )\n\n workflow.connect(\n [\n (get_info, specify_model, [(\"repetition_time\", \"time_repetition\")],),\n (specify_model, fit_model, [(\"session_info\", \"session_info\")]),\n (\n get_info,\n fit_model,\n [(\"repetition_time\", \"interscan_interval\"), (\"run_contrasts\", \"contrasts\")],\n ),\n (\n fit_model,\n first_level_design,\n [\n (\"interscan_interval\", \"interscan_interval\"),\n (\"session_info\", \"session_info\"),\n (\"contrasts\", \"contrasts\"),\n ],\n ),\n (first_level_design, generate_model, [(\"fsf_files\", \"fsf_file\")]),\n (first_level_design, generate_model, [(\"ev_files\", \"ev_files\")]),\n ]\n )\n\n if detrend_poly:\n workflow.connect(\n [\n (generate_model, correct_matrices, [(\"design_file\", \"design_matrix\")],),\n (correct_matrices, plot_matrices, [(\"design_matrix\", \"mat_file\")],),\n (correct_matrices, estimate_model, [(\"design_matrix\", \"design_file\")],),\n ]\n )\n\n else:\n workflow.connect(\n [\n (generate_model, plot_matrices, [(\"design_file\", \"mat_file\")]),\n (generate_model, estimate_model, [(\"design_file\", \"design_file\")],),\n ]\n )\n\n workflow.connect(\n [\n (getter, plot_matrices, [(\"entities\", \"entities\")]),\n (generate_model, plot_matrices, [(\"con_file\", \"con_file\")]),\n (fit_model, estimate_model, [(\"functional_data\", \"in_file\")]),\n (generate_model, estimate_model, [(\"con_file\", \"tcon_file\")]),\n (estimate_model, calculate_p, [((\"zstats\", utils.flatten), \"in_file\")],),\n (\n estimate_model,\n collate,\n [\n (\"copes\", \"effect_maps\"),\n (\"varcopes\", \"variance_maps\"),\n (\"zstats\", \"zscore_maps\"),\n (\"tstats\", \"tstat_maps\"),\n ],\n ),\n (calculate_p, collate, [(\"out_file\", \"pvalue_maps\")]),\n (\n collate,\n collate_outputs,\n [\n (\"effect_maps\", \"effect_maps\"),\n (\"variance_maps\", \"variance_maps\"),\n (\"zscore_maps\", \"zscore_maps\"),\n (\"pvalue_maps\", \"pvalue_maps\"),\n (\"tstat_maps\", \"tstat_maps\"),\n (\"contrast_metadata\", \"metadata\"),\n ],\n ),\n (collate_outputs, ds_contrast_maps, [(\"out\", \"in_file\"), (\"metadata\", \"entities\")],),\n (\n collate_outputs,\n wrangle_outputs,\n [(\"metadata\", \"contrast_metadata\"), (\"out\", \"contrast_maps\")],\n ),\n ]\n )\n\n return workflow", "async def process(document_id, text, model, out='downloads/'):\n\n file_id = upload(text)\n data = {\n 
'lpmn': 'any2txt|wcrft2({\"morfeusz2\":false})|liner2({\"model\":\"'+model+'\"})',\n 'user': 'geocoder',\n 'file': file_id\n }\n\n response, errors = start_task(data)\n\n if errors is not None:\n return {'errors': errors}\n\n if response is not None:\n response = response[0][\"fileID\"]\n content = urlopen(Request(url + '/download' + response)).read().decode()\n with open(out + os.path.basename(document_id) + '.' + model, \"w\") as outfile:\n outfile.write(content)\n\n return {'model': model,\n 'path': out + os.path.basename(document_id) + '.' + model,\n 'errors': None}", "def serve_function(self):\n # Set name attribute of the input TensorSpec.\n input_signature = {\n name: tf.TensorSpec.from_spec(spec, name=name)\n for name, spec in self.features_inputter.input_signature().items()\n }\n\n @tf.function(input_signature=(input_signature,))\n def _run(features):\n features = self.features_inputter.make_features(features=features.copy())\n _, predictions = self(features)\n return predictions\n\n return _run", "def main(_):\n if not FLAGS.model_output_dir:\n raise ValueError(\n \"Undefined model output directory. Perhaps you forgot to set the --model_output_dir flag?\")\n \n if FLAGS.predict_input_file:\n decode()\n else:\n train()", "def download_model(\n self, model_name: str, save_path: Union[str, Path] = \"./zoo/model.pth\"\n ):\n # check if the model name is valide\n assert model_name in list(\n self.meta.name\n ), f\"requested model {model_name} does not exist\"\n # TODO: fix, using pathlib\n # check if save_path already has the model\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n else:\n assert not os.path.exists(\n save_path\n ), f\"the save_path {save_path} is already used\"\n\n # fetch the model file\n model_id = self.meta[self.meta[\"name\"] == model_name][\"models\"].iloc[0]\n model_path = model_id.split(\"/\")\n self.pkg[model_path[0]][model_path[1]].fetch(save_path)", "def run_model(request):\n\n params = unpack_post_parameters(request.POST)\n\n config = []\n df = pd.DataFrame(params['mid_atlantic_mortality'],\n columns=params['mid_atlantic_active_areas'])\n config.append(df)\n df = pd.DataFrame(params['georges_bank_mortality'],\n columns=params['georges_bank_active_areas'])\n config.append(df)\n\n # Invoke the SAMS model wrapper, which in turn runs the model.\n with tempfile.TemporaryDirectory() as tdir:\n s = SamsWrapper(outdir=tdir, numruns=100, startyear=params['start_year'],\n access_area_management=config,\n open_area_f=params['open_area_f_mortality'])\n s.run()\n\n create_web_outputs(params, s)\n\n return render(request, 'results.html')", "def get_workload(model_path):\n\n repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/'\n model_name = os.path.basename(model_path)\n model_url = os.path.join(repo_base, model_path)\n\n from mxnet.gluon.utils import download\n download(model_url, model_name)\n\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\"./\", model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.import_graph_def(graph_def, name='')\n return graph_def", "def _call_model_fn(self, features, labels, is_export_mode=False):\n model_fn_args = util.fn_args(self._model_fn)\n kwargs = {}\n\n # Makes deep copy with `config` and params` in case user mutates them.\n config = copy.deepcopy(self._config)\n params = copy.deepcopy(self._params)\n\n if 'labels' in model_fn_args:\n kwargs['labels'] = labels\n elif labels is not 
None:\n raise ValueError(\n 'model_fn does not take labels, but input_fn returns labels.')\n if 'mode' in model_fn_args:\n kwargs['mode'] = self._ctx.mode\n if 'config' in model_fn_args:\n kwargs['config'] = config\n if 'params' in model_fn_args:\n kwargs['params'] = params\n\n if 'params' not in model_fn_args:\n raise ValueError('model_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\\'batch_size\\']'.format(self._model_fn))\n\n if is_export_mode:\n batch_size_for_model_fn = None\n else:\n batch_size_for_model_fn = self._ctx.batch_size_for_model_fn\n\n if batch_size_for_model_fn is not None:\n if isinstance(params, hparam.HParams):\n params.add_hparam(_BATCH_SIZE_KEY, batch_size_for_model_fn)\n else:\n params[_BATCH_SIZE_KEY] = batch_size_for_model_fn\n\n estimator_spec = self._model_fn(features=features, **kwargs)\n if (self._ctx.is_running_on_cpu(is_export_mode) and\n isinstance(estimator_spec, TPUEstimatorSpec)):\n # The estimator_spec will be passed to `Estimator` directly, which expects\n # type `EstimatorSpec`.\n return estimator_spec.as_estimator_spec()\n else:\n return estimator_spec", "def model_fn(model_dir):\n model = models.resnet50(pretrained=True)\n\n _ = model.eval()\n\n modules=list(model.children())[:-1]\n model=nn.Sequential(*modules)\n for p in model.parameters():\n p.requires_grad = False\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n\n model = model.to(device)\n\n return model", "def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render", "def log_model(artifact_path, **kwargs):\n with TempDir() as tmp:\n local_path = tmp.path(artifact_path)\n run_id = active_run().info.run_uuid\n if 'model' in kwargs:\n raise Exception(\"Unused argument 'model'. 
log_model creates a new model object\")\n\n save_model(dst_path=local_path, model=Model(artifact_path=artifact_path, run_id=run_id),\n **kwargs)\n log_artifacts(local_path, artifact_path)", "def load_model(model_uri, dst_path=None): # noqa: E501\n _check_soft_dependencies(\"mlflow\", severity=\"error\")\n from mlflow.tracking.artifact_utils import _download_artifact_from_uri\n from mlflow.utils.model_utils import (\n _add_code_from_conf_to_system_path,\n _get_flavor_configuration,\n )\n\n local_model_path = _download_artifact_from_uri(\n artifact_uri=model_uri, output_path=dst_path\n )\n flavor_conf = _get_flavor_configuration(\n model_path=local_model_path, flavor_name=FLAVOR_NAME\n )\n _add_code_from_conf_to_system_path(local_model_path, flavor_conf)\n sktime_model_file_path = os.path.join(\n local_model_path, flavor_conf[\"pickled_model\"]\n )\n serialization_format = flavor_conf.get(\n \"serialization_format\", SERIALIZATION_FORMAT_PICKLE\n )\n return _load_model(\n path=sktime_model_file_path, serialization_format=serialization_format\n )", "def save_model(self, filename):\r\n pass", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def save_model(path_name, model):\n\n # Specify a path\n PATH = path_name\n \n # Save\n torch.save(model.state_dict(), PATH)", "def save_model(model):\n\n # model.save(os.path.join(output))\n # model.save(output)\n\n # key = \"{}/{}/examples\".format(prefix,data_partition_name)\n # url = 's3://{}/{}'.format(bucket, key)\n # boto3.Session().resource('s3').Bucket(bucket).Object(key).upload_file('data.csv')\n # print('Done writing to {}'.format(url))\n \n model.save('output/sentiment_model.h5')\n\n s3 = boto3.resource('s3')\n s3.meta.client.upload_file('output/sentiment_model.h5', 'ieor4577-hw4', 'sentiment_model.h5')\n\n # tf.saved_model.save(model, os.path.join(output, \"1\"))\n print(\"Model successfully saved\")", "def save_model(model, model_index, args):\n logger.info(\"saving local model-{}\".format(model_index))\n with open(args.modeldir+\"trained_local_model\"+str(model_index), \"wb\") as f_:\n torch.save(model.state_dict(), f_)\n return", "def load_model(fn, model):\n if fn[-3] != \".tf\":\n fn += \".tf\"\n if model.saver is None:\n with model.graph.as_default():\n model.saver = tf.train.Saver()\n log(\"Loading model from {}\".format(fn))\n model.saver.restore(model.session, fn)\n log(\"Done loading!\")", "def _make_model_v2():\n class CustomModule(tf.Module):\n\n def __init__(self):\n super().__init__()\n self.m = tf.Variable([1.0, 1.0, 1.0], name='slope')\n\n @tf.function\n def __call__(self, x):\n y = self.m * x + 1\n return y\n\n @tf.function(input_signature=[tf.TensorSpec((None, 3), tf.float32)])\n def length(self, x):\n return tf.reduce_sum(self(x) - x, name='length')\n\n @tf.function(input_signature=[tf.TensorSpec([], tf.float32),\n tf.TensorSpec((None, 3), tf.float32)])\n def scalar_multiply(self, z, x):\n return tf.multiply(z, x, 
name='scale_mult')\n\n module = CustomModule()\n\n # Make a concrete version of __call__\n call = module.__call__.get_concrete_function(tf.TensorSpec((None, 3)))\n\n tf.saved_model.save(\n module, tf_export_path, signatures={\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: call,\n 'length': module.length,\n 'scalar_multiply': module.scalar_multiply\n }\n )", "def load_model(input_path):\n logger.info(\"Loading model from %s\" % input_path)\n return pyspark.ml.PipelineModel.load(input_path)", "def test_functional(self, model_fn):\n if h5py is None:\n self.skipTest('h5py required to run this test')\n\n def _make_model():\n inputs = (keras.Input(shape=(4,), name='examples'),\n keras.Input(shape=(4,), name='neighbors'))\n base_model = model_fn(inputs[0].shape.as_list()[-1], 2)\n outputs = keras.layers.add([base_model(inputs[0]), base_model(inputs[1])])\n return keras.Model(inputs=inputs, outputs=outputs)\n\n with self.cached_session():\n x = (np.random.normal(size=(16, 4)).astype(np.float32),\n np.random.normal(size=(16, 4)).astype(np.float32))\n model = _make_model()\n predictions = model(x)\n # Save and reload.\n model_path = os.path.join(self.get_temp_dir(), 'model.h5')\n model.save(model_path)\n del model\n loaded_model = keras.models.load_model(\n model_path,\n custom_objects={\n '_make_subclassed': _make_subclassed,\n '_make_subclassed_built': _make_subclassed_built,\n },\n compile=False)\n self.assertAllClose(loaded_model(x), predictions, 1e-9)", "def download_model():\n logging.info(\"[genreml] Downloading model...\")\n with urllib.request.urlopen(config.FMAModelConfig.FMA_MODEL_URL) as f:\n data = f.read()\n open(config.FMAModelConfig.FMA_MODEL_PATH, 'wb').write(data)\n logging.info(\"[genreml] Model download complete\")", "def serving_input_fn(self):\n label_ids = tf.placeholder(tf.int32, [None], name='label_ids')\n input_ids = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_ids')\n input_mask = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_mask')\n segment_ids = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='segment_ids')\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'label_ids': label_ids,\n 'input_ids': input_ids,\n 'input_mask': input_mask,\n 'segment_ids': segment_ids})()\n return input_fn", "def save_torchscript(\n self,\n save_path: str,\n model_only: bool = False,\n device: Optional[TorchDevice] = None,\n ):\n if device is None:\n device = DEVICE\n\n save_ludwig_model_for_inference(\n save_path,\n self.model,\n self.config_obj.to_dict(),\n self.training_set_metadata,\n model_only=model_only,\n device=device,\n )", "def _evaluate_model(\n run_id: str, dataset_filename: str, dataset_sampling_column: str = None\n):\n fix_multiprocessing_with_keras_on_macos()\n\n run = _get_run(run_id)\n hyperparameters = run.config\n\n # no need to run this on a gpu since it's 1 epoch\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n with ModelBestH5File(run) as model_h5_filepath:\n model = _load_untrainable_model(hyperparameters, model_h5_filepath)\n\n model_name = run.config[\"model_name\"]\n x, y = _get_prepared_dataset(\n model_name, hyperparameters, dataset_filename, dataset_sampling_column\n )\n\n wandb.init(\n config={\n \"run_id\": run_id,\n \"dataset_filename\": dataset_filename,\n \"dataset_sampling_column\": dataset_sampling_column,\n },\n tags=[\"model-evaluation\"],\n )\n\n batch_size = hyperparameters[\"batch_size\"]\n label_scale_factor_mmhg = 
hyperparameters[\"label_scale_factor_mmhg\"]\n acceptable_error_mg_l = hyperparameters[\"acceptable_error_mg_l\"]\n acceptable_fraction_outside_error = hyperparameters[\n \"acceptable_fraction_outside_error\"\n ]\n\n # we're using fit() instead of evaluate() to get the functionality of these callbacks\n # training performance in the results should be ignored, as it can be affected by some\n # training-only layers such as dropout\n model.fit(\n x,\n y,\n batch_size=batch_size,\n epochs=1,\n verbose=2,\n validation_data=(x, y),\n callbacks=[\n ThresholdValMeanAbsoluteErrorOnCustomMetric(\n acceptable_fraction_outside_error=acceptable_fraction_outside_error,\n acceptable_error_mg_l=acceptable_error_mg_l,\n ),\n WandbCallback(verbose=1, monitor=\"val_adjusted_mean_absolute_error\"),\n LogPredictionsAndWeights(\n metric=\"val_adjusted_mean_absolute_error\",\n dataset=([], [], x, y),\n label_scale_factor_mmhg=label_scale_factor_mmhg,\n ),\n ],\n )\n\n # returning model and dataset for use in jupyter notebooks\n return model, (x, y)", "def save_model(path: Path, model, config: dict):\n with open(path / \"model.pkl\", \"wb\") as p:\n pickle.dump(model, p)", "def run_web2py_controller(url):\n # get data from function, and add to output\n # TODO: caching\n url_list = url.split('/')\n data = run_controller_in(url_list[0], url_list[1].replace('.json', ''), current.globalenv)\n view_data = XML(response.json(data['data']))\n\n model_data = '[]'\n if 'model' in data:\n model_data = XML(response.json(data['model']))\n pre_js_str = \"\"\"var viewData = %(view_data)s;\nvar modelData = %(model)s;\n \"\"\" % dict(view_data=view_data, model=model_data) \n\n return pre_js_str", "def save_model(fn, model, ckpt=None):\n if fn[-3] != \".tf\":\n fn += \".tf\"\n if not hasattr(model,\"saver\") or model.saver is None:\n with model.graph.as_default():\n model.saver = tf.train.Saver()\n if ckpt is None:\n ckpt = fn.replace(\".tf\",\".ckpt\")\n ckpt = os.path.basename(ckpt)\n log(\"Saving model to {}\".format(fn))\n model.saver.save(model.session, fn, latest_filename=ckpt)", "def save_model(self, path):\n self._model.save(os.path.join(path, 'trained_model.h5'))\n plot_model(self._model, to_file=os.path.join(path, 'model_structure.png'), show_shapes=True, show_layer_names=True)", "def _example_serving_receiver_fn(tf_transform_output, schema):\n raw_feature_spec = _get_raw_feature_spec(schema)\n raw_feature_spec.pop(_LABEL_KEY)\n\n raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(\n raw_feature_spec, default_batch_size=None)\n serving_input_receiver = raw_input_fn()\n\n transformed_features = tf_transform_output.transform_raw_features(\n serving_input_receiver.features)\n\n return tf.estimator.export.ServingInputReceiver(\n transformed_features, serving_input_receiver.receiver_tensors)", "def model_fn(features, labels, mode):\n\n # Build a Graph that computes predictions from the inference model.\n logits = inference(features, args.hidden1, args.hidden2)\n\n tensors = {}\n # Add to the Graph the Ops for loss calculation.\n if mode == ModeKeys.INFER:\n softmax = tf.nn.softmax(logits)\n tensors['digit'] = tf.argmax(softmax, 1)\n loss_op = None\n else:\n loss_op = loss(logits, labels)\n tensors['loss'] = loss_op\n tf.scalar_summary('loss', loss_op)\n\n # Add to the Graph the Ops for accuracy calculation.\n if mode == ModeKeys.EVAL:\n accuracy_op = evaluation(logits, labels)\n tensors['accuracy'] = accuracy_op\n tf.scalar_summary('training/hptuning/metric', accuracy_op)\n\n # Add to the Graph the Ops 
that calculate and apply gradients.\n if mode == ModeKeys.TRAIN:\n global_step = framework.get_global_step()\n # Create the gradient descent optimizer with the given learning rate.\n optimizer = tf.train.GradientDescentOptimizer(args.learning_rate)\n # Create a variable to track the global step.\n # Use the optimizer to apply the gradients that minimize the loss\n # (and also increment the global step counter) as a single training step.\n train_op = optimizer.minimize(loss_op, global_step=global_step)\n # Add streaming means.\n else:\n train_op = None\n\n return tensors, loss_op, train_op", "def tff_model_fn():\n keras_model = load_model(FLAGS.batch_size)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return simple_fedavg_tf.KerasModelWrapper(keras_model,\n test_data.element_spec, loss)", "def log_model(\n sktime_model,\n artifact_path,\n conda_env=None,\n code_paths=None,\n registered_model_name=None,\n signature=None,\n input_example=None,\n await_registration_for=None,\n pip_requirements=None,\n extra_pip_requirements=None,\n serialization_format=SERIALIZATION_FORMAT_PICKLE,\n **kwargs,\n): # TODO: can we specify a type for fitted instance of sktime model below? # noqa: E501\n _check_soft_dependencies(\"mlflow\", severity=\"error\")\n from mlflow.models import Model\n\n if await_registration_for is None:\n from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\n\n await_registration_for = DEFAULT_AWAIT_MAX_SLEEP_SECONDS\n\n return Model.log(\n artifact_path=artifact_path,\n flavor=utils.mlflow_sktime,\n registered_model_name=registered_model_name,\n sktime_model=sktime_model,\n conda_env=conda_env,\n code_paths=code_paths,\n signature=signature,\n input_example=input_example,\n await_registration_for=await_registration_for,\n pip_requirements=pip_requirements,\n extra_pip_requirements=extra_pip_requirements,\n serialization_format=serialization_format,\n **kwargs,\n )" ]
[ "0.58916014", "0.5890586", "0.57216805", "0.5697928", "0.5560071", "0.5538942", "0.55345696", "0.54726624", "0.5426289", "0.5396827", "0.5383806", "0.5382573", "0.5378443", "0.5323406", "0.5306347", "0.5273451", "0.5273443", "0.52704054", "0.52411795", "0.5230054", "0.5208484", "0.5207535", "0.51724917", "0.51683056", "0.51476103", "0.5147138", "0.51464576", "0.51436436", "0.51363933", "0.5119381", "0.51149064", "0.51039016", "0.5103594", "0.50996286", "0.50940365", "0.5078425", "0.5067604", "0.5062733", "0.5045604", "0.5037156", "0.5023908", "0.5023369", "0.50137186", "0.50065947", "0.49973243", "0.49714184", "0.49635687", "0.49466607", "0.4945628", "0.49362293", "0.4935461", "0.49304157", "0.4923702", "0.4919056", "0.49164104", "0.49091434", "0.49084854", "0.48777542", "0.4877495", "0.48760763", "0.4870752", "0.4859297", "0.4857995", "0.4852411", "0.4849997", "0.48498943", "0.48482051", "0.48398438", "0.48354", "0.4834636", "0.48151883", "0.4812121", "0.48085102", "0.48003945", "0.4795215", "0.47915155", "0.47820547", "0.47799268", "0.4772571", "0.4769791", "0.47645083", "0.47622198", "0.47592187", "0.47569507", "0.47537407", "0.47527328", "0.475077", "0.4743883", "0.47438434", "0.4730461", "0.47253817", "0.47223052", "0.4722303", "0.4721304", "0.47197145", "0.47064707", "0.47053805", "0.47030592", "0.4695943", "0.46924374" ]
0.6641278
0
Load a pandas DataFrame and run a python_function model saved with MLflow against it. Return the prediction results as a CSV-formatted pandas DataFrame. If a ``run_id`` is specified, ``model_path`` is treated as an artifact path within that run; otherwise it is treated as a local path.
def predict(model_path, run_id, input_path, output_path, no_conda):
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)
    model = load_pyfunc(model_path)
    df = pandas.read_csv(input_path)
    result = model.predict(df)
    out_stream = sys.stdout
    if output_path:
        out_stream = open(output_path, 'w')
    pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(\n config, feature_table, label_table,\n model_paths, model_summaries,\n save_preds_to_db=False, save_prefix='',\n discard_columns=[], log_dir='./results/'):\n\n # Create log directory if not exists\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # Get feature and label data\n X, y = get_data(feature_table, label_table, discard_columns=discard_columns)\n labeled_indices = np.logical_or(y == 0, y == 1)\n\n # Evaluate models\n metrics_str = [s.rsplit('.', 1) for s in config['eval_config']['metrics']]\n metrics = [getattr(importlib.import_module(m), c) for (m, c) in metrics_str]\n k_values = config['eval_config']['k']\n results = evaluate_multiprocessing(\n model_paths, save_preds_to_db, save_prefix,\n X, y, labeled_indices, metrics, k_values)\n\n # Convert results to dataframe table\n results_columns = [f'{metric.__name__}_at_{k}' for metric in metrics for k in k_values]\n results = pd.DataFrame({\n **pd.DataFrame(model_summaries),\n 'model_path': model_paths,\n 'num_labeled_rows': [int(labeled_indices.sum())] * len(model_paths),\n **pd.DataFrame(np.array(results).round(4), columns=results_columns),\n })\n\n # Save results to csv file\n experiment_name = config['experiment_name']\n results_path = Path(log_dir) / f'{experiment_name}_results.csv'\n results.to_csv(results_path)\n\n return results", "def predict(input_path, model_path, output_path):\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Loading input dataset\")\n X_pred = pd.read_csv(input_path)\n\n logger.info(\"Loading model\")\n model = joblib.load(model_path)\n\n logger.info(\"Generating predictions\")\n predictions = model.predict(X_pred)\n prediction_df = pd.DataFrame({\"predictions\": predictions})\n\n logger.info(f\"Writing output to {output_path}\")\n output_dir = Path(output_path).parent\n output_dir.mkdir(parents=True, exist_ok=True)\n prediction_df.to_csv(output_path, index=False)", "def predict_return(\n mlflow_tracking_uri: str,\n experiment_name: str,\n run_id: str,\n inputs: pd.DataFrame,\n explain: bool = False,\n show_plot: bool = False,\n preloaded_model=None,\n categorical_colname_list=None,\n):\n\n plt.clf() # clear current figure\n\n mlflow.set_tracking_uri(mlflow_tracking_uri)\n mlflow.set_experiment(experiment_name)\n experiment_details = mlflow.get_experiment_by_name(experiment_name)\n\n mlflow.end_run()\n mlflow.start_run(run_id=run_id)\n\n # pull model from tracking uri\n artifact_loc = (\n str(experiment_details.artifact_location)\n .replace(\"file:\", \"\")\n .replace(\"///\", \"\")\n )\n loc_prefix = \"\"\n if \"P1-AnalyzeTrades\" not in os.getcwd():\n loc_prefix = r\"P1-AnalyzeTrades/\"\n\n metrics, params, tags = parse_mlflow_info(mlflow.get_run(run_id))\n\n # add columns if necessary, can only add, not remove extra cols\n cols_required = list(\n pd.DataFrame(\n json.loads(\n json.loads(tags[\"mlflow.log-model.history\"])[0][\"signature\"][\"inputs\"]\n )\n )[\"name\"]\n )\n\n # todo replace above\n col_type_dict = (\n pd.DataFrame(\n json.loads(\n json.loads(tags[\"mlflow.log-model.history\"])[0][\"signature\"][\"inputs\"]\n )\n )\n .set_index(\"name\")\n .to_dict(orient=\"index\")\n )\n\n # ensure categorical splits contained necessary columns\n add_cols = list(set(cols_required) - set(list(inputs.columns)))\n inputs_copy = inputs.copy()\n inputs_copy[add_cols] = 0\n\n # extra columns in dataset\n # print('extra columns in expanded dataset: '+ str(list(set(list(inputs_copy.columns)) - set(cols_required))))\n\n # ensure X is in correct order and complete for 
model\n inputs_copy = inputs_copy[cols_required]\n\n for c in inputs_copy.columns:\n if col_type_dict[c][\"type\"] == \"double\":\n inputs_copy[c] = inputs_copy[c].astype(float)\n\n if preloaded_model == None:\n mdl, _ = preload_model(\n mlflow_tracking_uri,\n experiment_name,\n run_id,\n )\n else:\n mdl = preloaded_model\n\n mlflow.end_run()\n\n # consider later\n # formula_clean = params['formula'].replace('\\n','')\n\n model_type = get_model_type(tags)\n\n if model_type == \"sklearn\":\n pct_return = mdl.predict(inputs_copy)\n pct_return_df = pd.DataFrame(pct_return, columns=[\"predicted_ret\"])\n else:\n # assume H2O\n pct_return = mdl.predict(inputs_copy)\n pct_return_df = pct_return.rename(columns={\"predict\": \"predicted_ret\"})\n\n # Explain Return for first\n if explain == True:\n try:\n explainer = dill.load(\n open(f\"{artifact_loc}/{run_id}/artifacts/explainer.pkl\", \"rb\")\n )\n except: # for testing\n explainer = dill.load(\n open(f\"{loc_prefix}mlruns/0/{run_id}/artifacts/explainer.pkl\", \"rb\")\n )\n\n # create explained object\n if \"pipeline\" in str(type(mdl)):\n ## fix shap_obj, requires column transformer in step position 0 ,\n ## categorical in position 1\n shap_obj = explainer(mdl[0].transform(inputs_copy))\n else:\n shap_obj = explainer(inputs_copy)\n\n # correct for error\n shap_obj.base_values = shap_obj.base_values\n\n # shap values df with column\n shap_df = pd.DataFrame(shap_obj.values, columns=inputs_copy.columns)\n\n # ensure pct return matches shap, in case of gbm explanation of linear\n adj = (\n pct_return_df.sum().sum()\n - shap_df.sum().sum()\n - float(shap_obj.base_values[0])\n )\n specific_adj = adj / shap_df.shape[1]\n\n if abs(adj) > 0.01:\n print(\"warning, adjusting shap to match actual\")\n shap_df = shap_df + specific_adj\n\n # shap.plots.force(shap_obj.base_values[0][0],\n # shap_values = shap_obj.values,\n # features = inputs_copy.columns,\n # matplotlib = True,\n # show = False)\n\n try:\n shap_obj_adj = shap_obj\n shap_obj_adj.values = shap_obj_adj.values + specific_adj\n\n if \"pipeline\" in str(type(mdl)):\n # def update_shap_obj(shap_obj, X_train, encoder):\n shap_obj_adj.feature_names = list(inputs_copy.columns)\n\n if categorical_colname_list is None:\n categorical_names = list(\n inputs_copy.select_dtypes(include=[\"object\"]).columns\n )\n else:\n categorical_names = categorical_colname_list\n col_idx = list(\n np.where(np.isin(shap_obj_adj.feature_names, categorical_names))[0]\n )\n\n shap_cat = copy.deepcopy(shap_obj_adj)\n shap_cat.data = np.array(shap_obj_adj.data, dtype=\"object\")\n res_arr = (\n mdl[0]\n .transformers_[1][1][1]\n .inverse_transform(\n pd.DataFrame(\n shap_cat.data[:, col_idx], columns=[categorical_names]\n )\n )\n )\n for i, loc in enumerate(col_idx):\n shap_cat.data[:, loc] = res_arr[:, i]\n\n shap.plots.waterfall(shap_cat[0])\n else:\n shap.plots.waterfall(shap_obj_adj[0])\n # waterfall_legacy(shap_obj.base_values[0][0],\n # shap_values = shap_obj.values.ravel()+specific_adj,\n # feature_names = inputs_copy.columns,\n # show = False)\n except:\n # backup, probably not working\n print(f\"backup, probably not working\")\n waterfall_legacy(\n shap_obj.base_values[0],\n shap_values=shap_obj.values.ravel() + specific_adj,\n feature_names=inputs_copy.columns,\n show=False,\n )\n\n f = plt.gcf()\n f.tight_layout()\n if not os.path.exists(\"output/\"):\n os.mkdir(\"output/\")\n f.savefig(\"output/current_force.png\")\n if show_plot:\n # matplotlib.use('svg')\n plt.show()\n\n return pct_return_df, shap_obj, shap_df, 
f\n\n else:\n return pct_return_df", "def spark_udf(spark, path, run_id=None, result_type=\"double\"):\n\n # Scope Spark import to this method so users don't need pyspark to use non-Spark-related\n # functionality.\n from mlflow.pyfunc.spark_model_cache import SparkModelCache\n from pyspark.sql.functions import pandas_udf\n\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n\n archive_path = SparkModelCache.add_local_model(spark, path)\n\n def predict(*args):\n model = SparkModelCache.get_or_load(archive_path)\n schema = {str(i): arg for i, arg in enumerate(args)}\n # Explicitly pass order of columns to avoid lexicographic ordering (i.e., 10 < 2)\n columns = [str(i) for i, _ in enumerate(args)]\n pdf = pandas.DataFrame(schema, columns=columns)\n result = model.predict(pdf)\n return pandas.Series(result)\n\n return pandas_udf(predict, result_type)", "def _evaluate_model(\n run_id: str, dataset_filename: str, dataset_sampling_column: str = None\n):\n fix_multiprocessing_with_keras_on_macos()\n\n run = _get_run(run_id)\n hyperparameters = run.config\n\n # no need to run this on a gpu since it's 1 epoch\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n with ModelBestH5File(run) as model_h5_filepath:\n model = _load_untrainable_model(hyperparameters, model_h5_filepath)\n\n model_name = run.config[\"model_name\"]\n x, y = _get_prepared_dataset(\n model_name, hyperparameters, dataset_filename, dataset_sampling_column\n )\n\n wandb.init(\n config={\n \"run_id\": run_id,\n \"dataset_filename\": dataset_filename,\n \"dataset_sampling_column\": dataset_sampling_column,\n },\n tags=[\"model-evaluation\"],\n )\n\n batch_size = hyperparameters[\"batch_size\"]\n label_scale_factor_mmhg = hyperparameters[\"label_scale_factor_mmhg\"]\n acceptable_error_mg_l = hyperparameters[\"acceptable_error_mg_l\"]\n acceptable_fraction_outside_error = hyperparameters[\n \"acceptable_fraction_outside_error\"\n ]\n\n # we're using fit() instead of evaluate() to get the functionality of these callbacks\n # training performance in the results should be ignored, as it can be affected by some\n # training-only layers such as dropout\n model.fit(\n x,\n y,\n batch_size=batch_size,\n epochs=1,\n verbose=2,\n validation_data=(x, y),\n callbacks=[\n ThresholdValMeanAbsoluteErrorOnCustomMetric(\n acceptable_fraction_outside_error=acceptable_fraction_outside_error,\n acceptable_error_mg_l=acceptable_error_mg_l,\n ),\n WandbCallback(verbose=1, monitor=\"val_adjusted_mean_absolute_error\"),\n LogPredictionsAndWeights(\n metric=\"val_adjusted_mean_absolute_error\",\n dataset=([], [], x, y),\n label_scale_factor_mmhg=label_scale_factor_mmhg,\n ),\n ],\n )\n\n # returning model and dataset for use in jupyter notebooks\n return model, (x, y)", "def evaluate_model(fn_string, df, features,\n coefficients=None,\n target=None,\n fit_intercept=False):\n features = list(set(df.columns).intersection(features))\n array = df[features].to_numpy()\n func = process_fn(fn_string, features)\n n_samples = len(df)\n predictions = func(array.T)\n if coefficients is None:\n if target is None:\n target = df.columns[0]\n target_values = df[target]\n coefficients = lsq_coefficients(predictions, target_values,\n fit_intercept=fit_intercept)\n slope, intercept = coefficients\n else:\n slope, intercept = coefficients\n predictions = np.add(np.multiply(predictions, slope), intercept)\n return predictions, coefficients", "def run_score(df, tmo_path, score_path, label):\n\n\ttry:\n\t\t# load the model\n\t\twith open(tmo_path, 'rb') as 
f:\n\t\t\ttmo = pickle.load(f)\n\texcept OSError:\n\t\tlogger.error(\"Cannot open %s\", tmo_path)\n\texcept Exception as e:\n\t\tlogger.error(e)\n\n\tlogger.info(\"Scoring the trained model...\")\n\n\tdata = score(df, tmo, label)\n\n\t# write score results\n\tdata.to_csv(score_path, index=False)\n\tlogger.info('Model scoring results saved to %s', score_path)", "def run_ml_model(model_config:dict, train_fn, results_output_json_fn, test_fn=None,\n model_dir=\"./model/credit_risk_random_forest\", npzdir=\"./npzdir\",\n b_show_plot=False, output_png=None,\n kaggle_submission_output_csv=\"submission.csv\"):\n\n for _dir in [npzdir, model_dir]:\n # model_dir=\"./model/credit_risk_dnn\"\n if os.path.isdir(_dir):\n shutil.rmtree(_dir)\n\n os.makedirs(_dir)\n print(\"mkdir\",_dir)\n\n X_train, X_test, y_train, y_test, scaler = load_train_data_train_test_split(train_fn=train_fn)\n X_ktest, submission_df = load_train_and_kaggle_submission(test_fn=test_fn, scaler=scaler)\n\n model_type = model_config['model_type']\n\n if model_type == \"rf\":\n clf = RandomForestClassifier(**model_config)\n clf.fit(X_train, y_train)\n elif model_type == \"xgboost\":\n if \"scale_pos_weight\" in model_config:\n # rescale weight to make it same as test set\n\n sum_wpos = np.sum(y_train == 1.0)\n sum_wneg = np.sum(y_train == 0.0)\n model_config['scale_pos_weight'] = sum_wneg / sum_wpos\n\n # print weight statistics\n print('weight statistics: wpos=%g, wneg=%g, ratio=%g' % (sum_wpos, sum_wneg, sum_wneg / sum_wpos))\n\n clf = xgb.XGBClassifier(**model_config)\n clf.fit(X_train, y_train)\n else:\n raise RuntimeError(\"Invalid model_type: {}\".format(model_type))\n\n print(\"Model Config\", json.dumps(model_config, indent=2))\n\n print(\"Run clf.predict()\")\n y_pred = clf.predict(X_test)\n\n print(\"Run clf.predict_proba()\")\n y_pred_proba = clf.predict_proba(X_test)\n\n print(y_pred_proba.shape), print(y_test.shape)\n print(y_pred_proba[:, 1].shape)\n y_pred_proba = y_pred_proba[:, 1]\n\n save_predictions_npz(fn=os.path.join(npzdir, \"predict.npz\"), predictions=y_pred)\n save_predictions_npz(fn=os.path.join(npzdir, \"predict_proba.npz\"), predictions=y_pred_proba)\n\n # show results\n print(\"roc_auc_score\", roc_auc_score(y_test, y_pred_proba))\n print(\"pauc(.50)\", roc_auc_score(y_test, y_pred_proba, max_fpr=0.5))\n print(\"pauc(.05)\", roc_auc_score(y_test, y_pred_proba, max_fpr=0.05))\n print(\"pauc(.005)\", roc_auc_score(y_test, y_pred_proba, max_fpr=0.005))\n print(\"recall_score\", recall_score(y_test, y_pred))\n print(\"precision_score\", precision_score(y_test, y_pred))\n\n stats_json = {\n \"roc_auc_score\": roc_auc_score(y_test, y_pred_proba),\n \"pauc@0.50\": roc_auc_score(y_test, y_pred_proba, max_fpr=0.5),\n \"pauc@0.05\": roc_auc_score(y_test, y_pred_proba, max_fpr=0.05),\n \"pauc@0.005\": roc_auc_score(y_test, y_pred_proba, max_fpr=0.005),\n \"recall_score\": recall_score(y_test, y_pred),\n \"precision_score\": precision_score(y_test, y_pred),\n \"meta\": {\n \"train_fn\": train_fn,\n \"kaggle_test_fn\": test_fn,\n \"kaggle_submission_output_csv\": kaggle_submission_output_csv,\n \"results_output_json_fn\": results_output_json_fn,\n \"model_dir\" : model_dir,\n \"b_show_plot\" : b_show_plot,\n \"output_png\": output_png,\n },\n \"model_config\": model_config\n }\n\n # preds = y_pred_proba[:, 1]\n fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)\n plot_roc_curve_interp(fpr=fpr, tpr=tpr, label='default', output_png=output_png)\n\n if b_show_plot:\n plt.show()\n\n with open(results_output_json_fn, 
'w') as fp:\n json.dump(stats_json, fp, indent=2)\n print(\"Wrote to\", results_output_json_fn)\n\n # make kaggle predictions\n print(\"Making kaggle submission predictions\")\n submission_df[\"TARGET\"] = clf.predict(X_ktest)\n submission_df.to_csv(kaggle_submission_output_csv, index=False)\n print(\"Wrote to\", kaggle_submission_output_csv)\n print(\"Kaggle Prediction labels:\", submission_df[\"TARGET\"].value_counts())\n\n return stats_json", "def train_and_eval(model_dir, model_type, train_steps, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name, positive_labels, combination_method, method):\n \n index_map, weights = wvd.load(train_embeddings_file_name)\n #Get positive labels\n positive_labels = positive_labels.split(',')\n \n print(\"reading data...\")\n train_file_name = train_data \n df_train = pd.read_table(train_file_name, dtype={'node1':str, 'node2':str})\n df_train = df_train.sample(frac=1)\n\n # remove NaN elements\n df_train = df_train.dropna(how='any', axis=0)\n \n df_train[LABEL_COLUMN] = (\n df_train[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n\n model_dir = tempfile.mkdtemp() if not model_dir else model_dir\n print(\"model directory = %s\" % model_dir)\n \n train_x, _, train_y, _ = get_input(df_train, weights, index_map, combination_method)\n \n print(\"\\nBuilding model...\")\n m = build_estimator(model_dir, model_type, weights, index_map, combination_method)\n \n print(\"\\nTraining model...\")\n if model_type == \"regressor\":\n m.fit(train_x, train_y, n_epoch=train_steps, show_metric=True, snapshot_epoch=False)\n \n print(\"\\nTesting model...\")\n index_map, weights = wvd.load(test_embeddings_file_name)\n \n print(\"reading data...\")\n test_file_name = test_data\n df_test = pd.read_table(test_file_name, dtype={'node1':str, 'node2':str})\n df_test = df_test.sample(frac=1)\n\n # remove NaN elements\n df_test = df_test.dropna(how='any', axis=0)\n \n df_test[LABEL_COLUMN] = (\n df_test[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n \n if model_type == \"regressor\":\n test_x, test_original_y, test_index_y, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')\n node_sets = get_node_sets(test_original_x, test_original_y)\n \n print(\"\\nPredicting:\")\n model_predictions = m.predict(test_x)\n model_predictions = list(model_predictions)\n #Covert back to 1 and 0\n predictions = []\n model_predictions_probs = []\n for prediction in model_predictions:\n predictions.append(prediction[1]) #non-thresholded value of positve class\n model_predictions_probs.append(prediction[1])\n \n k = int(len([i for i in test_original_y if i == 1]) * 0.3)\n do_evaluations([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method)\n #Uncomment to log ranked links\n #log_predictions([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n # positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method,\n # outfilename=combination_method, method=method)", "def infer(\n context: mlrun.MLClientCtx,\n model: str,\n dataset: DatasetType,\n drop_columns: Union[str, List[str], int, List[int]] = None,\n label_columns: Union[str, List[str]] = None,\n log_result_set: bool = True,\n result_set_name: str = \"prediction\",\n batch_id: str = None,\n 
perform_drift_analysis: bool = None,\n sample_set: DatasetType = None,\n drift_threshold: float = 0.7,\n possible_drift_threshold: float = 0.5,\n inf_capping: float = 10.0,\n artifacts_tag: str = \"\",\n **predict_kwargs: Dict[str, Any],\n):\n # Loading the model:\n context.logger.info(f\"Loading model...\")\n model_handler = AutoMLRun.load_model(model_path=model, context=context)\n if label_columns is None:\n label_columns = [\n output.name for output in model_handler._model_artifact.spec.outputs\n ]\n\n # Get dataset by object, URL or by FeatureVector:\n context.logger.info(f\"Loading data...\")\n x, label_columns = _read_dataset_as_dataframe(\n dataset=dataset,\n label_columns=label_columns,\n drop_columns=drop_columns,\n )\n\n # Predict:\n context.logger.info(f\"Calculating prediction...\")\n y_pred = model_handler.model.predict(x, **predict_kwargs)\n\n # Prepare the result set:\n result_set = _prepare_result_set(x=x, label_columns=label_columns, y_pred=y_pred)\n\n # Check for logging the result set:\n if log_result_set:\n # Log the result set:\n context.logger.info(f\"Logging result set (x | prediction)...\")\n context.log_dataset(\n key=result_set_name,\n df=result_set,\n db_key=result_set_name,\n tag=artifacts_tag,\n )\n # Log the batch ID:\n if batch_id is None:\n batch_id = hashlib.sha224(str(datetime.now()).encode()).hexdigest()\n context.log_result(\n key=\"batch_id\",\n value=batch_id,\n )\n\n # Check for performing drift analysis:\n if (\n perform_drift_analysis is None\n and model_handler._model_artifact.spec.feature_stats is not None\n ):\n perform_drift_analysis = True\n if perform_drift_analysis:\n context.logger.info(\"Performing drift analysis...\")\n # Get the sample set statistics (either from the sample set or from the statistics logged with the model):\n sample_set_statistics = _get_sample_set_statistics(\n sample_set=sample_set,\n model_artifact_feature_stats=model_handler._model_artifact.spec.feature_stats,\n )\n # Produce the artifact:\n (\n drift_table_plot,\n metric_per_feature_dict,\n analysis_results,\n ) = _perform_drift_analysis(\n sample_set_statistics=sample_set_statistics,\n inputs=result_set,\n drift_threshold=drift_threshold,\n possible_drift_threshold=possible_drift_threshold,\n inf_capping=inf_capping,\n )\n # Log the artifact and results:\n context.log_artifact(drift_table_plot, tag=artifacts_tag)\n context.log_artifact(metric_per_feature_dict, tag=artifacts_tag)\n context.log_results(results=analysis_results)", "def run_fn(fn_args: TrainerFnArgs):\n\n # Training set size\n TRAIN_SIZE = get_dataset_size(fn_args.train_files)\n NUM_STEPS = TRAIN_SIZE / BATCH_SIZE # number of steps per epoch for which to train model\n \n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n \n train_dataset = _input_fn(fn_args.train_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n eval_dataset = _input_fn(fn_args.eval_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n\n model = _build_keras_model(\n tf_transform_output,\n hidden_units=[HIDDEN_UNITS_1, HIDDEN_UNITS_2, HIDDEN_UNITS_3],\n learning_rate=LEARNING_RATE)\n\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, update_freq='batch')\n \n model.fit(\n train_dataset,\n epochs=NUM_EPOCHS, \n steps_per_epoch=NUM_STEPS,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default':\n 
_get_serve_tf_examples_fn(model,\n LABEL_COLUMN,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def export(output, model_path, run_id, mlflow_home):\n mlflow.azureml.export(output=output, model_path=model_path, run_id=run_id,\n mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None)", "def run_models(\n weather_fn: str,\n weather_header_row: int,\n start_date: str,\n start_time: str,\n duration: int,\n selected_models: Dict,\n params_grass: Dict,\n params_mk5: Dict,\n params_vesta: Dict,\n params_vesta_fhr: Dict,\n ) -> Dict:\n start = dt.datetime.now()\n weather_df = get_weather(weather_fn, weather_header_row)\n weather_df = trim_weather(weather_df, start_date, start_time, duration)\n \n\n MODELS = {\n # 'GRASS_Cheney_98': ros_grass_cheney(weather_df, grass_state, grass_curing),\n 'GRASS_Cheney_98': ros_grass_cheney(weather_df, params_grass),\n 'FOREST_Mk5': ros_forest_mk5(weather_df, params_mk5),\n 'FOREST_Vesta': ros_forest_vesta(weather_df, params_vesta),\n 'FOREST_Vesta_FHR': ros_forest_vesta_fhr(weather_df, params_vesta_fhr),\n 'FOREST_Vesta_KT': ros_forest_vesta_kt(weather_df, params_vesta),\n }\n\n model_outputs = {} # model name as key, dataframes as val\n\n models_run = 0\n for key, val in selected_models.items():\n if val:\n model_outputs[key] = MODELS[key]\n models_run += 1\n\n time_elapsed = dt.datetime.now()-start\n print(f'{models_run} models run in {time_elapsed}')\n return model_outputs", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def run_eval(self, models, input_file):\n\n input_df = pd.read_csv(input_file)\n num_models, num_samples = len(models), input_df.shape[0]\n matrix = np.empty((num_models, num_samples), dtype=int)\n\n temp_eval_file = tempfile.NamedTemporaryFile().name\n eval_log = tempfile.NamedTemporaryFile().name\n\n for i, moses_model in enumerate(models):\n cmd = ['eval-table', \"-i\", input_file, \"-c\", moses_model.model, \"-o\", temp_eval_file, \"-u\",\n self.target_feature, \"-f\", eval_log]\n self.logger.debug(\"Evaluating model %s\" % moses_model.model)\n process = subprocess.Popen(args=cmd, stdout=subprocess.PIPE)\n\n stdout, stderr = process.communicate()\n\n if process.returncode == 0:\n matrix[i] = np.genfromtxt(temp_eval_file, skip_header=1, dtype=int)\n else:\n self.logger.error(\"The following error raised by eval-table %s\" % stderr.decode(\"utf-8\"))\n raise ChildProcessError(stderr.decode(\"utf-8\"))\n\n return matrix", "def predict():\n\n # Input file\n flask_file = request.files['file']\n if not flask_file:\n return 'Upload a CSV file'\n \n X_train = pd.read_csv(request.files.get('file'))\n results = run_model(X_train)\n return results.to_json(orient='values')", "def test(which, fitted_model_filename):\n click.echo(\"Mode: test.\")\n defaults = get_defaults()\n\n # bootstrap input\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n mode = \"{}_test\".format(which)\n boot_data = bootstrap(new_options, mode=mode)\n\n model = boot_data['model']\n X_test, y_test = boot_data['data']\n defaults = 
boot_data['defaults']\n\n eval_metrics = get_evaluation_metrics()\n # model = RandomForestClassifier(random_state=defaults.MISC.SEED, class_weight='balanced')\n\n # X_train, y_train = load_data(defaults, which='train')\n # scaler = StandardScaler()\n # numeric_cols = X_train.select_dtypes(include=np.number).columns.tolist()\n # X_train.loc[:, numeric_cols] = scaler.fit_transform(X_train[numeric_cols])\n\n # model.fit(X_train, y_train)\n\n test_results = test_performance(conf=defaults,\n model=model,\n X_test=X_test, y_test=y_test,\n eval_metrics=eval_metrics)\n results = pd.DataFrame(test_results.values(), index=test_results.keys(), columns=[\"test\"])\n\n results_filename = \"{}_results_{}.csv\".format(mode, fitted_model_filename.split(\".\")[0])\n results_path = os.path.join(defaults.OUTPUT.RESULTS_PATH, results_filename)\n results.to_csv(results_path)", "def dump_to_csv(self, function, path, kwargs=None):\n if not kwargs:\n kwargs = {}\n df_to_dump = function(self.df, **kwargs)\n df_to_dump.to_csv(path)", "def predict_model():\n # Decode the request\n data = request.data.decode(\"utf-8\")\n\n # Write data from the request in a local csv file\n test_csv = \"test_local.csv\"\n f = open(test_csv, \"w\", encoding=\"utf-8\")\n f.write(data)\n f.close()\n\n # Load the test csv file as a DataFrame\n test_df = pd.read_csv(test_csv)\n\n # Get submission DataFrame\n predictions_df = model.predict(test_df)\n\n # Send csv file as response\n res = make_response(predictions_df.to_csv(index=False))\n res.headers[\"Content-Disposition\"] = \"attachment; filename=submission.csv\"\n res.headers[\"Content-Type\"] = \"text/csv\"\n return res", "def model_pred(model_name):\n with open('model-'+model_name+'.pickle', 'rb') as clf_file:\n clf_test = pickle.load(clf_file)\n\n df_test = pd.read_csv(\"test.csv\")\n df_test = df_test.fillna(method='ffill')\n\n for column in df_test.columns:\n if df_test[column].dtype == type(object):\n le = preprocessing.LabelEncoder()\n df_test[column] = le.fit_transform(df_test[column])\n\n y_test_output = df_test.values\n y_pred_output = clf_test.predict(y_test_output)\n output = []\n for a,b in zip(y_test_output,y_pred_output):\n output.append([int(a[0]),b])\n\n out_df = pd.DataFrame(output,columns = ['id','P'])\n out_df.to_csv('pred-'+model_name+'.csv',index = False)", "def run_model(training_data_path, test_data_path, results_path, similarity_score, save_model, new_model_running,\n algorithm_path, threshold, features_list, target_features_list, train_scaler_path, target_scaler_path,\n event):\n\n clear_session()\n\n grid_dictionary = get_lstm_grid_params()\n # Choose between new model creation flow and load existing model flow\n if new_model_running:\n window_size, encoding_dimension, activation, loss, optimizer, threshold, epochs = get_lstm_new_model_parameters()\n else:\n lstm = load_model(algorithm_path)\n window_size = lstm.get_input_shape_at(0)[1]\n X_train_scaler = pickle.load(open(train_scaler_path, 'rb'))\n Y_train_scaler = pickle.load(open(target_scaler_path, 'rb'))\n X_train = None\n Y_train = None\n\n FLIGHT_ROUTES = get_subdirectories(test_data_path)\n\n current_time = get_current_time()\n\n current_time_path = os.path.join(*[str(results_path), 'lstm', str(current_time)])\n create_directories(f\"{current_time_path}\")\n\n # Create sub directories for each similarity function\n for similarity in similarity_score:\n similarity_path = os.path.join(*[str(current_time_path), str(similarity)])\n create_directories(f\"{similarity_path}\")\n\n # Train the model for each 
flight route\n for flight_route in FLIGHT_ROUTES:\n\n # Execute training for new model flow\n if new_model_running:\n lstm, X_train_scaler, Y_train_scaler, X_train, Y_train = execute_train(flight_route,\n training_data_path=training_data_path,\n results_path=f\"{current_time_path}\",\n window_size=window_size,\n encoding_dimension=encoding_dimension,\n activation=activation,\n loss=loss,\n optimizer=optimizer,\n add_plots=True,\n features_list=features_list,\n epochs=epochs,\n target_features_list=target_features_list,\n event=event)\n\n # Get results for each similarity function\n for similarity in similarity_score:\n current_results_path = os.path.join(*[str(current_time_path), str(similarity), str(flight_route)])\n create_directories(f\"{current_results_path}\")\n tpr_scores, fpr_scores, acc_scores, delay_scores, routes_duration, attack_duration, auc_scores, best_params = execute_predict(\n flight_route,\n test_data_path=test_data_path,\n similarity_score=similarity,\n window_size=window_size,\n threshold=threshold,\n lstm=lstm,\n X_train_scaler=X_train_scaler,\n results_path=current_results_path,\n add_plots=True,\n run_new_model=new_model_running,\n X_train=X_train,\n features_list=features_list,\n target_features_list=target_features_list,\n save_model=save_model,\n Y_train_scaler=Y_train_scaler,\n Y_train=Y_train,\n event=event,\n grid_dictionary=grid_dictionary\n )\n\n df = pd.DataFrame(tpr_scores)\n tpr_path = os.path.join(*[str(current_results_path), str(flight_route) + '_tpr.csv'])\n df.to_csv(f\"{tpr_path}\", index=False)\n\n df = pd.DataFrame(fpr_scores)\n fpr_path = os.path.join(*[str(current_results_path), str(flight_route) + '_fpr.csv'])\n df.to_csv(f\"{fpr_path}\", index=False)\n\n df = pd.DataFrame(acc_scores)\n acc_path = os.path.join(*[str(current_results_path), str(flight_route) + '_acc.csv'])\n df.to_csv(f\"{acc_path}\", index=False)\n\n df = pd.DataFrame(delay_scores)\n delay_path = os.path.join(*[str(current_results_path), str(flight_route) + '_delay.csv'])\n df.to_csv(f\"{delay_path}\", index=False)\n\n df = pd.DataFrame(auc_scores)\n auc_path = os.path.join(*[str(current_results_path), str(flight_route) + '_auc.csv'])\n df.to_csv(f\"{auc_path}\", index=False)\n\n df = pd.DataFrame(best_params)\n best_params_path = os.path.join(*[str(current_results_path), str(flight_route) + '_params.csv'])\n df.to_csv(f\"{best_params_path}\", index=False)\n\n algorithm_name = \"LSTM\"\n\n # Report results for training data to csv files\n for similarity in similarity_score:\n report_similarity_path = os.path.join(*[str(results_path), 'lstm', str(current_time), str(similarity)])\n report_results(f\"{report_similarity_path}\",\n test_data_path,\n FLIGHT_ROUTES,\n algorithm_name,\n similarity,\n routes_duration,\n attack_duration)", "def train_and_eval(config):\n print 'model directory = %s' % config.model_output\n\n model = rf(\n n_estimators=config.num_trees, max_depth=config.tree_depth,\n max_leaf_nodes=config.max_nodes)\n\n img, label, feat = get_records(\n os.path.join(config.tfrecord_dir, 'train.tfrecords'), config)\n model.fit(\n x=feat, y=label)\n\n test_img, test_label, test_feat = get_records(\n os.path.join(config.tfrecord_dir, 'val.tfrecords'), config)\n results = model.predict(\n x=test_img, y=test_label)\n return results, model", "def predict_with_lgbm(test_df, row_ids, model_filepath):\n if os.path.isdir(model_filepath):\n click.echo(\"Loading models in directory\" + model_filepath)\n models_in_dir = os.listdir(model_filepath)\n num_models = len(models_in_dir)\n 
predictions = np.zeros(len(row_ids))\n\n for i, model in enumerate(models_in_dir, start=1):\n with timer(\"Loading model [\" + str(i) + \"/\" + str(num_models) + \"]\"):\n lgbm_model = lgb.Booster(model_file=model_filepath + \"/\" + model)\n\n with timer(\"Predicting values [\" + str(i) + \"/\" + str(num_models) + \"]\"):\n predictions_current = lgbm_model.predict(test_df)\n predictions += np.expm1(predictions_current)\n\n predictions = predictions / num_models\n predictions[predictions < 0] = 0\n return predictions\n\n else:\n with timer(\"Loading model \" + model_filepath):\n lgbm_model = lgb.Booster(model_file=model_filepath)\n\n with timer(\"Predicting values\"):\n predictions = lgbm_model.predict(test_df)\n # Invert log and set possible neg. values to 0\n predictions = np.expm1(predictions)\n predictions[predictions < 0] = 0\n return predictions", "def write_to_csv(self, log_dir, run_dir, hmc=False):\n _, run_str = os.path.split(run_dir)\n avg_data = {\n 'log_dir': log_dir,\n 'run_dir': run_str,\n 'hmc': hmc,\n }\n\n for key, val in dict(sorted(self.data.items())).items():\n tensor = tf.convert_to_tensor(val)\n arr, steps = therm_arr(tensor.numpy(), therm_frac=0.2)\n if 'steps' not in avg_data:\n avg_data['steps'] = len(steps)\n avg_data[key] = np.mean(arr)\n\n # avg_data[key] = tf.reduce_mean(arr)\n\n avg_df = pd.DataFrame(avg_data, index=[0])\n outdir = os.path.join(BASE_DIR, 'logs', 'GaugeModel_logs')\n csv_file = os.path.join(outdir, 'inference.csv')\n head, tail = os.path.split(csv_file)\n io.check_else_make_dir(head)\n io.log(f'Appending inference results to {csv_file}.')\n if not os.path.isfile(csv_file):\n avg_df.to_csv(csv_file, header=True, index=False, mode='w')\n else:\n avg_df.to_csv(csv_file, header=False, index=False, mode='a')", "def load_pyfunc(path, run_id=None, suppress_warnings=False):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf = _load_model_conf(path)\n model_py_version = conf.get(PY_VERSION)\n if not suppress_warnings:\n _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)\n if CODE in conf and conf[CODE]:\n code_path = os.path.join(path, conf[CODE])\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path\n return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n Y_pred = pd.DataFrame(Y_pred, columns=category_names)\n \n # calculate summary stats on test data\n results = pd.DataFrame()\n for column_name in Y_pred.columns:\n col_report = classification_report(y_true=Y_test[[column_name]], y_pred=Y_pred[[column_name]], output_dict=True)\n accuracy = col_report['accuracy']\n precision = col_report['macro avg']['precision']\n recall = col_report['macro avg']['recall']\n results[column_name] = [accuracy, precision, recall]\n results.index = ['accuracy', 'precision', 'recall']\n results.mean(axis=1) \n \n # save results to local csv file\n model_name = type(model.best_params_['clf']).__name__\n avg_accuracy = results.mean(axis=1)['accuracy']\n avg_precision = results.mean(axis=1)['precision']\n avg_recall = results.mean(axis=1)['recall']\n params = model.best_params_\n stored_results = pd.DataFrame({'Model': [model_name], 'Accuracy': [avg_accuracy], 'Precision': [avg_precision], \n 'Recall': [avg_recall], 'Parameters': [params]})\n\n add_header = not os.path.isfile('models/model_results.csv')\n with 
open('models/model_results.csv', 'a') as f:\n stored_results.to_csv(f, header=add_header, index=False)", "def run_test(path_to_driver, path_to_browser, path_to_test, path_targets, key_to_infer,\n columns_int, columns_float, infer_url, path_pred):\n \n server, driver = start_server_and_driver(path_to_driver, path_to_browser)\n column_names, test_dict_ord = prepare_data(path_to_test, path_targets, key_to_infer, \n columns_int, columns_float)\n do_inferences(driver, infer_url, path_pred, column_names,\n test_dict_ord)\n stop_server_and_driver(server, driver)", "def eval(self, df):\n ## Check invariant; model inputs must be subset of df columns\n if not set(self.var).issubset(set(df.columns)):\n raise ValueError(\n \"Model function `{}` var not a subset of given columns\".format(\n self.name\n )\n )\n\n ## Set up output\n n_rows = df.shape[0]\n results = zeros((n_rows, len(self.out)))\n\n for ind in range(n_rows):\n results[ind] = self.func(*df.loc[ind, self.var])\n\n ## Package output as DataFrame\n return DataFrame(data=results, columns=self.out)", "def arima_model(self, run_id):\n # Retrieve data for modelling\n measures = self.daily_avg(run_id)\n\n # don't try to compute if there aren't any measures\n if measures is None:\n return pd.DataFrame()\n\n # Take past 7-day average of exogenous predictors to use for\n # future prediction\n exog_future_predictors = pd.concat(\n [measures.iloc[-7:, :].mean(axis=0).to_frame().T]*7,\n ignore_index=True\n )\n\n try:\n # Find optimal order for model\n params = arma_order_select_ic(measures['flow'], ic='aic')\n try:\n # Build and fit model\n mod = ARIMA(measures['flow'],\n order=(params.aic_min_order[0], 0,\n params.aic_min_order[1]),\n exog=measures[['temp', 'precip']]).fit()\n\n prediction = pd.DataFrame(\n [mod.forecast(steps=7,\n exog=exog_future_predictors[\n ['temp', 'precip']],\n alpha=0.05)[0]]).T\n except Exception:\n # If model doesn't converge, return \"prediction\"\n # of most recent day\n prediction = pd.concat([measures.iloc[-1, :].to_frame().T]*7,\n ignore_index=True)['flow']\n except ValueError:\n # If order fitting doesn't converge, return \"prediction\"\n # of most recent day\n prediction = pd.concat([measures.iloc[-1, :].to_frame().T]*7,\n ignore_index=True)['flow']\n\n # Add dates and return past 21 days for plotting\n prediction_dates = [measures.index[-2] +\n datetime.timedelta(days=x) for x in range(0, 7)]\n prediction.index = prediction_dates\n past = measures['flow'][-22:-1]\n prediction = pd.concat([past[:-1], prediction], axis=0)\n\n return prediction", "def predict_with_ctb(test_df, row_ids, model_filepath):\n if os.path.isdir(model_filepath):\n click.echo(\"Loading models in directory\" + model_filepath)\n models_in_dir = os.listdir(model_filepath)\n num_models = len(models_in_dir)\n predictions = np.zeros(len(row_ids))\n\n for i, model in enumerate(models_in_dir, start=1):\n with timer(\"Loading model [\" + str(i) + \"/\" + str(num_models) + \"]\"):\n ctb_model = ctb.CatBoostRegressor()\n ctb_model.load_model(model_filepath + \"/\" + model)\n\n with timer(\"Predicting values [\" + str(i) + \"/\" + str(num_models) + \"]\"):\n predictions_current = ctb_model.predict(test_df)\n predictions += np.expm1(predictions_current)\n\n predictions = predictions / num_models\n predictions[predictions < 0] = 0\n return predictions\n\n else:\n with timer(\"Loading model \" + model_filepath):\n ctb_model = ctb.CatBoostRegressor()\n ctb_model.load_model(model_filepath)\n\n with timer(\"Predicting values\"):\n predictions = 
ctb_model.predict(test_df)\n # Invert log and set possible neg. values to 0\n predictions = np.expm1(predictions)\n predictions[predictions < 0] = 0\n return predictions", "def __call__(self, predictor_model) -> None:\n self.save_result(self.evaluate(predictor_model))", "def predict_with_lgbm_meter(test_df, row_ids, model_filepath):\n\n with timer(\"Loading models in directory\" + model_filepath):\n models_in_dir = sorted(os.listdir(model_filepath))\n test_by_meter = []\n row_id_by_meter = []\n for i in range(4):\n is_meter = test_df[\"meter\"] == i\n test_temp = test_df[is_meter]\n row_temp = row_ids[is_meter]\n test_by_meter.append(test_temp)\n row_id_by_meter.append(row_temp)\n\n predictions = []\n row_ids_prediction = []\n with timer(\"Predicting values\"):\n for model, test, row in zip(models_in_dir, test_by_meter, row_id_by_meter):\n del test[\"meter\"]\n lgbm_model = lgb.Booster(model_file=model_filepath + \"/\" + model)\n\n predictions_current = lgbm_model.predict(test)\n predictions.extend(list(np.expm1(predictions_current)))\n row_ids_prediction.extend(row)\n\n # Order the predictions by merging them to the original row ids\n pred_df = pd.DataFrame({\"row_id\": row_ids_prediction, \"pred\": predictions})\n row_ids_df = pd.DataFrame({\"true_row_ids\": row_ids})\n pred_ordered_df = row_ids_df.merge(pred_df, left_on=\"true_row_ids\",\n right_on=\"row_id\", how=\"left\")\n predictions = pred_ordered_df[\"pred\"].copy(deep=True)\n predictions[predictions < 0] = 0\n return predictions", "def test_naive_forecaster_model_with_regressor_pyfunc_output(\n naive_forecaster_model_with_regressor, model_path, data_longley\n):\n _, _, _, X_test = data_longley\n\n flavor.save_model(sktime_model=naive_forecaster_model_with_regressor, path=model_path)\n loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path)\n\n X_test_array = convert(X_test, \"pd.DataFrame\", \"np.ndarray\")\n\n model_predict = naive_forecaster_model_with_regressor.predict(fh=FH, X=X_test)\n predict_conf = pd.DataFrame([{\"fh\": FH, \"predict_method\": \"predict\", \"X\": X_test_array}])\n pyfunc_predict = loaded_pyfunc.predict(predict_conf)\n np.testing.assert_array_equal(model_predict, pyfunc_predict)\n\n model_predict_interval = naive_forecaster_model_with_regressor.predict_interval(\n fh=FH, coverage=COVERAGE, X=X_test\n )\n predict_interval_conf = pd.DataFrame(\n [\n {\n \"fh\": FH,\n \"predict_method\": \"predict_interval\",\n \"coverage\": COVERAGE,\n \"X\": X_test_array,\n }\n ]\n )\n pyfunc_predict_interval = loaded_pyfunc.predict(predict_interval_conf)\n np.testing.assert_array_equal(model_predict_interval.values, pyfunc_predict_interval.values)\n\n model_predict_quantiles = naive_forecaster_model_with_regressor.predict_quantiles(\n fh=FH, alpha=ALPHA, X=X_test\n )\n predict_quantiles_conf = pd.DataFrame(\n [\n {\n \"fh\": FH,\n \"predict_method\": \"predict_quantiles\",\n \"alpha\": ALPHA,\n \"X\": X_test_array,\n }\n ]\n )\n pyfunc_predict_quantiles = loaded_pyfunc.predict(predict_quantiles_conf)\n np.testing.assert_array_equal(model_predict_quantiles.values, pyfunc_predict_quantiles.values)\n\n model_predict_var = naive_forecaster_model_with_regressor.predict_var(fh=FH, cov=COV, X=X_test)\n predict_var_conf = pd.DataFrame(\n [\n {\n \"fh\": FH,\n \"predict_method\": \"predict_var\",\n \"cov\": COV,\n \"X\": X_test_array,\n }\n ]\n )\n pyfunc_predict_var = loaded_pyfunc.predict(predict_var_conf)\n np.testing.assert_array_equal(model_predict_var.values, pyfunc_predict_var.values)", "def 
execute_predict(flight_route,\n test_data_path=None,\n similarity_score=None,\n window_size=None,\n threshold=None,\n lstm=None,\n X_train_scaler=None,\n results_path=None,\n add_plots=True,\n run_new_model=False,\n X_train=None,\n features_list=None,\n target_features_list=None,\n save_model=False,\n Y_train_scaler=None,\n Y_train=None,\n event=None,\n grid_dictionary=None):\n\n tpr_scores = defaultdict(list)\n fpr_scores = defaultdict(list)\n acc_scores = defaultdict(list)\n delay_scores = defaultdict(list)\n routes_duration = defaultdict(list)\n attack_duration = defaultdict(list)\n auc_scores = defaultdict(list)\n best_params = defaultdict(list)\n\n # Set a threshold in new model creation flow\n if run_new_model:\n event.wait()\n threshold = predict_train_set(lstm,\n X_train,\n save_model,\n add_plots,\n threshold,\n features_list,\n target_features_list,\n results_path,\n flight_route,\n similarity_score,\n X_train_scaler,\n Y_train,\n Y_train_scaler)\n\n flight_dir = os.path.join(test_data_path, flight_route)\n ATTACKS = get_subdirectories(flight_dir)\n\n figures_results_path = os.path.join(results_path, \"Figures\")\n create_directories(figures_results_path)\n\n attacks_figures_results_path = os.path.join(figures_results_path, \"Attacks\")\n create_directories(attacks_figures_results_path)\n\n # Iterate over all attacks in order to find anomalies\n for attack in ATTACKS:\n event.wait()\n attack_name = attack\n\n if \"_\" in attack_name:\n attack_name = attack_name.split(\"_\")[0]\n\n current_attack_figures_results_path = os.path.join(attacks_figures_results_path, attack_name)\n create_directories(current_attack_figures_results_path)\n\n attacks_path = os.path.join(*[str(test_data_path), str(flight_route), str(attack)])\n for flight_csv in os.listdir(f\"{attacks_path}\"):\n\n flight_attack_path = os.path.join(*[str(attacks_path), str(flight_csv)])\n df_test_source = pd.read_csv(f\"{flight_attack_path}\")\n df_test_labels = df_test_source[[ATTACK_COLUMN]].values\n\n attack_time = len(df_test_labels)\n\n input_df_test = df_test_source[features_list]\n target_df_test = df_test_source[target_features_list]\n\n # Step 1 : Clean test data set\n input_clean_df_test = clean_data(input_df_test)\n target_clean_df_test = clean_data(target_df_test)\n\n # Step 2: Normalize the data\n X_test = X_train_scaler.transform(input_clean_df_test)\n\n X_test_preprocessed, Y_test_labels_preprocessed = get_testing_data_lstm(X_test, df_test_labels, window_size)\n\n Y_test = Y_train_scaler.transform(target_clean_df_test)\n\n Y_test_preprocessed = get_training_data_lstm(Y_test, window_size)\n\n current_best_params = {}\n if grid_dictionary:\n lstm, current_best_params, X_test_preprocessed, Y_test_preprocessed, Y_test_labels_preprocessed = get_gridSearch_model(\n grid_dictionary,\n X_test,\n Y_test,\n X_train,\n Y_train,\n len(features_list),\n len(target_features_list),\n df_test_labels,\n similarity_score)\n\n X_pred = lstm.predict(X_test_preprocessed, verbose=0)\n assert len(Y_test_preprocessed) == len(X_pred)\n\n scores_test = []\n for i, pred in enumerate(X_pred):\n scores_test.append(anomaly_score_multi(Y_test_preprocessed[i], pred, similarity_score))\n\n # Add reconstruction error scatter if plots indicator is true\n event.wait()\n if add_plots:\n title = f'Outlier Score Testing for {flight_csv} in {flight_route}({attack})'\n plot_reconstruction_error_scatter(scores=scores_test,\n labels=Y_test_labels_preprocessed,\n threshold=threshold,\n plot_dir=current_attack_figures_results_path,\n title=title\n )\n 
key = get_plots_key(algorithm='LSTM', similarity=similarity_score, flight_route=flight_route)\n plt_path = os.path.join(*[str(current_attack_figures_results_path), str(title) + '.png'])\n InputSettings.set_plots(key, plt_path)\n\n mean_y_actual = multi_mean(Y_test_preprocessed)\n mean_y_pred = multi_mean(X_pred)\n\n assert mean_y_actual.shape == mean_y_pred.shape\n\n for i, target_feature in enumerate(target_features_list):\n title = \"Test performance of LSTM for \" + target_feature + \" feature in \" + flight_csv\n plot_prediction_performance(Y_train=mean_y_actual[:, i],\n X_pred=mean_y_pred[:, i],\n results_path=current_attack_figures_results_path,\n title=title,\n y_label=\"Sensor's Mean Value\")\n\n predictions = [1 if x >= threshold else 0 for x in scores_test]\n\n # Add roc curve if plots indicator is true\n if add_plots:\n pass\n # plot_roc(y_true=df_test_labels,y_pred=predictions, plot_dir=results_path,title=f'ROC Curve - {flight_csv} in {flight_route}({attack})')\n\n attack_start, attack_end = get_attack_boundaries(df_test_source[ATTACK_COLUMN])\n\n method_scores = get_method_scores(predictions, attack_start, attack_end,\n add_window_size=True, window_size=window_size)\n\n auc_key = get_auc_plot_key(algorithm='LSTM', similarity=similarity_score, flight_route=flight_route)\n auc_title = f'Receiver Operating Characteristic for {flight_csv} in {flight_route}({attack})'\n auc_plt_path = os.path.join(*[str(current_attack_figures_results_path), str(auc_title) + '.png'])\n InputSettings.set_plots(auc_key, auc_plt_path)\n auc = calculate_auc(scores_test, Y_test_labels_preprocessed.ravel(), 'LSTM', auc_plt_path, attack)\n\n tpr_scores[attack].append(method_scores[0])\n fpr_scores[attack].append(method_scores[1])\n acc_scores[attack].append(method_scores[2])\n delay_scores[attack].append(method_scores[3])\n routes_duration[attack].append(attack_time)\n attack_duration[attack].append(method_scores[4])\n auc_scores[attack].append(auc)\n best_params[attack].append(current_best_params)\n\n return tpr_scores, fpr_scores, acc_scores, delay_scores, routes_duration, attack_duration, auc_scores, best_params", "def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to 
{}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)", "def run_model(model_name, clf, X_train, y_train, X_test, y_test, feat, output_dirpath):\n model = clf.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n train_pred = model.predict(X_train)\n model_pred = model.predict(X_test)\n\n # Save predictions\n dirpath = os.path.join(output_dirpath, '{}_{}'.format(model_name, feat), 'predictions')\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n np.savetxt(os.path.join(dirpath, f'{model_name.replace(\"/\", \"_\").replace(\" \", \"_\")}_test_preds.txt'), model_pred)\n np.savetxt(os.path.join(dirpath, f'{model_name.replace(\"/\", \"_\").replace(\" \", \"_\")}_train_preds.txt'), train_pred)\n\n # Save classifier (with weights)\n dirpath = os.path.join(output_dirpath, '{}_{}'.format(model_name, feat), 'models')\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n with open(os.path.join(dirpath, f'{model_name.replace(\"/\", \"_\").replace(\" \", \"_\")}.pkl'), 'wb') as f:\n pickle.dump(model, f)\n\n return model, score, model_pred", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n print(f\"Parameters {fn_args}\")\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n batch_size=fn_args.train_batches)\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n batch_size=fn_args.eval_batches)\n\n # mirrored_strategy = tf.distribute.MirroredStrategy()\n # with mirrored_strategy.scope():\n model = encoder_decoder_model.build_keras_model(\n timesteps=fn_args.timesteps,\n number_features=fn_args.number_features,\n outer_units=fn_args.outer_units,\n inner_units=fn_args.inner_units)\n\n steps_per_epoch = fn_args.training_example_count / fn_args.train_batches\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard()\n\n model.fit(\n train_dataset,\n epochs=int(fn_args.train_steps / steps_per_epoch),\n steps_per_epoch=steps_per_epoch,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n 
callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default': _get_serve_tf_examples_fn(\n model, tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n\n model.save(\n fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def save_model_output2csv(RunSet='FP-MOYA-Nest', res='0.25x0.3125',\n folder='./'):\n import seaborn as sns\n # Which flights to plot?\n if (RunSet == 'FP-MOYA-Nest') and (res == '0.25x0.3125'):\n # Local settings/variables\n flight_IDs = ['C006', 'C007']\n sdate_d = {\n 'C006': datetime.datetime(2017, 3, 1),\n 'C007': datetime.datetime(2017, 3, 2),\n }\n # Loop by flight and retrieve the files as dataframes\n dfs_mod = {}\n for flight_ID in flight_IDs:\n # Get data\n sdate = sdate_d[flight_ID]\n dfs_mod_GC = get_GEOSChem4flightnum(flight_ID=flight_ID,\n res=res,\n RunSet=RunSet,\n sdate=sdate,\n )\n # Save to csv\n df = dfs_mod_GC[list(dfs_mod_GC.keys())[0]]\n filename_str = 'GC_planeflight_data_{}_{}'\n filename = filename_str.format(RunSet, flight_ID)\n# filename = AC.rm_spaces_and_chars_from_str(filename)\n df.to_csv(os.path.join(folder+filename+'.csv'))\n\n elif (res == '0.25x0.3125') and (RunSet == 'FP-Nest'):\n flight_nums = [\n # 217,\n 218, 219, 220, 221, 222, 223, 224, 225,\n ]\n flight_IDs = ['C{}'.format(i) for i in flight_nums]\n # - Loop by flight and retrieve the files as dataframes (mod + obs)\n # Model\n dfs_mod_GC = {}\n for flight_ID in flight_IDs:\n dfs = get_GEOSChem4flightnum(flight_ID=flight_ID, res=res,\n RunSet=RunSet,)\n df = dfs[RunSet]\n # Add the derived variables to the dataframe\n df = add_deriv_vars2df(df=df)\n# dfs_mod[flight_ID] = df\n # Save to csv\n# df = dfs_mod_GC[ list(dfs_mod_GC.keys())[0] ]\n filename_str = 'GC_planeflight_data_{}_{}'\n filename = filename_str.format(RunSet, flight_ID)\n# filename = AC.rm_spaces_and_chars_from_str(filename)\n df.to_csv(os.path.join(folder+filename+'.csv'))", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n 
im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s 
| logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def runner(base, model=None):\n\n if model is None:\n model = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"rf_allneg.clf\"\n )\n infile = os.path.join(base, \"mp_feat_norm.txt\")\n X, memo = io.prepare_feat(infile)\n clf = deserialize(model)\n prob = np.array(clf.predict_proba(X))\n pos = np.array([\"Yes\" if x == 1 else \"No\" for x in clf.predict(X)])\n out = np.concatenate((memo, prob, pos.reshape(-1, 1)), axis=1)\n header = [\"ID\", \"NEG\", \"POS\", \"IS_CMPLX\"]\n df = pd.DataFrame(out, columns=header)\n df = df[[\"ID\", \"POS\", \"NEG\", \"IS_CMPLX\"]]\n outfile = os.path.join(base, \"rf.txt\")\n df.to_csv(outfile, sep=\"\\t\", index=False)\n return True", "def score(self, experiment_path: str, result_file: str, **kwargs):\n # Update config parameters with new kwargs\n\n config = torch.load(glob.glob(\n \"{}/run_config*\".format(experiment_path))[0],\n map_location=lambda storage, loc: storage)\n config_parameters = dict(config, **kwargs)\n model = torch.load(glob.glob(\n \"{}/run_model*\".format(experiment_path))[0],\n map_location=lambda storage, loc: storage)\n encoder = torch.load(glob.glob(\n '{}/run_encoder*'.format(experiment_path))[0],\n map_location=lambda storage, loc: storage)\n testlabel = config_parameters['testlabel']\n testdata = config_parameters['testdata']\n # Only a single item to evaluate\n if isinstance(testlabel, list) and len(testlabel) == 1:\n testlabel = testlabel[0]\n if isinstance(testdata, list) and len(testdata) == 1:\n testdata = testdata[0]\n\n labels_df = pd.read_csv(testlabel, sep=' ')\n labels_df['encoded'], encoder = utils.encode_labels(\n labels=labels_df['bintype'], encoder=encoder)\n config_parameters.setdefault('colname', ('filename', 'encoded'))\n dataloader = dataset.getdataloader(\n data_frame=labels_df,\n data_file=testdata,\n num_workers=4,\n batch_size=1, # do not apply any padding\n colname=config_parameters[\n 'colname'] # For other datasets with different key names\n )\n model = model.to(DEVICE).eval()\n genuine_label_idx = encoder.transform(['genuine'])[0]\n\n with torch.no_grad(), open(result_file,\n 'w') as wp, tqdm(total=len(dataloader),\n unit='utts') as pbar:\n datawriter = csv.writer(wp, delimiter=' ')\n datawriter.writerow(['filename', 'score'])\n for batch in dataloader:\n inputs, _, filenames = batch\n inputs = inputs.float().to(DEVICE)\n preds = model(inputs)\n for pred, filename in zip(preds, filenames):\n # Single batchsize\n datawriter.writerow([filename, pred[0].item()])\n pbar.update()\n 
print(\"Score file can be found at {}\".format(result_file))", "def evaluate(input_path, model_path, metrics_path):\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Loading input dataset\")\n dataset = pd.read_csv(input_path)\n\n X_eval = dataset.drop(\"Survived\", axis=1)\n y_eval = dataset[\"Survived\"]\n\n logger.info(\"Loading model\")\n model = joblib.load(model_path)\n\n logger.info(\"Calculating metrics\")\n scorer = metrics.make_scorer(metrics.mean_squared_error)\n cv_results = cross_validate(model, X=X_eval, y=y_eval, scoring=scorer, cv=5)\n\n metric_values = {\"mse\": cv_results[\"test_score\"].mean()}\n\n logger.info(f\"Writing output to {metrics_path}\")\n with open(metrics_path, \"w\") as file_:\n json.dump(metric_values, file_)", "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return 
model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def main(modelfile, features, print_results=True):\n model = utils.get_model(modelfile)\n if not model:\n return []\n x = numpy.array([features])\n model_output = get_model_output(model, x)\n results = get_results(model_output, model['outputs'])\n\n if print_results:\n show_results(results, n=10)\n return results", "def save_results(ui_dict, result_dict, result_df=None, save_model_df=False):\r\n # Write user inputs dictionary as a dataframe\r\n ui_dict_df = pd.DataFrame.from_dict(ui_dict,\r\n orient='index')\r\n ui_dict_df.rename(columns={0:'Values'}, inplace=True)\r\n \r\n # Write model outputs dictionary as a dataframe\r\n result_dict_df = pd.DataFrame.from_dict(result_dict,\r\n orient='index')\r\n result_dict_df.rename(columns={0:'Values'}, inplace=True)\r\n \r\n # Write header and separator rows for the dataframe\r\n head_df = pd.DataFrame.from_dict({\"Model Outputs\":\"---------\"},\r\n orient='index')\r\n sep_df = pd.DataFrame.from_dict({\"User Inputs\":\"---------\"},\r\n orient='index')\r\n \r\n # Concatenate dataframes together and write as a CSV\r\n result_dict_df = pd.concat([head_df, result_dict_df, sep_df, ui_dict_df])\r\n result_dict_df.to_csv(out.out_loc(\"result_dict.csv\", ui_dict[\"run_name\"])) \r\n \r\n if result_df is not None:\r\n # Save the result dataframe for future reference\r\n result_df.to_csv(out.out_loc(\"result_df.csv\", ui_dict[\"run_name\"]))\r\n \r\n if save_model_df is True:\r\n model_df = result_df[['Usage (kWh)', 'Reported NYSEG Price ($/kWh)', \r\n 'LBMP ($/MWHr)', 'Marginal Cost Losses ($/MWHr)',\r\n 'Marginal Cost Congestion ($/MWHr)']]\r\n model_df.to_csv(out.out_loc(\"model_df.csv\", ui_dict[\"run_name\"]))", "def run_model(model):\n\n model.create_initialised_input()\n\n model.run_from_buffer()\n\n output = model.output_parse()\n return output", "def apply(self):\n\n sc = SparkContext(appName=\"Model Applier\")\n sqlContext = SQLContext(sc)\n\n # Add model and supporting files to SparkContext\n for item in self.model_location_dict.items():\n ModelApplier.add_files_to_context(item[1], sc)\n\n partition_processor = self.get_partition_processor()\n infile = sc.textFile(self.input_location)\n header_line = infile.first()\n infile = infile.filter(lambda x: x != header_line)\n\n result = infile.mapPartitions(partition_processor).flatMap(lambda x: x)\n print('result.class', result.__class__)\n\n result = result.map(lambda (x, a, y, segment, model_version):\n (int(x), float(a), float(y), 
segment, model_version))\n sqlContext.createDataFrame(result).saveAsParquetFile(self.output_location)", "def eval_input_fn(df):\n fts = df.drop(columns=['class'])\n labs = df.filter(items=['class']).values.astype(int)\n\n features = {k:list(v.values) for k,v in fts.items()}\n features = dict(features)\n x = fts.values\n x = np.array([[x]]).reshape((np.shape(x)[0], np.shape(x)[1], 1, 1))\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices({\"x_ph\":x,\"y_ph\":convert_to_one_hot(labs)})\n \n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).batch(np.shape(x)[0]).repeat()\n # Return the read end of the pipeline.\n return dataset.make_one_shot_iterator().get_next()", "def train_and_eval():\n # train_file_name = 'adult.data'\n # test_file_name = 'adult.test'\n train_file_name = 'poker-hand-testing.data'\n test_file_name = 'poker-hand-training-true.data'\n #test_file_name = maybe_download()\n df_train = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n df_test = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n\n #df_train[LABEL_COLUMN] = (df_train[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n #df_test[LABEL_COLUMN] = (df_test[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n\n model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir\n print(\"model directory = %s\" % model_dir)\n m = build_estimator(model_dir)\n print(m)\n m.fit(input_fn=lambda: input_fn(df_train), steps=FLAGS.train_steps)\n results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)\n for key in sorted(results):\n print(\"%s: %s\" % (key, results[key]))", "def test_model(det_model, path='data/test'):\n result = []\n i = 0\n tests = sorted(os.listdir(path))\n for tst_file in tests:\n print(tst_file)\n test_data = pd.read_csv(os.path.join(path, tst_file), index_col=0)\n res = det_model.test(test_data)\n result.append((i, res,))\n i += 1\n\n with open('output/{0}.csv'.format(det_model), 'w+') as output:\n writer = csv.writer(output, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerows(result)", "def model_fn(model_dir):\n model_path = Path(model_dir)/\"model.joblib\"\n clf = joblib.load(model_path)\n return clf", "def evaluate(self, model_id: str, dataset_id: str, filters: entities.Filters = None, service_config=None):\n\n payload = {'input': {'datasetId': dataset_id}}\n if service_config is not None:\n payload['config'] = {'serviceConfig': service_config}\n if filters is None:\n filters = entities.Filters()\n if filters is not None:\n payload['input']['datasetQuery'] = filters.prepare()\n success, response = self._client_api.gen_request(req_type=\"post\",\n path=f\"/ml/models/{model_id}/evaluate\",\n json_req=payload)\n if not success:\n raise exceptions.PlatformException(response)\n return entities.Execution.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project)", "def create_model_csv(self):\n\n self.model_df.to_csv(self.model_output_file)", "def read_results(\n self,\n model_run_names: list,\n model_names: list,\n output_names: list,\n timesteps: list = None,\n decisions: list = None,\n time_decision_tuples: list = None,\n ):\n\n self.validate_names(model_run_names, model_names, output_names)\n\n results_dict = self._store.get_results(\n model_run_names,\n model_names[0],\n output_names,\n timesteps,\n decisions,\n 
time_decision_tuples,\n )\n\n # Keep tabs on the units for each output\n for model_run_name in model_run_names:\n for output_name in output_names:\n res = results_dict[model_run_name][output_name]\n self._output_units[res.name] = res.unit\n\n # For each output, concatenate all requested model runs into a single data frame\n formatted_frames = []\n for output_name in output_names:\n # Get each DataArray as a pandas data frame and concatenate, resetting the index to\n # give back a flat data array\n list_of_df = [results_dict[x][output_name].as_df() for x in model_run_names]\n names_of_df = [x for x in results_dict.keys()]\n\n formatted_frames.append(\n pd.concat(\n list_of_df, keys=names_of_df, names=[\"model_run\"]\n ).reset_index()\n )\n\n # Append the other output columns to the first data frame\n formatted_frame = formatted_frames.pop(0)\n output_names.pop(0)\n\n for other_frame, output_name in zip(formatted_frames, output_names):\n assert (formatted_frame[\"model_run\"] == other_frame[\"model_run\"]).all()\n assert (\n formatted_frame[\"timestep_decision\"] == other_frame[\"timestep_decision\"]\n ).all()\n formatted_frame[output_name] = other_frame[output_name]\n\n # Unpack the timestep_decision tuples into individual columns and drop the combined\n formatted_frame[[\"timestep\", \"decision\"]] = pd.DataFrame(\n formatted_frame[\"timestep_decision\"].tolist(), index=formatted_frame.index\n )\n\n formatted_frame = formatted_frame.drop(columns=[\"timestep_decision\"])\n\n # Now reorder the columns. Want model_run then timestep then decision\n cols = formatted_frame.columns.tolist()\n\n assert cols[0] == \"model_run\"\n cols.insert(1, cols.pop(cols.index(\"timestep\")))\n cols.insert(2, cols.pop(cols.index(\"decision\")))\n assert cols[0:3] == [\"model_run\", \"timestep\", \"decision\"]\n\n return formatted_frame[cols]", "def execute(train_ts, train_inputs, test_inputs, op_exec):\n if op_exec == \"mlp\":\n model = mod.mlp_model()\n model.fit(train_inputs, train_ts)\n forecast = model.forecast(test_inputs)\n return forecast\n elif op_exec == \"rf\":\n model = mod.rf_model()\n model.fit(train_inputs, train_ts)\n forecast = model.forecast(test_inputs)\n return forecast", "def run_flow(flow_id):\n if flow_id == 1:\n etl.load_images_from_urls()\n elif flow_id == 2:\n etl.find_edges_and_save()\n elif flow_id == 3:\n etl.normalize_dataset()\n elif flow_id == 4:\n classifiers.run_models_comparison()", "def evaluate_df(self, df):\n ## Check invariant; model inputs must be subset of df columns\n var_diff = set(self.var).difference(set(df.columns))\n if len(var_diff) != 0:\n raise ValueError(\n \"Model inputs not a subset of given columns;\\n\"\n + \"missing var = {}\".format(var_diff)\n )\n\n df_tmp = df.copy().drop(self.out, axis=1, errors=\"ignore\")\n ## Evaluate each function\n for func in self.functions:\n ## Concatenate to make intermediate results available\n df_tmp = concat((df_tmp, func.eval(df_tmp)), axis=1)\n\n return df_tmp[self.out]", "def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)", "def test_functional(self, model_fn):\n if h5py is None:\n self.skipTest('h5py required to run this test')\n\n def _make_model():\n inputs = (keras.Input(shape=(4,), name='examples'),\n keras.Input(shape=(4,), name='neighbors'))\n base_model = model_fn(inputs[0].shape.as_list()[-1], 2)\n outputs = keras.layers.add([base_model(inputs[0]), base_model(inputs[1])])\n return keras.Model(inputs=inputs, outputs=outputs)\n\n with 
self.cached_session():\n x = (np.random.normal(size=(16, 4)).astype(np.float32),\n np.random.normal(size=(16, 4)).astype(np.float32))\n model = _make_model()\n predictions = model(x)\n # Save and reload.\n model_path = os.path.join(self.get_temp_dir(), 'model.h5')\n model.save(model_path)\n del model\n loaded_model = keras.models.load_model(\n model_path,\n custom_objects={\n '_make_subclassed': _make_subclassed,\n '_make_subclassed_built': _make_subclassed_built,\n },\n compile=False)\n self.assertAllClose(loaded_model(x), predictions, 1e-9)", "def load_df_from_job_pipeline(self,\n model_id: str,\n geolevel: Optional[str] = None,\n response_format: str = 'csv',\n portfolio_id: Optional[str] = None,\n market_id: Optional[str] = None,\n sites: Optional[List[Tuple[float, float, str]]] = None,\n buffers: Optional[List[str]] = None,\n geoid_list: Optional[List[int]] = None) -> pandas.DataFrame:\n self.create_job(\n model_id=model_id,\n geolevel=geolevel,\n response_format=response_format,\n portfolio_id=portfolio_id,\n market_id=market_id,\n sites=sites,\n buffers=buffers,\n geoid_list=geoid_list)\n\n for idx in range(100):\n if self._logging:\n print('Checking if job is complete...')\n\n job_status = self.status\n if job_status == 'Completed':\n return self.download_job_to_dataframe()\n elif job_status == 'Processing':\n time.sleep(10)\n else:\n raise APIQueryFailedException('Job failed', job_status)\n\n raise APIQueryFailedException('Job never completed successfully')", "def test_auto_arima_model_pyfunc_output(auto_arima_model, model_path, serialization_format):\n flavor.save_model(\n sktime_model=auto_arima_model,\n path=model_path,\n serialization_format=serialization_format,\n )\n loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path)\n\n model_predict = auto_arima_model.predict(fh=FH)\n predict_conf = pd.DataFrame([{\"fh\": FH, \"predict_method\": \"predict\"}])\n pyfunc_predict = loaded_pyfunc.predict(predict_conf)\n np.testing.assert_array_equal(model_predict, pyfunc_predict)\n\n model_predict_interval = auto_arima_model.predict_interval(fh=FH, coverage=COVERAGE)\n predict_interval_conf = pd.DataFrame(\n [\n {\n \"fh\": FH,\n \"predict_method\": \"predict_interval\",\n \"coverage\": COVERAGE,\n }\n ]\n )\n pyfunc_predict_interval = loaded_pyfunc.predict(predict_interval_conf)\n np.testing.assert_array_equal(model_predict_interval.values, pyfunc_predict_interval.values)\n\n model_predict_quantiles = auto_arima_model.predict_quantiles(fh=FH, alpha=ALPHA)\n predict_quantiles_conf = pd.DataFrame(\n [\n {\n \"fh\": FH,\n \"predict_method\": \"predict_quantiles\",\n \"alpha\": ALPHA,\n }\n ]\n )\n pyfunc_predict_quantiles = loaded_pyfunc.predict(predict_quantiles_conf)\n np.testing.assert_array_equal(model_predict_quantiles.values, pyfunc_predict_quantiles.values)\n\n model_predict_var = auto_arima_model.predict_var(fh=FH, cov=COV)\n predict_var_conf = pd.DataFrame([{\"fh\": FH, \"predict_method\": \"predict_var\", \"cov\": COV}])\n pyfunc_predict_var = loaded_pyfunc.predict(predict_var_conf)\n np.testing.assert_array_equal(model_predict_var.values, pyfunc_predict_var.values)", "def perform_trace_modelling(traces, model_func, model_args):\n results = mp.Manager().dict()\n procs = []\n cores, traces_per_core = get_cores_and_traces_per_core(len(traces))\n for core_num in range(cores):\n core_traces = get_traces_for_core(traces, traces_per_core, core_num)\n procs.append(mp.Process(target=model_func,\n args=(core_traces, results, *model_args)))\n initialize_and_join_processes(procs)\n 
return dict(results)", "def main(database_filepath,model_filepath):\n X_train, X_test, y_train, y_test = load_data(database_filepath)\n \n print(X_train.shape,y_train.shape)\n \n print('Building model...')\n model = build_pipeline()\n \n print('Training model...')\n model.fit(X_train, y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, y_test)\n\n print('Saving model...')\n save_model(model, model_filepath)\n\n print('Trained model saved!')", "def all(config_file):\n with open(config_file) as f:\n config = json.load(f)\n scenes = get_realsense_scenes(config['realsense_dir'])\n all_dfs = []\n for scene in scenes:\n scene_data = get_data_from_scene(scene)\n logger.info(\"Evaluating - %s\", scene['scene_name'])\n df = run_test_on_scene(scene_data, config)\n all_dfs.append(df)\n\n df = pd.concat(all_dfs, axis=0)\n df = df.reset_index()\n print(df)\n df.to_csv(config['save_csv'])", "def execute_train(flight_route,\n training_data_path=None,\n results_path=None,\n window_size=None,\n encoding_dimension=None,\n activation=None,\n loss=None,\n optimizer=None,\n add_plots=True,\n features_list=None,\n epochs=10,\n target_features_list=None,\n event=None):\n\n without_anomaly_path = os.path.join(*[str(training_data_path), str(flight_route), 'without_anom.csv'])\n df_train = pd.read_csv(f\"{without_anomaly_path}\")\n\n input_df_train = df_train[features_list]\n target_df_train = df_train[target_features_list]\n\n # Step 1 : Clean train data set\n input_df_train = clean_data(input_df_train)\n\n target_df_train = clean_data(target_df_train)\n\n # Step 2: Normalize the data\n X_train, X_train_scaler = normalize_data(data=input_df_train,\n scaler=\"min_max\")\n X_train_preprocessed = get_training_data_lstm(X_train, window_size)\n\n Y_train, Y_train_scaler = normalize_data(data=target_df_train, # target data\n scaler=\"min_max\")\n Y_train_preprocessed = get_training_data_lstm(Y_train, window_size)\n\n # Get the model which is created by user's parameters\n lstm = get_lstm_autoencoder_model(timesteps=window_size,\n input_features=input_df_train.shape[1],\n target_features=target_df_train.shape[1],\n encoding_dimension=encoding_dimension,\n activation=activation,\n loss=loss,\n optimizer=optimizer)\n\n event.wait()\n\n history = lstm.fit(X_train_preprocessed, Y_train_preprocessed, epochs=epochs, verbose=0).history\n\n # Add plots if the indicator is true\n if add_plots:\n plot(history['loss'], ylabel='loss', xlabel='epoch', title=f'{flight_route} Epoch Loss', plot_dir=results_path)\n\n return lstm, X_train_scaler, Y_train_scaler, X_train_preprocessed, Y_train_preprocessed", "def apply_rf_model(vds, rf_pipeline, rf_features, label='va.label', va_root='va.rf', globals_root='global.rf'):\n\n logger.info(\"Applying RF model to VDS\")\n\n df = vds_to_rf_df(vds, rf_features, label=label)\n\n feature_importance = get_features_importance(rf_pipeline)\n\n transformed = rf_pipeline.transform(df)\n\n logger.info(\"Annotating dataset with results\")\n\n # Required for RDD.toDF() !\n spark = SparkSession(vds.hc.sc)\n\n kt = KeyTable.from_dataframe(\n transformed.rdd.map(\n lambda row:\n Row(variant=row['variant'],\n probability=row[\"probability\"].toArray().tolist(),\n prediction=row[\"predictedLabel\"])\n ).toDF()\n ).persist()\n\n probability_to_dict_expr = 'probability = index([{%s}], label).mapValues(x => x.prob)' % \"},{\".join(\n ['label: \"%s\", prob: probability[%d]' % (l, i) for (i, l) in enumerate(get_labels(rf_pipeline))])\n\n kt = kt.annotate(['variant = Variant(variant)',\n 
probability_to_dict_expr]).key_by('variant')\n\n vds = vds.annotate_variants_table(kt,\n expr=\"%s.prediction = table.prediction, %s.probability = table.probability\" % (\n va_root, va_root))\n vds = vds.annotate_global(globals_root, feature_importance, TDict(TString(), TDouble()))\n\n return vds", "def main(input_filepath, model_filepath, output_filepath, config_file):\n logger = logging.getLogger(__name__)\n logger.info('Loading training set, test set and model and predicting.')\n\n # Parse config file\n config = parse_config(config_file)\n\n # Load data\n X_train = pd.read_csv(input_filepath + '/X_train.csv')\n y_train = pd.read_csv(input_filepath + '/y_train.csv').values.ravel()\n\n X_test = pd.read_csv(input_filepath + '/X_test.csv')\n y_test = pd.read_csv(input_filepath + '/y_test.csv').values.ravel()\n\n # Load model\n model = Model.load(model_filepath + config['predicting']['model_name'])\n\n # Make predictions\n train_pred = model.predict(X_train)\n test_pred = model.predict(X_test)\n\n # Evaluate model\n train_score = np.sqrt(mean_squared_error(y_train, train_pred))\n test_score = np.sqrt(mean_squared_error(y_test, test_pred))\n\n # Plot predictions\n scores = (\n (r'$RMSE={:,.0f}$' + ' EUR').format(train_score),\n (r'$RMSE={:,.0f}$' + ' EUR').format(test_score),\n )\n pred_plots = plot_predictions(scores, train_pred, test_pred, y_train,\n y_test)\n pred_plots.savefig(output_filepath + '/pred_plots.png')", "def train_model_for_prediction(path_to_csv, path_to_json_dir, company, department, classifier_id='random_forest',\n needs_type='manual', ratio_cleaner_val=None, random_state=None, remove_ratios=False,\n remove_needs=False):\n (prepared_train_features, prepared_train_targets,\n prepared_test_features, prepared_test_targets,\n interpretation_keys) = prepare_features_targets(path_to_csv, path_to_json_dir, company, department,\n needs_type=needs_type, ratio_cleaner_val=ratio_cleaner_val,\n random_state=random_state, remove_ratios=remove_ratios,\n remove_needs=remove_needs)\n\n if classifier_id == 'random_forest':\n classifier = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=1)\n elif classifier_id == 'k_neighbors':\n classifier = KNeighborsClassifier()\n elif classifier_id == 'radius_neighbors':\n classifier = RadiusNeighborsClassifier()\n else:\n raise ValueError('Invalid classifier_id specified:', classifier_id + '.',\n 'Must be of type \\'random_forest\\', \\'k_neighbors\\', or \\'radius_neighbors\\'.')\n\n classifier.fit(prepared_train_features, prepared_train_targets)\n\n # run the test data through it to gauge effectiveness.\n test_predictions = classifier.predict(prepared_test_features)\n print_model_analysis(test_predictions, prepared_test_targets)\n\n return classifier, interpretation_keys", "def to_mlflow(\n self,\n tracking_uri: Optional[str] = None,\n experiment_id: Optional[int] = None,\n run_name: str = \"log_biometext_model\",\n input_example: Optional[Dict] = None,\n conda_env: Optional[Dict] = None,\n ) -> str:\n if tracking_uri:\n mlflow.set_tracking_uri(tracking_uri)\n\n # This conda environment is only needed when serving the model later on with `mlflow models serve`\n conda_env = conda_env or {\n \"name\": \"mlflow-dev\",\n \"channels\": [\"defaults\", \"conda-forge\"],\n \"dependencies\": [\n \"python=3.7.9\",\n \"pip>=20.3.0\",\n {\"pip\": [\"mlflow\", f\"biome-text=={__version__}\"]},\n ],\n }\n\n with tempfile.TemporaryDirectory() as tmpdir_name:\n file_path = Path(self.save(directory=tmpdir_name))\n\n with mlflow.start_run(\n 
experiment_id=experiment_id, run_name=run_name\n ) as run:\n mlflow.log_artifact(str(file_path), \"biometext_pipeline\")\n mlflow.pyfunc.log_model(\n artifact_path=\"mlflow_model\",\n python_model=BiomeTextModel(),\n artifacts={\n BiomeTextModel.ARTIFACT_CONTEXT: mlflow.get_artifact_uri(\n f\"biometext_pipeline/{file_path.name}\"\n )\n },\n input_example=input_example,\n conda_env=conda_env,\n )\n model_uri = os.path.join(run.info.artifact_uri, \"mlflow_model\")\n\n return model_uri", "def test_model(predictions: np.array, configs: dict, folder_path: str, test_data_index: pd.Index,\n y_test: np.array,\n study_period_data: pd.DataFrame, parent_model_type: str = 'deep_learning', model_type: str = None,\n history=None, index_id='',\n index_name='', study_period_length: int = 0, model=None, period_range: tuple = (0, 0),\n start_date: datetime.date = datetime.date.today(), end_date: datetime.date = datetime.date.today(),\n get_val_score_only=False, weighting_criterion=None, plotting=False, market_logs=False, **kwargs):\n\n if get_val_score_only:\n # In case classifier is part of MixedEnsemble as is being validated\n y_test = y_test[kwargs['model_index']]\n test_data_index = test_data_index[kwargs['model_index']]\n print(f'\\nGetting validation score for {Style.BRIGHT}{Fore.BLUE}{model_type}{Style.RESET_ALL} ...')\n else:\n print(f'\\nTesting {Style.BRIGHT}{Fore.BLUE}{model_type}{Style.RESET_ALL} model on unseen data ...')\n\n # print(f'{Style.BRIGHT}{Fore.MAGENTA}Length of test data: {len(y_test)}{Style.RESET_ALL}')\n\n study_period_data = study_period_data.copy()\n y_test = y_test.copy()\n predictions = predictions.copy()\n\n timer = Timer().start()\n # JOB: Create data frame with true and predicted values\n if isinstance(test_data_index, pd.MultiIndex):\n test_set_comparison = pd.DataFrame({'y_test': y_test.astype('int8').flatten(), 'prediction': predictions},\n index=test_data_index)\n\n else:\n test_set_comparison = pd.DataFrame({'y_test': y_test.astype('int8').flatten(), 'prediction': predictions},\n index=pd.MultiIndex.from_tuples(test_data_index,\n names=['datadate', 'stock_id']))\n\n # JOB: Transform index of study period data to match test_set_comparison index\n study_period_data.index = study_period_data.index.tolist() # Flatten MultiIndex to tuples\n study_period_data.index.name = 'stock_id' # Rename index\n study_period_data.set_index('datadate', append=True, inplace=True)\n\n # JOB: Merge test set with study period data\n test_set_comparison = test_set_comparison.merge(study_period_data, how='left', left_index=True,\n right_on=['datadate', 'stock_id'])\n\n del study_period_data\n\n # JOB: Create normalized predictions (e.g., directional prediction relative to cross-sectional median of predictions)\n test_set_comparison.loc[:, 'norm_prediction'] = test_set_comparison.loc[:, 'prediction'].gt(\n test_set_comparison.groupby('datadate')['prediction'].transform('median')).astype(np.int16)\n\n # JOB: Create cross-sectional ranking\n test_set_comparison.loc[:, 'prediction_rank'] = test_set_comparison.groupby('datadate')['prediction'].rank(\n method='first', ascending=False).astype('int16')\n test_set_comparison.loc[:, 'prediction_percentile'] = test_set_comparison.groupby('datadate')['prediction'].rank(\n pct=True)\n\n test_data_start_date = test_set_comparison.index.get_level_values('datadate').min().date()\n test_data_end_date = test_set_comparison.index.get_level_values('datadate').max().date()\n test_set_n_days = test_set_comparison.index.get_level_values('datadate').unique().size\n 
test_set_n_constituents = test_set_comparison.index.get_level_values('stock_id').unique().size\n\n cross_section_size = int(round(test_set_comparison.groupby('datadate')['y_test'].count().mean()))\n print(f'Average size of cross sections: {int(cross_section_size)}')\n\n # Define top k values\n top_k_list = [5, 10]\n\n if cross_section_size > 30:\n top_k_list.extend([50, 100, 150, 200, 250])\n\n # JOB: Create empty dataframe for recording top-k accuracies\n top_k_metrics = pd.DataFrame()\n top_k_metrics.index.name = 'k'\n\n t_costs = 0.0005 # Set transaction costs per half-turn\n\n top_10_excess_return_series = None\n top_10_error_series = None\n market_return_series = None\n market_cum_returns = None\n market_metrics = None\n\n if not get_val_score_only:\n market_metrics, market_return_series, market_cum_returns = get_market_metrics(test_set_comparison,\n t_costs=t_costs,\n index_id=index_id,\n index_name=index_name,\n test_data_start_date=test_data_start_date,\n test_data_end_date=test_data_end_date,\n market_logs=market_logs)\n\n for top_k in top_k_list:\n # JOB: Filter test data by top/bottom k affiliation\n long_positions = test_set_comparison[test_set_comparison['prediction_rank'] <= top_k]\n short_positions = test_set_comparison[\n test_set_comparison['prediction_rank'] > test_set_comparison['cs_length'] - top_k]\n short_positions.loc[:, 'daily_return'] = - short_positions.loc[:, 'daily_return']\n\n full_portfolio = pd.concat([long_positions, short_positions], axis=0)\n\n if not get_val_score_only:\n if top_k == 5:\n # Get series of daily portfolio returns\n top_10_excess_return_series = calc_excess_returns(\n full_portfolio.groupby(level=['datadate'])['daily_return'].mean()).rename('daily_excess_return')\n top_10_excess_return_series = top_10_excess_return_series.reset_index()\n top_10_excess_return_series.loc[:, 'datadate'] = top_10_excess_return_series['datadate'].dt.strftime(\n '%Y-%m-%d')\n top_10_excess_return_series.set_index('datadate', inplace=True)\n\n sorted_portfolio = full_portfolio.set_index('prediction_rank', append=True, inplace=False)\n sorted_portfolio.reset_index(['stock_id'], inplace=True)\n sorted_portfolio.sort_index(level=['datadate', 'prediction_rank'], inplace=True)\n sorted_portfolio.reset_index(level='datadate', inplace=True, drop=True)\n top_10_error_series = (sorted_portfolio['norm_prediction'] - sorted_portfolio['y_test']).abs()\n top_10_error_series = top_10_error_series.values.tolist()\n\n cumulative_return = (top_10_excess_return_series.get('daily_excess_return') + 1).cumprod().rename(\n 'Cumulative Portfolio Return')\n cumulative_return.index.name = 'Time'\n\n if plotting:\n # Merge market and portfolio returns\n merged = pd.concat([cumulative_return, market_cum_returns], axis=1, join='outer')\n merged.plot()\n plt.legend(loc='best')\n plt.title(label=model_type)\n plt.show()\n\n annualized_sharpe = calc_sharpe(full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sharpe_atc = calc_sharpe(\n full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n annualized_sortino = calc_sortino(full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sortino_atc = calc_sortino(\n full_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n\n accuracy = None\n\n # JOB: Calculate accuracy score over all trades\n if parent_model_type == 'deep_learning':\n accuracy = 
binary_accuracy(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values).numpy()\n\n elif parent_model_type == 'tree_based':\n accuracy = accuracy_score(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values)\n\n elif parent_model_type == 'mixed':\n accuracy = accuracy_score(full_portfolio['y_test'].values,\n full_portfolio['norm_prediction'].values)\n\n mean_daily_return = full_portfolio.groupby(level=['datadate'])['daily_return'].mean().mean()\n\n mean_daily_excess_return = calc_excess_returns(\n full_portfolio.groupby(level=['datadate'])['daily_return'].mean().rename('daily_return')).mean()\n\n mean_daily_short = short_positions.groupby(level=['datadate'])['daily_return'].mean().mean()\n mean_daily_long = long_positions.groupby(level=['datadate'])['daily_return'].mean().mean()\n\n top_k_metrics.loc[top_k, 'Accuracy'] = accuracy\n top_k_metrics.loc[top_k, 'Mean Daily Return'] = mean_daily_return\n top_k_metrics.loc[top_k, 'Annualized Return'] = annualize_metric(mean_daily_return)\n top_k_metrics.loc[top_k, 'Mean Daily Excess Return'] = mean_daily_excess_return\n top_k_metrics.loc[top_k, 'Annualized Excess Return'] = annualize_metric(mean_daily_excess_return)\n top_k_metrics.loc[top_k, 'Annualized Sharpe'] = annualized_sharpe\n top_k_metrics.loc[top_k, 'Annualized Sortino'] = annualized_sortino\n top_k_metrics.loc[top_k, 'Mean Daily Return (Short)'] = mean_daily_short\n top_k_metrics.loc[top_k, 'Mean Daily Return (Long)'] = mean_daily_long\n\n # JOB: Add metrics incl. transaction costs of 5 bps per half-turn\n top_k_metrics.loc[top_k, 'Mean Daily Return_atc'] = mean_daily_return - 4 * t_costs\n top_k_metrics.loc[top_k, 'Annualized Return_atc'] = annualize_metric(mean_daily_return - 4 * t_costs)\n top_k_metrics.loc[top_k, 'Mean Daily Excess Return_atc'] = mean_daily_excess_return - 4 * t_costs\n top_k_metrics.loc[top_k, 'Annualized Excess Return_atc'] = annualize_metric(\n mean_daily_excess_return - 4 * t_costs)\n top_k_metrics.loc[top_k, 'Annualized Sharpe_atc'] = annualized_sharpe_atc\n top_k_metrics.loc[top_k, 'Annualized Sortino_atc'] = annualized_sortino_atc\n top_k_metrics.loc[top_k, 'Mean Daily Return (Short)_atc'] = mean_daily_short - 2 * t_costs\n top_k_metrics.loc[top_k, 'Mean Daily Return (Long)_atc'] = mean_daily_long - 2 * t_costs\n\n if get_val_score_only:\n print(f'{weighting_criterion} score: {round(top_k_metrics.loc[5, weighting_criterion], 4)}')\n return top_k_metrics.loc[5, weighting_criterion]\n\n top_k_metrics = pd.concat([top_k_metrics, market_metrics.to_frame().T], join='outer', verify_integrity=True)\n top_k_metrics.fillna('-', inplace=True)\n\n # JOB: Display top-k metrics\n pretty_print_table(top_k_metrics)\n\n # JOB: Plot accuracies and save figure to file\n if plotting:\n for col in top_k_metrics.columns:\n top_k_metrics[col].plot(kind='line', legend=True, fontsize=14)\n plt.savefig(os.path.join(ROOT_DIR, folder_path, f'top_k_{col.lower()}.png'), dpi=600)\n plt.show()\n\n if parent_model_type == 'deep_learning':\n # JOB: Plot training and validation metrics for LSTM\n try:\n plot_train_val(history, configs['model']['metrics'], store_png=True, folder_path=folder_path)\n except AttributeError as ae:\n print(f'{Fore.RED}{Style.BRIGHT}Plotting failed.{Style.RESET_ALL}')\n # print(ae)\n except UnboundLocalError as ule:\n print(\n f'{Fore.RED}{Back.YELLOW}{Style.BRIGHT}Plotting failed. 
History has not been created.{Style.RESET_ALL}')\n # print(ule)\n\n # JOB: Evaluate model on full test data\n test_score = None\n if parent_model_type == 'deep_learning':\n test_score = float(binary_accuracy(test_set_comparison['y_test'].values,\n test_set_comparison['norm_prediction'].values).numpy())\n\n print(f'\\nTest score on full test set: {float(np.round(test_score, 4))}')\n\n elif parent_model_type in ['tree_based', 'mixed']:\n test_score = accuracy_score(test_set_comparison['y_test'].values,\n test_set_comparison['norm_prediction'].values)\n print(f'\\nTest score on full test set: {np.round(test_score, 4)}')\n\n # pretty_print_table(\n # pd.DataFrame({'y_test': test_set_comparison['y_test'].values, 'norm_prediction': test_set_comparison[\n # 'norm_prediction'].values}).sample(100)) # TODO: Remove\n\n total_epochs = len(history.history['loss']) if history is not None else None\n\n # JOB: Fill dict for logging\n data_record = {\n 'ID': config.run_id,\n 'Experiment Run End': datetime.datetime.now().isoformat(),\n 'Parent Model Type': parent_model_type,\n 'Model Type': model_type,\n 'Index ID': index_id,\n 'Index Name': index_name,\n 'Study Period ID': config.study_period_id,\n 'Study Period Length': study_period_length,\n 'Period Range': period_range,\n 'Study Period Start Date': start_date.isoformat(),\n 'Study Period End Date': end_date.isoformat(),\n 'Test Set Size': y_test.shape[0],\n 'Days Test Set': test_set_n_days,\n 'Constituent Number': test_set_n_constituents,\n 'Average Cross Section Size': cross_section_size,\n 'Test Set Start Date': test_data_start_date.isoformat(),\n 'Test Set End Date': test_data_end_date.isoformat(),\n 'Total Accuracy': test_score,\n\n 'Top-k Accuracy Scores': top_k_metrics['Accuracy'].to_dict(),\n 'Top-k Mean Daily Return': top_k_metrics['Mean Daily Return'].to_dict(),\n 'Top-k Mean Daily Excess Return': top_k_metrics['Mean Daily Excess Return'].to_dict(),\n 'Top-k Annualized Excess Return': top_k_metrics['Annualized Excess Return'].to_dict(),\n 'Top-k Annualized Return': top_k_metrics['Annualized Return'].to_dict(),\n 'Top-k Annualized Sharpe': top_k_metrics['Annualized Sharpe'].to_dict(),\n 'Top-k Annualized Sortino': top_k_metrics['Annualized Sortino'].to_dict(),\n 'Mean Daily Return (Short)': top_k_metrics['Mean Daily Return (Short)'].to_dict(),\n 'Mean Daily Return (Long)': top_k_metrics['Mean Daily Return (Long)'].to_dict(),\n\n 'Top-k Mean Daily Return_atc': top_k_metrics['Mean Daily Return_atc'].to_dict(),\n 'Top-k Annualized Return_atc': top_k_metrics['Annualized Return_atc'].to_dict(),\n 'Top-k Mean Daily Excess Return_atc': top_k_metrics['Mean Daily Excess Return_atc'].to_dict(),\n 'Top-k Annualized Excess Return_atc': top_k_metrics['Annualized Excess Return_atc'].to_dict(),\n 'Top-k Annualized Sharpe_atc': top_k_metrics['Annualized Sharpe_atc'].to_dict(),\n 'Top-k Annualized Sortino_atc': top_k_metrics['Annualized Sortino_atc'].to_dict(),\n 'Top-k Mean Daily Return (Short)_atc': top_k_metrics['Mean Daily Return (Short)_atc'].to_dict(),\n 'Top-k Mean Daily Return (Long)_atc': top_k_metrics['Mean Daily Return (Long)_atc'].to_dict(),\n\n 'Model Configs': model.get_params(),\n 'Total Epochs': total_epochs,\n\n 'Return Series': top_10_excess_return_series['daily_excess_return'].to_dict(),\n 'Prediction Error': top_10_error_series\n }\n\n # JOB: Write to logs\n write_to_logs(data_record)\n\n print('Done testing on unseen data.')\n timer.stop()\n\n return top_10_error_series", "def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, 
y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + '{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin,\n primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy", "def main(input_filepath, model_type, model_path):\n with open(\"src/config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n\n with timer(\"Loading testing data\"):\n test_df = pd.read_pickle(input_filepath + \"/test_data.pkl\")\n\n row_ids = test_df[\"row_id\"]\n del test_df[\"row_id\"]\n\n if model_type == \"xgb\":\n predictions = predict_with_xgb(test_df, model_path)\n\n elif model_type == \"lgbm\":\n predictions = predict_with_lgbm(test_df, row_ids, model_path)\n\n elif model_type == \"ctb\":\n predictions = predict_with_ctb(test_df, row_ids, model_path)\n\n elif model_type == \"lgbm_meter\":\n predictions = predict_with_lgbm_meter(test_df, row_ids, model_path)\n\n elif model_type == \"lgbm_building\":\n predictions = predict_with_lgbm_building(test_df, row_ids, model_path)\n\n else:\n raise ValueError(model_type + \" is not a valid model type to predict from\")\n\n with timer(\"Creating submission file\"):\n create_submission_file(row_ids, predictions, cfg[\"use_leaks\"])", "def predict(test_data_path, sample_data_filepath, target_value_type, model_function, parameters, test_system_information, sample_system_information, title):\n\n test_data = load_data(test_data_path)\n sample_data = load_data(sample_data_filepath)\n\n x_test = test_data[\"x\"]\n x_sample = sample_data[\"x\"]\n\n n = len(parameters)\n if n == 1:\n parameter_a = parameters[0]\n predict_value = model_function(x_test, parameter_a)\n elif n == 2:\n parameter_a = parameters[0]\n parameter_b = parameters[1]\n predict_value = model_function(x_test, parameter_a, parameter_b)\n elif n == 3:\n parameter_a = parameters[0]\n parameter_b = parameters[1]\n parameter_c = parameters[2]\n predict_value = model_function(\n x_test, parameter_a, parameter_b, parameter_c)\n else:\n raise 
ValueError(\"Function do not support that model\")\n\n plt.plot(x_test, predict_value, '.--', label=\"predict data\")\n plt.plot(x_test, test_data[target_value_type], '.-',\n label=test_system_information + ' experiment data')\n plt.plot(x_sample, sample_data[target_value_type], '.-',\n label=sample_system_information + ' experiment data')\n plt.title(title)\n plt.xlabel('x_value')\n plt.ylabel(\"time/s\")\n plt.legend()\n plt.savefig(title + \".jpg\")\n plt.close()\n\n return predict_value", "def run(model_str, output_dir):\n RANDOM_SEED = 0\n\n SHOULD_LOG_IMPORT = True\n\n OUTPUT_DIR = str(output_dir)\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n MODEL_STR = str(model_str)\n RESAMPLING_NUMBER = 5\n ROC_CURVE_STEPS = 100\n ROC_PLOT_PATH = os.path.join(OUTPUT_DIR, 'roc.png')\n\n manager = ROCManager(RANDOM_SEED)\n manager.load_data(SHOULD_LOG_IMPORT)\n manager.plot_roc_distributions(MODEL_STR, RESAMPLING_NUMBER, ROC_CURVE_STEPS, ROC_PLOT_PATH)", "def runs_loader(path):\n files = sorted(glob.glob(f\"{path}/*_runs.csv\"))\n df_lis = list(range(len(files)))\n for i, f in enumerate(files):\n try:\n df_lis[i] = pd.read_csv(f, sep=\",\", header=0)\n print('Read runs.csv\\n', f, df_lis[i].shape,\n df_lis[i]['dataset__id'][0], df_lis[i]['pipeline__id'][0])\n except Exception as e:\n print(e)\n continue\n df = pd.concat(df_lis, axis=0, sort=False).reset_index()\n # with pd.option_context('display.max_rows', None,\n # 'display.max_columns', None):\n # msg = tabulate.tabulate(df, headers='keys', tablefmt='psql')\n # print(msg)\n return df", "def predict_and_evaluate(config, workdir, ckpt_path=None):\n logging.info('Starting testing at %s', workdir)\n tf.io.gfile.makedirs(workdir)\n\n rng = jax.random.PRNGKey(config.seed)\n # Build input pipeline.\n rng, data_rng = jax.random.split(rng)\n data_rng = jax.random.fold_in(data_rng, jax.process_index())\n test_ds = []\n for split in config.dataset.test_splits:\n ds = input_pipeline.create_val_dataset(\n config.dataset, split, config.dataset.test_per_device_batch_size,\n config.dataset.test_pad_last_batch)\n test_ds.append(ds)\n\n # Initialize model.\n inputs = train_utils.get_init_inputs(test_ds[0])\n rng, model_rng = jax.random.split(rng)\n predict_config = models.TransformerConfig(**config.model.to_dict())\n predict_config = predict_config.replace(decode=True)\n model = models.Model(predict_config)\n state = train_utils.create_train_state(\n model, config, model_rng, inputs=inputs)\n\n writer = metric_writers.create_default_writer(\n workdir, just_logging=jax.process_index() > 0)\n\n # Set up checkpointing of the model and the input pipeline.\n checkpoint_dir = os.path.join(workdir, 'checkpoints')\n ckpt = checkpoint.MultihostCheckpoint(checkpoint_dir, max_to_keep=3)\n\n logging.info('Testing and evaluating checkpoint %s', ckpt_path)\n try:\n state = ckpt.restore(state, ckpt_path)\n except FileNotFoundError:\n state = ckpt.restore_or_initialize(state)\n step = int(state.step)\n\n p_pred_step = jax.pmap(\n functools.partial(predict_step, config=predict_config),\n axis_name='batch',\n static_broadcasted_argnums=(3,))\n p_init_cache = jax.pmap(\n functools.partial(init_cache, config=predict_config), axis_name='batch')\n\n # Distribute testing.\n state = flax_utils.replicate(state)\n with metric_writers.ensure_flushes(writer):\n test_metrics = {}\n for ds, split in zip(test_ds, config.dataset.test_splits):\n ds_metrics = evaluate_sequence_accuracy(p_pred_step, p_init_cache, state,\n ds, config, split, workdir,\n config.num_test_steps)\n 
ds_metrics = {f'{k}_{split}': v for k, v in ds_metrics.items()}\n test_metrics.update(ds_metrics)\n writer.write_scalars(step, test_metrics)", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def evaluate_model(model, eval_path):\n with msg.loading(f\"Loading model '{model}'...\"):\n nlp = spacy.load(model)\n data, _ = format_data(srsly.read_jsonl(eval_path))\n sc = nlp.evaluate(data)\n result = [\n (\"Precision\", f\"{sc.ents_p:.3f}\"),\n (\"Recall\", f\"{sc.ents_r:.3f}\"),\n (\"F-Score\", f\"{sc.ents_f:.3f}\"),\n ]\n msg.table(result)", "def __call__(self, *args, **kwargs):\n if self.experiment is not None:\n mlflow.set_experiment(self.experiment)\n\n with self.start_run() as mlflow_run:\n # Bind mlflow run to self for use in run\n self.mlflow_run = mlflow_run\n\n output = self.run(*args, **kwargs)\n\n # Unbind mlflow run\n self.mlflow_run = None\n\n return output", "def eval(self, test_file_path: str) -> Tuple[List[Dict[str, float]], classification_report]:\n # TODO write code to extract features from test_file_path and \n # test the model\n pass", "def evaluate(func, dset_path, model_path):\n dset = load_dataset(dset_path, 'trva', False)\n\n \"\"\"\n average class-based zero-shot accuracy\n \"\"\"\n scores = func(dset['Xte_unseen'], dset['Ste_unseen_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Cte_unseen'][preds]\n acc_zsl = compute_acc(dset['Lte_unseen'], preds)\n\n \"\"\"\n average class-based generalized zsl accuracy on seen test classes\n \"\"\"\n scores = func(dset['Xte_seen'], dset['Sall_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Call'][preds]\n acc_gzsl_seen = compute_acc(dset['Lte_seen'], preds)\n\n \"\"\"\n average class-based generalized zsl accuracy on unseen test classes\n \"\"\"\n scores = func(dset['Xte_unseen'], dset['Sall_gt'], model_path)\n preds = np.argmax(scores, 1)\n preds = dset['Call'][preds]\n acc_gzsl_unseen = compute_acc(dset['Lte_unseen'], preds)\n\n print 'ZSL accuracy: ', acc_zsl\n print 'Generalized ZSL accuracy on seen classes: ', acc_gzsl_seen\n print 'Generalized ZSL accuracy on unseen classes: ', acc_gzsl_unseen", "def analysis_pipeline(main_folder, verbose=True):\n model_folders = get_model_folders(main_folder)\n csv = 'model_data.csv'\n csv_path = os.path.join(main_folder,csv)\n if os.path.exists(csv_path):\n main_df = pd.read_csv(csv_path, sep=\"!\")\n else:\n main_df = dict()\n for folder in model_folders:\n save_folder = os.path.join(main_folder, folder)\n if \"save_folder\" in main_df and save_folder in set(main_df['save_folder']):\n if verbose:\n print(\"Skipping\",folder,\" due to previous record\")\n continue\n if verbose:\n print(\"\\n\\nAnalyzing\", folder)\n \n df = analyze_model(save_folder, verbose=verbose)\n if isinstance(main_df, dict):\n main_df = df\n else:\n main_df = main_df.append(df, sort=True)\n return main_df", "def run(\n trained_model: Ridge,\n mlflow: mlflow,\n model_name: str = \"diabetes\",\n app_logger: AppLogger = get_disabled_logger(),\n parent_tracer: Tracer = None,\n) -> ModelVersion:\n logger = logging.getLogger(__name__)\n try:\n component_name = \"Diabetes_Publish_Model\"\n\n # mlflow tracking\n mlflow_run = mlflow.active_run()\n mlflow_run_id = mlflow_run.info.run_id\n mlflow_experiment_id = mlflow_run.info.experiment_id\n\n logger = app_logger.get_logger(\n component_name=component_name,\n custom_dimensions={\n \"mlflow_run_id\": mlflow_run_id,\n \"mlflow_experiment_id\": mlflow_experiment_id,\n 
},\n )\n tracer = app_logger.get_tracer(\n component_name=component_name, parent_tracer=parent_tracer\n )\n\n logger.info(\"Running MLOps publish model\")\n\n temp_model_dir = tempfile.mkdtemp()\n model_path = os.path.join(temp_model_dir, model_name)\n with tracer.span(\"save_model\"):\n mlflow.sklearn.save_model(trained_model, model_path)\n mlflow.log_artifact(model_path)\n model_uri = \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=model_name\n )\n\n logger.info(\"Publishing trained model into mlflow model registry\")\n with tracer.span(\"register_model\"):\n model_details = mlflow.register_model(model_uri=model_uri, name=model_name)\n model_version = model_details.version\n\n mlflow.log_param(\"model_version\", model_version)\n mlflow.log_param(\"model_name\", model_name)\n\n logger.info(f\"published model name: {model_name}, version: {model_version}\")\n logger.info(\"Completed MLOps publish model\")\n\n return model_details\n except Exception as exp:\n logger.error(\"an exception occurred in publish model\")\n raise Exception(\"an exception occurred in publish model\") from exp", "def evaluate(path_to_config, path_to_model):\n\n config, paths, session_id = setup(path_to_config, 1)\n assert isinstance(config, ExperimentConfig)\n logger = logging.getLogger(\"%s.main\" % config.name)\n\n logger.info(\"Evaluating network on test data\")\n\n network = Network(config, paths, session_id, 0)\n network.build()\n network.evaluate(DATA_TYPE_TEST, model_path=path_to_model)", "def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def transformation():\n data = None\n\n # Convert from CSV to pandas\n if flask.request.content_type == 'text/csv':\n data = flask.request.data.decode('utf-8')\n s = StringIO.StringIO(data)\n # Makoto.Sano@Mack-the-Psych.com\n # data = pd.read_csv(s, header=None)\n data = pd.read_csv(s)\n else:\n return flask.Response(response='This predictor only supports CSV data', status=415, mimetype='text/plain')\n\n # Makoto.Sano@Mack-the-Psych.com\n # print('Invoked with {} records'.format(data.shape[0]))\n print('Invoked with {} records'.format(len(data)))\n \n # Do the prediction\n predictions = ScoringService.predict(data)\n\n # Convert from numpy back to CSV\n out = StringIO.StringIO()\n\n # Makoto.Sano@Mack-the-Psych.com\n # pd.DataFrame({'results':predictions}).to_csv(out, header=False, index=False)\n predictions.to_csv(out, index=False)\n result = out.getvalue()\n\n return flask.Response(response=result, status=200, mimetype='text/csv')", "def evaluate(model, datagen, X_test, Y_test, batch_size, save_folder_path=None):\n\n print(\"[INFO] Evaluating model...\")\n\n scores = model.evaluate_generator(\n datagen.flow(X_test, Y_test, batch_size=batch_size),\n verbose=1)\n \n print(\"[INFO] Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n \n if save_folder_path is not None:\n 
# Write results to path\n assert os.path.isdir(save_folder_path) == True, \"Unable to save evaluation results, save_folder_path is not a folder\"\n eval_results_path = save_folder_path + \"/eval_results.txt\"\n eval_handle = open(eval_results_path, 'w')\n eval_handle.write(\"Model name: {}\\n\\n\".format(MODEL_NAME))\n eval_handle.write(\"Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n eval_handle.close()", "def test_sktime_pyfunc_raises_invalid_df_input(auto_arima_model, model_path):\n flavor.save_model(sktime_model=auto_arima_model, path=model_path)\n loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path)\n\n with pytest.raises(MlflowException, match=\"The provided prediction pd.DataFrame \"):\n loaded_pyfunc.predict(pd.DataFrame([{\"predict_method\": \"predict\"}, {\"fh\": FH}]))\n\n with pytest.raises(MlflowException, match=\"The provided prediction configuration \"):\n loaded_pyfunc.predict(pd.DataFrame([{\"invalid\": True}]))\n\n with pytest.raises(MlflowException, match=\"Invalid `predict_method` value\"):\n loaded_pyfunc.predict(pd.DataFrame([{\"predict_method\": \"predict_proba\"}]))", "def run(output_dir, data_dict, transform_func):\n logger.info('BEGIN TransformStage')\n\n # Reorganize data - Dict (key=filename, value=df)\n dfs = {os.path.split(k)[1].split('.')[0]: df\n for k, (_, df) in data_dict.items()\n }\n\n # Make dataframes\n df_out = transform_func(dfs)\n\n write_output(output_dir, df_out)\n\n logger.info('END TransformStage')\n\n return df_out", "def evaluate(model, tokenizer, dataset, lines, output_test_file, batch_size=32):\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=batch_size)\n\n print(\"*** Evaluating ***\")\n eval_loss = 0.0\n num_steps = 0\n preds = None\n out_label_ids = None\n for i, batch in enumerate(dataloader):\n if i % 200 == 199:\n print(\"=\", end=\"\")\n if i % 5000 == 4999:\n print(\"[Step \" + str(i+1) + \" / \" + str(len(dataloader)) + \"] \" )\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n labels = batch[3]\n outputs = model(input_ids=batch[0], attention_mask=batch[1], labels=labels)\n tmp_eval_loss, logits = outputs[:2]\n eval_loss += tmp_eval_loss.mean().item()\n \n num_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / num_steps\n \n preds_label = np.argmax(preds, axis=1)\n \n accuracy = (preds_label == out_label_ids).mean()\n output_dir = os.path.dirname(output_test_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n with open(output_test_file, \"w\") as writer:\n all_logits = preds.tolist()\n for i, logit in enumerate(all_logits):\n line = '<CODESPLIT>'.join(\n [item.encode('ascii', 'ignore').decode('ascii') for item in lines[i]])\n\n writer.write(line + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\\n')\n print(\"Accuracy =\", str(accuracy))\n\n return accuracy", "def model_fn(model_dir):\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: 
{}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the store model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved transformers.\n transformer_path = os.path.join(model_dir, 'transformers.pkl')\n with open(transformer_path, 'rb') as f:\n model.transformer = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model", "def predict_with_xgb(test_df, model_filepath):\n test_dmatrix = xgb.DMatrix(test_df)\n del test_df\n\n with timer(\"Loading model \" + model_filepath):\n xgb_model = xgb.Booster()\n xgb_model.load_model(model_filepath)\n\n with timer(\"Predicting values\"):\n predictions = xgb_model.predict(test_dmatrix)\n # Invert log and set possible neg. values to 0\n predictions = np.expm1(predictions)\n predictions[predictions < 0] = 0\n return predictions", "def model_evaluate(query):\n engine = create_engine('sqlite:///data/DisasterResponse.db')\n df = pd.read_sql_table('Messages', engine)\n\n model = joblib.load(\"models/model.pkl\")\n \n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n return classification_labels, classification_results", "def get_metric_fn(self, sklearn_fn, kwargs=None):\n kwargs = kwargs if kwargs else dict()\n if self.data_content is not None and self.task_id is not None:\n predictions_arff = self._generate_arff_dict()\n elif \"predictions\" in self.output_files:\n predictions_file_url = openml._api_calls._file_id_to_url(\n self.output_files[\"predictions\"],\n \"predictions.arff\",\n )\n response = openml._api_calls._download_text_file(predictions_file_url)\n predictions_arff = arff.loads(response)\n # TODO: make this a stream reader\n else:\n raise ValueError(\n \"Run should have been locally executed or \" \"contain outputfile reference.\"\n )\n\n # Need to know more about the task to compute scores correctly\n task = get_task(self.task_id)\n\n attribute_names = [att[0] for att in predictions_arff[\"attributes\"]]\n if (\n task.task_type_id in [TaskType.SUPERVISED_CLASSIFICATION, TaskType.LEARNING_CURVE]\n and \"correct\" not in attribute_names\n ):\n raise ValueError('Attribute \"correct\" should be set for ' \"classification task runs\")\n if task.task_type_id == TaskType.SUPERVISED_REGRESSION and \"truth\" not in attribute_names:\n raise ValueError('Attribute \"truth\" should be set for ' \"regression task runs\")\n if task.task_type_id != TaskType.CLUSTERING and \"prediction\" not in attribute_names:\n raise ValueError('Attribute \"predict\" should be set for ' \"supervised task runs\")\n\n def _attribute_list_to_dict(attribute_list):\n # convenience function: Creates a mapping to map from the name of\n # attributes present in the arff prediction file to their index.\n # This is necessary because the number of classes can be different\n # for different tasks.\n res = OrderedDict()\n for idx in range(len(attribute_list)):\n res[attribute_list[idx][0]] = idx\n return res\n\n attribute_dict = _attribute_list_to_dict(predictions_arff[\"attributes\"])\n\n repeat_idx = attribute_dict[\"repeat\"]\n fold_idx = attribute_dict[\"fold\"]\n predicted_idx = attribute_dict[\"prediction\"] # Assume supervised task\n\n if (\n 
task.task_type_id == TaskType.SUPERVISED_CLASSIFICATION\n or task.task_type_id == TaskType.LEARNING_CURVE\n ):\n correct_idx = attribute_dict[\"correct\"]\n elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:\n correct_idx = attribute_dict[\"truth\"]\n has_samples = False\n if \"sample\" in attribute_dict:\n sample_idx = attribute_dict[\"sample\"]\n has_samples = True\n\n if (\n predictions_arff[\"attributes\"][predicted_idx][1]\n != predictions_arff[\"attributes\"][correct_idx][1]\n ):\n pred = predictions_arff[\"attributes\"][predicted_idx][1]\n corr = predictions_arff[\"attributes\"][correct_idx][1]\n raise ValueError(\n \"Predicted and Correct do not have equal values:\"\n \" %s Vs. %s\" % (str(pred), str(corr))\n )\n\n # TODO: these could be cached\n values_predict = {}\n values_correct = {}\n for line_idx, line in enumerate(predictions_arff[\"data\"]):\n rep = line[repeat_idx]\n fold = line[fold_idx]\n if has_samples:\n samp = line[sample_idx]\n else:\n samp = 0 # No learning curve sample, always 0\n\n if task.task_type_id in [\n TaskType.SUPERVISED_CLASSIFICATION,\n TaskType.LEARNING_CURVE,\n ]:\n prediction = predictions_arff[\"attributes\"][predicted_idx][1].index(\n line[predicted_idx]\n )\n correct = predictions_arff[\"attributes\"][predicted_idx][1].index(line[correct_idx])\n elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:\n prediction = line[predicted_idx]\n correct = line[correct_idx]\n if rep not in values_predict:\n values_predict[rep] = OrderedDict()\n values_correct[rep] = OrderedDict()\n if fold not in values_predict[rep]:\n values_predict[rep][fold] = OrderedDict()\n values_correct[rep][fold] = OrderedDict()\n if samp not in values_predict[rep][fold]:\n values_predict[rep][fold][samp] = []\n values_correct[rep][fold][samp] = []\n\n values_predict[rep][fold][samp].append(prediction)\n values_correct[rep][fold][samp].append(correct)\n\n scores = []\n for rep in values_predict.keys():\n for fold in values_predict[rep].keys():\n last_sample = len(values_predict[rep][fold]) - 1\n y_pred = values_predict[rep][fold][last_sample]\n y_true = values_correct[rep][fold][last_sample]\n scores.append(sklearn_fn(y_true, y_pred, **kwargs))\n return np.array(scores)", "def save_df(data_frame, file_path):\r\n data_frame.to_csv(file_path)\r\n return None", "def to_filesystem(\n self,\n directory: str,\n store_model: bool = True,\n ) -> None:\n if self.data_content is None or self.model is None:\n raise ValueError(\"Run should have been executed (and contain \" \"model / predictions)\")\n\n os.makedirs(directory, exist_ok=True)\n if not os.listdir(directory) == []:\n raise ValueError(\n \"Output directory {} should be empty\".format(os.path.abspath(directory))\n )\n\n run_xml = self._to_xml()\n predictions_arff = arff.dumps(self._generate_arff_dict())\n\n # It seems like typing does not allow to define the same variable multiple times\n with open(os.path.join(directory, \"description.xml\"), \"w\") as fh: # type: TextIO\n fh.write(run_xml)\n with open(os.path.join(directory, \"predictions.arff\"), \"w\") as fh:\n fh.write(predictions_arff)\n if store_model:\n with open(os.path.join(directory, \"model.pkl\"), \"wb\") as fh_b: # type: IO[bytes]\n pickle.dump(self.model, fh_b)\n\n if self.flow_id is None:\n self.flow.to_filesystem(directory)\n\n if self.trace is not None:\n self.trace._to_filesystem(directory)", "def eval_model(config, period, test_data):\n if config.network == 'MLPwithGAN':\n model = MLPwithGAN(config)\n elif config.network == 'MLP':\n model = MLP(config)\n 
elif config.network == 'LSTM':\n model = VanillaLSTM(config)\n elif config.network == 'CNN':\n model = CNNfeature(config)\n else:\n raise Exception('Unknown model type:{}'.format(config.network))\n\n if config.ensemble:\n m = model\n model = []\n\n for i in glob(gen_path(config.path, str(period)) + '/m*'):\n m.load_state_dict(\n torch.load(gen_path(i, filename=config.network + '.pkl')))\n m.to(config.device)\n m.eval()\n model.append(m)\n else:\n model.load_state_dict(\n torch.load(gen_path(config.path, str(period), 'model', filename=config.network + '.pkl')))\n model.to(config.device)\n model.eval()\n dataloader_test = test_data[0]\n test_date = test_data[1]\n test_symbol = test_data[2]\n sc_y = joblib.load(gen_path(config.path, str(period), 'scaler', filename='training_sc_y.pkl'))\n predict_y_test, real_y_test, valid_index_test = make_prediction(dataloader_test, sc_y, model, config)\n\n stock_score = pd.DataFrame()\n stock_score[\"symbol\"] = test_symbol[valid_index_test]\n stock_score[\"score\"] = predict_y_test\n stock_score['truth'] = real_y_test\n stock_score[\"date\"] = test_date[valid_index_test]\n stock_score = stock_score.sort_values(by=[\"date\"])\n stock_score.to_csv(gen_path(config.path, 'stock_score', filename=str(period) + '.csv'), index=False)", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)" ]
[ "0.62471294", "0.591763", "0.589063", "0.5767314", "0.55636126", "0.5445633", "0.54294544", "0.53925204", "0.53252816", "0.5324131", "0.5243129", "0.5235573", "0.5235118", "0.5224704", "0.5215015", "0.5210782", "0.5205274", "0.5199694", "0.5183146", "0.51796335", "0.517105", "0.5169469", "0.51507986", "0.5137271", "0.5133975", "0.51291794", "0.51275486", "0.51270366", "0.51130104", "0.51122004", "0.5101997", "0.50948703", "0.50784975", "0.5073292", "0.50299144", "0.50200206", "0.49801815", "0.49783888", "0.49766415", "0.4973279", "0.4954292", "0.49530128", "0.49479917", "0.49456415", "0.49395183", "0.49275404", "0.49222028", "0.492105", "0.4916614", "0.49100623", "0.490235", "0.4898278", "0.4894289", "0.48863816", "0.48853195", "0.4880423", "0.4879965", "0.4871862", "0.48666176", "0.48622823", "0.4836918", "0.48343903", "0.4832592", "0.48315987", "0.48303545", "0.4824213", "0.48224446", "0.48164532", "0.48082927", "0.4807947", "0.47997624", "0.47923556", "0.47879404", "0.47876102", "0.47811896", "0.47782916", "0.47744435", "0.47669002", "0.47653645", "0.4759983", "0.4759001", "0.47569355", "0.47568214", "0.4749901", "0.47482497", "0.47464466", "0.47397923", "0.4733923", "0.47338808", "0.47325718", "0.47321892", "0.4724039", "0.47168818", "0.4710184", "0.47078097", "0.4704975", "0.47049224", "0.47012043", "0.46981603", "0.46959758" ]
0.68641376
0
Returns an array for the inheritance at a nuclear locus with n alleles.
def nuclear_inheritance_at_single_locus(n): ary = np.zeros((n,n,n)) for female in range(n): for male in range(n): for offspring in range(n): if female==male==offspring: ary[female,male,offspring]=1. if female!=male: if (offspring==female) or (offspring==male): ary[female,male,offspring]=0.5 return ary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nuclear_inheritance_at_y_locus(n):\n ary = np.zeros((n,n,n)) # female, male, offspring\n for male in range(1,n):\n ary[0,male,0] = ary[0,male,male] = 1 # one daughter, one son\n return ary", "def basisVar(bases):\n \n var = [] # The ordered list of active coordinates \n k = 0\n for b in bases:\n if np.isscalar(b):\n # A singleton, inactive coordinate\n k += 1\n else:\n # An active basis\n for i in range(b.nd):\n var.append(k)\n k += 1 \n return var", "def coordSubcoord(bases):\n subcoord_of_coord = []\n for b in bases:\n if np.isscalar(b):\n subcoord_of_coord.append(0) # Singleton scalar\n else:\n for i in range(b.nd):\n subcoord_of_coord.append(i)\n return subcoord_of_coord", "def get_base_coords(self):\n num_bases = len(self.tour)\n base_coords = np.zeros((num_bases,3), dtype=float)\n for i,base in enumerate(self.tour):\n helix_num = base.h\n helix_pos = base.p\n helix = self.helix_list[helix_num]\n base_coords[i] = base.coordinates\n return base_coords", "def get_nuclei(self):\n sim_layer = self._wrap_ns(self.setup_config[\"sim_layer\"])\n return cmds.listRelatives(sim_layer, ad=True, type=\"nucleus\") or []", "def gt_bases(self):\n result = []\n for a in self.gt_alleles:\n if a is None:\n result.append(None)\n elif a == 0:\n result.append(self.site.REF)\n else:\n result.append(self.site.ALT[a - 1].value)\n return tuple(result)", "def nlegomena(self, n: int) -> list:\n nlegomena_ = [typ for typ, freq in self.items() if freq == n]\n return nlegomena_", "def locations_n(self):\n return self.locations[1]", "def __addNuclideBases():\n with open(os.path.join(context.RES, \"nuclides.dat\")) as f:\n for line in f:\n # Skip header lines\n if line.startswith(\"#\") or line.startswith(\"Z\"):\n continue\n lineData = line.split()\n _z = int(lineData[0])\n _n = int(lineData[1])\n a = int(lineData[2])\n state = int(lineData[3])\n sym = lineData[4].upper()\n mass = float(lineData[5])\n abun = float(lineData[6])\n halflife = lineData[7]\n if halflife == \"inf\":\n halflife = numpy.inf\n else:\n halflife = float(halflife)\n nuSF = float(lineData[8])\n\n element = elements.bySymbol[sym]\n nb = NuclideBase(element, a, mass, abun, state, halflife)\n nb.nuSF = nuSF", "def get_bases():\n\treturn ((MV.ONE,),) + MV.blades[1:]\n\t# return ((MV.ONE,),) + MV.bases[1:]", "def getBaseDistinctions(self):\n\n return []", "def create_array( n ):", "def __init__(self, n):\n self.n = n\n self.parents = [-1] * n", "def get_n(self, n):\n \n return [self.get_values() for _ in range(n)]", "def get_bases():\n\tbss = []\n\tfor es in MV.index:\n\t\tbs = []\n\t\tif es == ():\n\t\t\tbs.append(_1)\n\t\telse:\n\t\t\tfor js in es:\n\t\t\t\tbmv = reduce(operator.mul, map(lambda j: e[j], js))\n\t\t\t\tbs.append(bmv)\n\t\t\t\t\n\t\tbss.append(bs)\n\t\n\treturn bss", "def test_taxonomy(n=5):\n ecoli_file = join(this_dir, \"e_coli_core.xml.gz\")\n ids = [\"Escherichia_coli_{}\".format(i) for i in range(1, n + 1)]\n taxa = pd.DataFrame({\"id\": ids})\n taxa[\"genus\"] = \"Escherichia\"\n taxa[\"species\"] = \"Eschericia coli\"\n taxa[\"reactions\"] = 95\n taxa[\"metabolites\"] = 72\n taxa[\"file\"] = ecoli_file\n return taxa", "def vnN(self):\n return np.array(\n [x for x in [self.nNx, self.nNy, self.nNz] if x is not None],\n dtype=int\n )", "def getHierarchies():", "def getHierarchies():", "def ancestry_iris(self):\n return list(self._class_types)", "def get_atril_array(self):\n return self.atril", "def tris(self):\n return self.nlegomena(3)", "def from_antenna_config(filename, z, nu=None):\n\tantll = np.loadtxt(filename)\n\tRe 
= 6.371e6 # in m\n\tpp = np.pi/180\n\tif not nu: nu = c2t.z_to_nu(z) # MHz\n\tantxyz = np.zeros((antll.shape[0],3))\t\t # in m\n\tantxyz[:,0] = Re*np.cos(antll[:,1]*pp)*np.cos(antll[:,0]*pp)\n\tantxyz[:,1] = Re*np.cos(antll[:,1]*pp)*np.sin(antll[:,0]*pp)\n\tantxyz[:,2] = Re*np.sin(antll[:,1]*pp)\t\n\tdel pp, antll\n\tN_ant = antxyz.shape[0]\n\tNbase = np.zeros((N_ant*(N_ant-1)/2,3))\n\tpair_comb = itertools.combinations(xrange(N_ant), 2)\n\tpair_comb = list(pair_comb)\t\n\tlam = c_light/(nu*1e6)/1e2 \t\t\t # in m\n\tfor i in xrange(Nbase.shape[0]):\n\t\tii,jj = pair_comb[i]\n\t\tux = (antxyz[ii,0]-antxyz[jj,0])/lam\n\t\tuy = (antxyz[ii,1]-antxyz[jj,1])/lam\n\t\tuz = (antxyz[ii,2]-antxyz[jj,2])/lam\n\t\tNbase[i,:] = ux,uy,uz \n\treturn Nbase, N_ant", "def nucleotides(self):\n return self._nucleotides # 将字符串变作一个列表", "def test_ontology_term_inheritance(ontology):\n\tassert len(ontology.inherited(\"TO:0000001\")) == 1\n\tassert len(ontology.inherited(\"TO:0000002\")) == 2\n\tassert len(ontology.inherited(\"TO:0000003\")) == 2\n\tassert len(ontology.inherited(\"TO:0000004\")) == 4\n\tassert len(ontology.inherited(\"TO:0000005\")) == 5\n\tassert len(ontology.inherited(\"TO:0000006\")) == 5\n\tassert len(ontology.inherited(\"TO:0000007\")) == 2\n\tassert len(ontology.inherited(\"TO:0000008\")) == 3\n\tassert len(ontology.inherited(\"TO:0000009\")) == 4\n\n\tassert len(ontology.inherited([\"TO:0000002\",\"TO:0000003\"])) == 3\n\tassert len(ontology.inherited([\"TO:0000009\",\"TO:0000005\"])) == 8\n\tassert len(ontology.inherited([\"TO:0000004\",\"TO:0000003\"])) == 4\n\tassert len(ontology.inherited([\"TO:0000002\"])) == 2\n\tassert len(ontology.inherited([])) == 0", "def n(self):\n return len(self.genotypes)", "def __init__(self, n):\n self._count = n\n self._parent = list(range(n))\n self._rank = [0]*n\n\n \"\"\"\n Added a 'binary' list to keep track of sites that have been\n unioned, as well as an integer that counts the number of\n isolated sites. 
Also a list to keep track of the roots'\n tree sizes, as well as an integer that holds the maximum\n tree size (maximum component in the graph)\n \"\"\"\n self._nodes = [1]*n\n self._iso = n\n self._size = [1]*n\n self._max = 0", "def get_codon_arr(chromosome: Chromosome) -> np.ndarray:\n\n seq_len = len(chromosome.sequence)\n arr = np.zeros((seq_len - 2,), dtype=np.int)\n\n for f in chromosome.features:\n\n if f.type != 'CDS':\n continue\n if f.strand == '-':\n continue\n\n protein_len = (f.end - f.start) // 3\n for aa in range(protein_len):\n pos = f.start + (aa * 3) - 1 # -1 to 0-based\n arr[pos] = 1\n\n return arr", "def get_ROIs(self, base):\n locs3d = self.locs3d\n #print loc3d\n base_locs = locs3d[base]\n ROI_dic = dict((i, [Id]) for i,Id in enumerate(base))\n for i, loc in enumerate(locs3d):\n if i not in base:\n dist = np.sqrt(np.sum((base_locs - loc)**2, 1))\n min_i = np.argmin(dist)\n ROI_dic[min_i].append(i)\n out = ROI_dic.values()\n return out", "def get_n(self):\n return np.append([self.n_init],[s.n for s in self.surfaces])", "def get_occupants(self):\n return [x.get_occupant_type() for x in self.huts]", "def ncoordinates(self):\n return _coordsys.coordsys_ncoordinates(self)", "def parentNCBInames(self):\n return self.parentClade.ncbiNames", "def pentakis(self):\n return self.nlegomena(5)", "def make_nodes(n):\n return [\n protein(namespace='NS', name=str(i))\n for i in range(1, n)\n ]", "def n(l):\n return np.array(l,dtype=object)", "def _makebaselines(self):\n nholes = self.ctrs.shape[0]\n blist = []\n for i in range(nholes):\n for j in range(nholes):\n if i < j:\n blist.append((i, j))\n barray = np.array(blist).astype(np.int)\n #blname = []\n bllist = []\n for basepair in blist:\n # blname.append(\"{0:d}_{1:d}\".format(basepair[0],basepair[1]))\n baseline = self.ctrs[basepair[0]] - self.ctrs[basepair[1]]\n bllist.append(baseline)\n return barray, np.array(bllist)", "def vnEx(self):\n return np.array(\n [x for x in [self.nCx, self.nNy, self.nNz] if x is not None],\n dtype=int\n )", "def init_naive_array(n):\n result = list()\n for i in range(1, n+1):\n result.append(i)\n return result", "def getBatch(self, n, rng, dataset):\n pmax = self._root.priority\n step = pmax / n\n indices = np.zeros(n, dtype='int32')\n for i in range(n):\n p = rng.uniform(i*step, (i+1)*step)\n node = self.find(p)\n index = self._checkTerminal(node.position, dataset)\n if (index >= 0):\n indices[i] = index\n else:\n return np.zeros(0)\n\n return indices", "def get_classes(self):\n return list(range(self.num_clss))", "def n_classes(self):\n raise NotImplementedError", "def n_classes(self):\n raise NotImplementedError", "def getOtherSaCurrentAnts( ):\n if subarrayNo == 1:\n otherSa = Subarray.getSubarrayRef( 2 )\n otherSaAntAssignments = otherSa.getAntennaAssignments( )\n elif subarrayNo == 2:\n otherSa = Subarray.getSubarrayRef( 1 )\n otherSaAntAssignments = otherSa.getAntennaAssignments( )\n else:\n otherSa1Ants = Subarray.getSubarrayRef(1).getAntennaAssignments()\n otherSa2Ants = Subarray.getSubarrayRef(2).getAntennaAssignments()\n otherSaAntAssignments = otherSa1Ants + otherSa2Ants\n\n otherSaAnts = [ i.carmaAntennaNo for i in otherSaAntAssignments ]\n return otherSaAnts", "def childNCBInames(self):\n return self.childClade.ncbiNames", "def initialize_dna(self):\n return np.random.rand(1, self.n_genes) * 2 - 1", "def analytical_energies(n):\n\n energies = []\n for nx in range(n):\n for ny in range(n):\n energies.append(energy(nx,ny))\n energies = np.sort(energies)\n return energies", "def 
__addNaturalNuclideBases():\n for element in elements.byZ.values():\n if element.symbol not in byName:\n if element.isNaturallyOccurring():\n NaturalNuclideBase(element.symbol, element)", "def __init__(self):\n self.rho=[]\n self.te=[]\n self.ti=[]\n self.ne=[]\n self.ni=[]\n self.ni1=[]\n self.ni2=[]\n self.ni3=[]\n self.vtor=[]\n self.zeff=[]\n\n self.nion=1\n self.Z=[]\n self.A=[]\n self.coll_mode=[]", "def base_composition(reads, base):\n assert base.upper() in set(\"ACGT\")\n\n \"\"\" Reports nucelotide frequencies at each position in the\n sam sequences\n \"\"\"\n # DNA_Alphabet=[\"A\",\"C\",\"T\",\"G\",\"N\"]\n all_nucs = []\n for read in reads:\n nucs = {} # Dictionary to store nucleotide data.\n seq = read[9]\n for i in range(0, len(seq)):\n nucs[str(i + 1)] = seq[i]\n all_nucs.append(nucs)\n all_items = []\n counts = []\n for dicts in all_nucs:\n for item in dicts.items():\n all_items.append(item)\n all_items.sort(key=operator.itemgetter(0))\n groups = [map(operator.itemgetter(1), list(group))\n for key, group in itertools.groupby(\n all_items, operator.itemgetter(0))]\n for group in groups:\n counts.append(group.count(base))\n\n pos = range(1, len(seq) + 1)\n\n # Create plot.\n plt.figure(1, figsize=(8, 8))\n plt.axes([0.1, 0.1, 0.8, 0.8])\n plt.bar(pos, counts, facecolor='g')\n plt.xlabel(\"Position\")\n plt.ylabel(\"number of mapped reads\")\n plt.title(base)\n plt.show()", "def Nlevels(self):\n return self._nlevels", "def getInnocents():\n\n cams = CAMS\n for cam in cams:\n image = cam.getImage()\n bloons, sizes = Vision_Processing.GetBalloon.getFriends(image)\n points = []\n for bloon in bloons:\n points.append(np.array([bloon[0], bloon[1]]))\n for i in range(len(bloons)):\n cam.addTarget(points[i])\n cam.addSize(sizes[i])\n targets = triangulate()\n # targets = cartesianToSpheric(targets, place, orientation)\n return targets", "def getHierarchies(unique_name=None):", "def readBulk(self, baseAddr, depth): \r\n data = []\r\n if self.depth - baseAddr < depth:\r\n return \"WARNING: Change the depth parameter,from %d there are only %d (<%d) possible memory addresses\"%\\\r\n (baseAddr, self.depth - baseAddr , depth)\r\n\r\n else:\r\n for i in range(baseAddr, baseAddr + depth ):\r\n data.append(self.readRam(i))\r\n\r\n return data", "def nD(self):\n return self.locations.shape[0]", "def get_spawn_locs(n: int, spawnbox: Optional[str] = None) -> np.ndarray:\r\n if spawnbox is None:\r\n spawnbox = cng.SPAWNBOX_OBJ\r\n\r\n box = bpy.data.objects[spawnbox]\r\n loc = np.array(box.location) # Center location\r\n scale = np.array(box.scale)\r\n\r\n points = np.random.uniform(low=-scale, high=scale, size=(n, 3)) + loc\r\n return points", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def tetrakis(self):\n return self.nlegomena(4)", "def get_uniprot_clusters():\n\tjson_str = \"\"\n\tfor line in open(gpcr_tree_path, 'r'):\n\t\tjson_str += line \n\tgpcr_tree = json.loads(json_str)\n\n\tuniprot_clusters = []\n\t\n\tfor a in gpcr_tree[\"children\"]:\n\t\tfor b in a[\"children\"]:\n\t\t\tcluster = []\n\t\t\tfor uniprot in b[\"children\"]:\n\t\t\t\tuniprot_id = (str(uniprot['name']) + \"_human\").upper()\n\t\t\t\tcluster.append(uniprot_id)\n\t\t\tuniprot_clusters.append(cluster)\n\n\treturn uniprot_clusters", "def subarrayAntSetup( unownedAntennas = False ):\n\n # Form lists of MP canonical names for antenna and subarray numbers.\n canonAntNumberMps = []\n canonSubNumberMps = []\n \n numAnts = device.CarmaAnt().getNumAnts()\n for antNo in xrange(1, 1 + numAnts):\n 
canonAntNumberMps.append( \"Control.Antenna%d.carmaAntennaNumber\"%antNo )\n canonSubNumberMps.append( \"Control.Antenna%d.subarrayNumber\"%antNo )\n\n # Query MP Values in a single pass. Note that we don't throw on invalid\n # or missing MPs but rather check for None below.\n [antennaNumbers,subarrayNumbers] = queryMpValues( [canonAntNumberMps, \n canonSubNumberMps ],\n True ) # No throw\n \n # Verify that the returned lists are in step - get out if they aren't.\n if len( antennaNumbers ) != len( subarrayNumbers ):\n raise Exception, \"Ant # list size does not match subarray # list size!\"\n\n # Form up our answer. If somebody knows an easy way to do this with\n # iterators, let me know.\n answer = []\n thisSubarrayNo = Subarray.getSubarrayNo()\n for index in xrange( 0, len( antennaNumbers ) ):\n subNo = subarrayNumbers[index]\n antNo = antennaNumbers[index]\n \n if subNo == None or antNo == None:\n continue\n\n if ( unownedAntennas == False ) :\n if subNo == thisSubarrayNo:\n answer.append( antNo )\n else :\n if subNo != thisSubarrayNo:\n answer.append( antNo )\n\n return answer", "def all_baselines():\n for i in range(len(active_ants)):\n ID1 = active_ants[i]\n for j in range(i + 1, len(active_ants[i + 1:])):\n ID2 = active_ants[j]\n print(\"Baseline between antennae \" + str(ID1) + \\\n \" and \" + str(ID2) + \" = \" + str(ant.baseline(ID1, ID2)))", "def get_flat_bases():\n\trs = []\n\tfor bs in get_bases():\n\t\tfor b in bs:\n\t\t\trs.append(b)\n\treturn rs", "def n_classes(self):\n raise NotImplementedError()", "def super_categories(self):\n R = self.base().base_ring()\n category = GradedHopfAlgebrasWithBasis(R)\n return [Realizations(self.base()), category.Quotients()]", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def create_n_defenders(n, rs_nb, hp_proportion, hp_unit_cost=0, offset=0, name=\"\"):\n defenders = []\n for i in range(offset,n+offset):\n if(name != \"\"):\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost, name=name)\n else:\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost)\n defenders.append(d)\n return defenders", "def LINNEAN_RANKS(cls):\n return (\n cls.KINGDOM,\n cls.PHYLUM,\n cls.CLASS,\n cls.ORDER,\n cls.FAMILY,\n cls.GENUS,\n cls.SPECIES\n )", "def _get_elbow_info(self, n_components: int) -> Tuple[range, List[float]]:\n principalDf = self._get_principal_df(n_components)\n return self._generate_intertias(n_components, principalDf)", "def members(self):\n if self.type == \"mdbiosraidarray\":\n members = self.parents[0].parents\n else:\n members = self.parents\n return list(members)", "def level_n_descendants(self, n):\r\n return Node.s_level_n_descendants(self, n)", "def add_virus(self, n):\n if n is None or n <= 0:\n raise ValueError('n must be positive')\n\n radius = utils.mass_to_radius(conf.VIRUS_MASS)\n for _ in range(n):\n pos = utils.gen_non_overlap_position(self.agents.values(), radius)\n self.viruses.append(Virus(pos[0], pos[1], radius, conf.VIRUS_MASS))", "def nRx(self):\n return sum(rx.locations.shape[0] for rx in self.source_field.receiver_list)", "def vnC(self):\n return np.array(\n [x for x in [self.nCx, self.nCy, self.nCz] if x is not None],\n dtype=int\n )", "def base_idx_neighbor_idx_simplices(n_base, n_neighbors=5, n_dim=2):\n combinations = np.array(list(itertools.combinations(np.arange(1,\n n_neighbors),\n n_dim-1))).astype(int)\n base_indices = np.repeat(np.arange(n_base), len(combinations))\n 
all_simplices = np.vstack([base_indices,\n np.tile(combinations, (n_base, 1)).T]).T\n #print('simplices', os.getpid(), len(all_simplices), flush=True)\n return all_simplices", "def pull_reads(self, n, prng): \n if not self.sampling:\n self.convert_to_array()\n index = prng.randint(0, self.total, size=n)\n index = np.sort(index)\n return self.reads[index,:]", "def __init__(self, inp_taxa=None):\n\t\tself.Initial_R1_List = []\n\t\tself.Initial_R2_List = []\n\t\t\n\t\t#\"\"\"\n\t\t#this is the final possible R2 list of the current cluster\n\t\t#\"\"\"\n\t\t#self.Final_PossibleR2List = []\n\t\t\"\"\"\n\t\ttaxa list of the current cluster\n\t\t\"\"\"\n\t\tself.Species_List = [] \n\t\t\"\"\"\n\t\tset to 1 once the cluster is traversed during DFS order of traversing the clusters\n\t\tthis is required in printing the supertree in newick format \n\t\t\"\"\"\n\t\tself.explored = 0 \n\t\t\"\"\"\n\t\tstores the indices of clusters cy, such that, depending on the relation type:\n\t\tcurr_clust->cy / cy->curr_clust / R3 (cy, curr_clust) / R4 (cy, curr_clust) are present\n\t\t\"\"\"\n\t\tself.Reln_List = [[] for i in range(4)]\n\t\t#\"\"\"\n\t\t#stores the indices of clusters cy such that curr_clust->cy connection \n\t\t#needs to be checked\n\t\t#\"\"\"\n\t\t#self.possible_R1_list = []\n\t\t#\"\"\"\n\t\t#stores the indices of clusters cy such that curr_clust<-cy connection \n\t\t#needs to be checked\n\t\t#\"\"\"\n\t\t#self.possible_R2_list = []\n\t\t#\"\"\"\n\t\t#stores the indices of clusters cy such that curr_clust<----cy holds\n\t\t#but curr_clust---->cy does not hold\n\t\t#\"\"\"\n\t\t#self.Distinct_possible_R2_list = []\n\t\t\"\"\"\n\t\tduring initialization, append one tuple to this cluster\n\t\t\"\"\"\n\t\tif inp_taxa is not None:\n\t\t\tself._Append_taxa(inp_taxa)", "def clusters(self,rng):\n #clusts = subclust(normalize(self.training_data),0.4,0.5)\n if self.extended:\n dat = self.training_data / rng\n else:\n dat = self.training_data[:,0:-1] / rng[0:-1]\n\n clusts = subclust(normalize(dat))\n\n print len(clusts),\"initial clusters for class\",self.name\n if self.extended:\n return np.array([self.training_data[i] for i in clusts])\n else:\n return np.array([self.training_data[i,0:-1] for i in clusts])", "def __init__(self, n):\n self.n = n\n self.e = [set() for i in range(n)]", "def get_n_random_itrees(self, n, subs_size):\n random_itrees = np.empty(n, dtype=object) # Allocate list for storing the trees.\n # TODO: parallelize!\n for k in np.arange(n):\n # Get a random sample of training examples to build next random itree.\n data_sub = self.data[np.random.choice(self.data.shape[0], subs_size, replace=False), :]\n random_itrees[k] = self.get_random_itree(data_sub) # Get next random itree \n self.random_itrees = random_itrees\n self.subs_size = subs_size", "def constituents(self, pnp=False):\n a = []\n for word in self.words:\n if pnp and word.pnp is not None:\n if len(a) == 0 or a[-1] != word.pnp:\n a.append(word.pnp)\n elif word.chunk is not None:\n if len(a) == 0 or a[-1] != word.chunk:\n a.append(word.chunk)\n else:\n a.append(word)\n return a", "def expected_inheritance(variant_obj):\n manual_models = set()\n for gene in variant_obj.get('genes', []):\n manual_models.update(gene.get('manual_inheritance', []))\n return list(manual_models)", "def nPositions(self, int_max=None):\n\n return np.array(list(map(\n lambda time0: self.getPositions(time0),\n self._time0(int_max=int_max))))", "def alt_bases_from_indices(alt_allele_indices, alternate_bases):\n alleles = [alternate_bases[i] for i in 
alt_allele_indices]\n # Avoiding '/' to support use in file paths.\n return '-'.join(alleles)", "def base(self):\n if self._base == []:\n self.schreier_sims()\n return self._base", "def agentCounter(gameState, index, depth):\n if index == gameState.getNumAgents():\n return [depth-1, 0]\n else:\n return [depth, index]", "def get_array_of_children(self):\n children = [self.posXposYposZ,self.posXposYnegZ,self.posXnegYposZ,self.posXposYnegZ,self.negXposYposZ,self.negXposYnegZ,self.negXnegYposZ,self.negXnegYnegZ ] \n return children", "def known_organisms():\n return [\"rat\"]", "def IPAG(self):\r\n logger.info(\"IPAG (Inheritance Parent AGgregation)\")\r\n logger.info(\"Step1: Get all parent and child classes\")\r\n inh = self.get_node_by_name(\"inheritance\")\r\n logger.debug(\"inheritance: %s\" % inh)\r\n logger.info(\"Step2: Get all classes with aggregation relation\")\r\n agg = self.get_node_by_name(\"aggregation\")\r\n logger.debug(\"aggregation: %s\" % agg)\r\n logger.info(\"Step3: Find classes that have inheritance and the parent have aggregation with other\")\r\n return self.__IPAG_helper(inh, agg)", "def __init__(self, n, prey_cnt=0, predator_cnt=0):\n # print n, prey_cnt, predator_cnt\n self.grid_size = n\n self.grid = []\n for i in range(n):\n row = [0]*n # row is a list of n zeros\n self.grid.append(row)\n self.init_animals(prey_cnt, predator_cnt)", "def get_nix(self):\n return self.dim", "def get_index_array(self):\n return self.region_pairs", "def how_many(cls):\n #cls.population equivalent to Robot.population\n print(\"We have {:d} robots.\".format(cls.population))", "def __init__(self, n):\n self._n = n\n self._grid = [[False] * n for _ in range(n)]\n # create sites for n-by-n grid and 2 \"virtual\" sites for top and bottom\n # self._uf = QuickFindUF(n * n + 2)\n self._uf = WeightedQuickUnionUF(n * n + 2) # QuickFindUF(n * n + 2)\n # connect top and bottom virtual sites with respecting sides of grid\n self._top_idx = n * n\n self._bottom_idx = n * n + 1\n for i in range(n):\n self._uf.union(self._top_idx, i)\n self._uf.union(self._bottom_idx, (n - 1) * n + i)", "def nD(self):\n return self.locations.shape[0] * len(self.components)", "def GetClassBases(self,cls):\n name = \"\"\n for i in cls:\n if i != \")\":\n name+=i\n\n lst = name.split(\"(\")\n cls_lst = lst[-1].split(\",\")\n if cls_lst:\n return cls_lst\n else:\n return False", "def n_coords(self):\n trans_x = np.arange(-self.trans_dia / 2, self.trans_dia / 2 + 0.002, 0.002)\n a_n = [(trans_x[n + 1] - trans_x[n]) / 2 for n in range(self.N)]\n cx_n = [trans_x[n] + (trans_x[n + 1] - trans_x[n]) / 2 for n in range(self.N)]\n coords = [(x, 0) for x in cx_n]\n d = {'trans_x': trans_x, 'A_n': a_n, 'N_coords': coords}\n return d", "def initialiser(N, dimensions = 2):\r\n \r\n #shape for correct dimensions\r\n shape = tuple([N]) * dimensions\r\n \r\n #randomise spins\r\n lattice = np.random.choice([1,-1], size = shape)\r\n \r\n return lattice", "def find_lms_suffixes(t, n):\n pos = array(\"l\")\n for i in range(n):\n if t[i] == S_TYPE and t[i - 1] == L_TYPE:\n pos.append(i)\n return pos", "def _get_taxa_richness_map(self):\n taxa = self.get_taxon_objects(extant_at_latest_time=True)\n\n if taxa:\n masks = np.stack([taxon.range_mask for taxon in taxa])\n richness_mask = sum(masks).astype(int)\n else:\n richness_mask = np.zeros(self._grid.number_of_nodes, dtype=int)\n\n return richness_mask", "def nits(self):" ]
[ "0.68091816", "0.56342465", "0.54614604", "0.5391642", "0.5375723", "0.53475916", "0.53350717", "0.531773", "0.52842516", "0.52697146", "0.5118117", "0.50960046", "0.50157857", "0.4984859", "0.49818936", "0.4934643", "0.4923983", "0.48903525", "0.48903525", "0.48512107", "0.48498282", "0.48475572", "0.4838769", "0.48179743", "0.4814369", "0.4799596", "0.4796", "0.47553033", "0.47507358", "0.47360468", "0.47310296", "0.47136614", "0.4713197", "0.47075474", "0.46929896", "0.46904296", "0.4679217", "0.46709377", "0.46671546", "0.46612245", "0.46573448", "0.46566963", "0.46566963", "0.46558478", "0.46497917", "0.46435446", "0.4638981", "0.46362844", "0.4633588", "0.46311837", "0.46311632", "0.46286586", "0.46259743", "0.46137366", "0.46096304", "0.46081117", "0.46025643", "0.460112", "0.45979732", "0.4589087", "0.45875078", "0.45808235", "0.45729458", "0.45703357", "0.45661184", "0.45604217", "0.45556968", "0.45537376", "0.45507598", "0.45506203", "0.4550373", "0.45499006", "0.4547367", "0.45451796", "0.45412105", "0.45406651", "0.45398462", "0.45317587", "0.4524932", "0.45236942", "0.4519759", "0.4519272", "0.45122936", "0.45095408", "0.4506367", "0.4506145", "0.45012844", "0.45006037", "0.4496308", "0.44907206", "0.44889742", "0.4487329", "0.44873196", "0.44845676", "0.44843644", "0.44831914", "0.44765204", "0.44758448", "0.44702378", "0.44694602" ]
0.68057257
1
Returns an array for the inheritance at a Y-chromosome locus with n alleles (where the first allele means "no Y chromosome", i.e. female).
def nuclear_inheritance_at_y_locus(n): ary = np.zeros((n,n,n)) # female, male, offspring for male in range(1,n): ary[0,male,0] = ary[0,male,male] = 1 # one daughter, one son return ary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nuclear_inheritance_at_single_locus(n):\n ary = np.zeros((n,n,n))\n for female in range(n):\n for male in range(n):\n for offspring in range(n):\n if female==male==offspring:\n ary[female,male,offspring]=1.\n if female!=male:\n if (offspring==female) or (offspring==male):\n ary[female,male,offspring]=0.5\n return ary", "def basisVar(bases):\n \n var = [] # The ordered list of active coordinates \n k = 0\n for b in bases:\n if np.isscalar(b):\n # A singleton, inactive coordinate\n k += 1\n else:\n # An active basis\n for i in range(b.nd):\n var.append(k)\n k += 1 \n return var", "def coordSubcoord(bases):\n subcoord_of_coord = []\n for b in bases:\n if np.isscalar(b):\n subcoord_of_coord.append(0) # Singleton scalar\n else:\n for i in range(b.nd):\n subcoord_of_coord.append(i)\n return subcoord_of_coord", "def get_occupants(self):\n return [x.get_occupant_type() for x in self.huts]", "def get_codon_arr(chromosome: Chromosome) -> np.ndarray:\n\n seq_len = len(chromosome.sequence)\n arr = np.zeros((seq_len - 2,), dtype=np.int)\n\n for f in chromosome.features:\n\n if f.type != 'CDS':\n continue\n if f.strand == '-':\n continue\n\n protein_len = (f.end - f.start) // 3\n for aa in range(protein_len):\n pos = f.start + (aa * 3) - 1 # -1 to 0-based\n arr[pos] = 1\n\n return arr", "def get_bases():\n\treturn ((MV.ONE,),) + MV.blades[1:]\n\t# return ((MV.ONE,),) + MV.bases[1:]", "def all_baselines():\n for i in range(len(active_ants)):\n ID1 = active_ants[i]\n for j in range(i + 1, len(active_ants[i + 1:])):\n ID2 = active_ants[j]\n print(\"Baseline between antennae \" + str(ID1) + \\\n \" and \" + str(ID2) + \" = \" + str(ant.baseline(ID1, ID2)))", "def gt_bases(self):\n result = []\n for a in self.gt_alleles:\n if a is None:\n result.append(None)\n elif a == 0:\n result.append(self.site.REF)\n else:\n result.append(self.site.ALT[a - 1].value)\n return tuple(result)", "def _makebaselines(self):\n nholes = self.ctrs.shape[0]\n blist = []\n for i in range(nholes):\n for j in range(nholes):\n if i < j:\n blist.append((i, j))\n barray = np.array(blist).astype(np.int)\n #blname = []\n bllist = []\n for basepair in blist:\n # blname.append(\"{0:d}_{1:d}\".format(basepair[0],basepair[1]))\n baseline = self.ctrs[basepair[0]] - self.ctrs[basepair[1]]\n bllist.append(baseline)\n return barray, np.array(bllist)", "def nlegomena(self, n: int) -> list:\n nlegomena_ = [typ for typ, freq in self.items() if freq == n]\n return nlegomena_", "def get_coverage(self):\n if len(self) == 1:\n return self.subacqs[0].get_coverage()\n return np.array([self.subacqs[i].get_coverage() for i in range(len(self))])", "def n(self):\n return len(self.genotypes)", "def _get_elbow_info(self, n_components: int) -> Tuple[range, List[float]]:\n principalDf = self._get_principal_df(n_components)\n return self._generate_intertias(n_components, principalDf)", "def landmarks_from_rectangle(n, maxs):\n # Some random generator\n landmarks = []\n if n >= 4:\n landmarks = [[1,1],\n [maxs[0]-1, 1],\n [maxs[0]-1, maxs[1]-1],\n [1, maxs[1]-1],\n ]\n for i in range(len(landmarks),n):\n landmarks.append(nprnd.rand(2) * maxs)\n return np.array(landmarks).T", "def get_bases():\n\tbss = []\n\tfor es in MV.index:\n\t\tbs = []\n\t\tif es == ():\n\t\t\tbs.append(_1)\n\t\telse:\n\t\t\tfor js in es:\n\t\t\t\tbmv = reduce(operator.mul, map(lambda j: e[j], js))\n\t\t\t\tbs.append(bmv)\n\t\t\t\t\n\t\tbss.append(bs)\n\t\n\treturn bss", "def locations_n(self):\n return self.locations[1]", "def extract_levels(enemy_behavior: List[Any]):\n levels = 
set()\n levels.add(1)\n for b in enemy_behavior:\n if type(b) == ESBranchLevel:\n levels.add(b.branch_value)\n elif hasattr(b, 'level'):\n levels.add(b.level)\n return levels", "def getBaseDistinctions(self):\n\n return []", "def initialize_dna(self):\n return np.random.rand(1, self.n_genes) * 2 - 1", "def get_nucliators_num_and_proba(self):\n XY = self.XY\n TIMES = self.die_times\n # CHEN'S IMPLEMENTATION\n # nucliators = np.array([True for i in range(len(TIMES))])\n # leaders = np.array([-1 for i in range(len(TIMES))])\n # cells_idx_sorted_by_times = np.arange(0, len(TIMES), 1)\n # for cell_idx in cells_idx_sorted_by_times:\n # # nucliators[cell_idx] = True\n # cell_death = TIMES[cell_idx]\n # neighbors_prior_death = [True for i in range(len(self.neighbors_list[cell_idx]))]\n # for neighbor_idx in self.neighbors_list[cell_idx]:\n # # if nucliators[cell_idx] == True:\n # # break\n # neighbor_death = TIMES[neighbor_idx]\n # if cell_death > neighbor_death:# and leaders[cell_idx] == -1:\n # nucliators[cell_idx] = False\n # # leaders[cell_idx] = cell_idx\n # elif cell_death == neighbor_death and not nucliators[neighbor_idx]:\n # nucliators[cell_idx] = False\n # leaders[cell_idx] = cell_idx\n # else:\n # nucliators[cell_idx] = True\n # # if leaders[neighbor_idx] != -1:\n # # leaders[cell_idx] = leaders[neighbor_idx]\n #\n # self.nucliators = nucliators\n # self.nucliators_num = nucliators.sum()\n # self.nucliation_proba = self.nucliators_num / len(XY)\n\n # MY IMPLEMENTATION\n self.nucliators = self.nucliators_counter.calc_nucliators()\n self.nucliators_num = self.nucliators.sum()\n self.nucliation_proba = self.nucliators_num / len(self.XY)", "def create_n_defenders(n, rs_nb, hp_proportion, hp_unit_cost=0, offset=0, name=\"\"):\n defenders = []\n for i in range(offset,n+offset):\n if(name != \"\"):\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost, name=name)\n else:\n d = Defender(i,rs_nb,hp_proportion=hp_proportion,hp_unit_cost=hp_unit_cost)\n defenders.append(d)\n return defenders", "def get_base_coords(self):\n num_bases = len(self.tour)\n base_coords = np.zeros((num_bases,3), dtype=float)\n for i,base in enumerate(self.tour):\n helix_num = base.h\n helix_pos = base.p\n helix = self.helix_list[helix_num]\n base_coords[i] = base.coordinates\n return base_coords", "def getHierarchies():", "def getHierarchies():", "def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n 
bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))", "def make_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_neuac = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if i == 0:\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_neuac) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 1, base_neuac + i\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def higher_taxonomy(self):\n return self.metadata.groupby(['Higher Taxonomy']\n ).size().reset_index().rename(columns={0: 'Organisms'})", "def agentCounter(gameState, index, depth):\n if index == gameState.getNumAgents():\n return [depth-1, 0]\n else:\n return [depth, index]", "def expected_inheritance(variant_obj):\n manual_models = set()\n for gene in variant_obj.get('genes', []):\n manual_models.update(gene.get('manual_inheritance', []))\n return list(manual_models)", "def get_rooms(self) -> np.array:\n if self._rooms is not None:\n return self._rooms\n rooms = []\n regions = measure.regionprops(self.instance)\n for region in regions:\n # c is the most common value in the region,\n # in this case c is the category of the room (channel 2)\n c = stats.mode(\n self.category[region.coords[:, 0],\n region.coords[:, 1]],\n axis=None\n )[0][0]\n i = stats.mode(\n self.instance[region.coords[:, 0],\n region.coords[:, 1]],\n axis=None\n )[0][0]\n y0, x0, y1, x1 = np.array(region.bbox)\n rooms.append([y0, x0, y1, x1, c, i])\n self._rooms = np.array(rooms, dtype=int)\n return self._rooms", "def known_organisms():\n return [\"rat\"]", "def rectangleindices(self):\n return {r.n for r in self.rectangles}", "def get_age_distribution(school_type, N_classes):\n\tage_bracket = get_age_bracket(school_type)\n\tclasses = list(range(1, N_classes + 1))\n\tN_age_bracket = len(age_bracket)\n\tclasses_per_age_bracket = int(N_classes / N_age_bracket)\n\t\n\tassert 
N_age_bracket <= N_classes, \\\n\t'not enough classes to accommodate all age brackets in this school type!'\n\t\n\tage_bracket_map = {i:[] for i in age_bracket}\n\t\n\t# easiest case: the number of classes is divisible by the number of floors\n\tif N_classes % N_age_bracket == 0:\n\t\tfor i, age_bracket in enumerate(age_bracket):\n\t\t\tage_bracket_map[age_bracket] = classes[i * classes_per_age_bracket:\\\n\t\t\t\t\t i * classes_per_age_bracket + classes_per_age_bracket]\n\t\t\n\t# if there are leftover classes: assign them one-by-one to the existing \n\t# age brackets, starting with the lowest\n\telse:\n\t\tleftover_classes = N_classes % N_age_bracket\n\t\tclasses_per_age_bracket += 1\n\t\tfor i, age_bracket in enumerate(age_bracket):\n\t\t\tif i < leftover_classes:\n\t\t\t\tage_bracket_map[age_bracket] = \\\n\t\t\t\t\t\tclasses[i * classes_per_age_bracket: \\\n\t\t\t\t\t\ti * classes_per_age_bracket + classes_per_age_bracket]\n\t\t\t# hooray, index magic!\n\t\t\telse:\n\t\t\t\tage_bracket_map[age_bracket] = \\\n\t\t\t\t\tclasses[leftover_classes * classes_per_age_bracket + \\\n\t\t\t\t\t (i - leftover_classes) * (classes_per_age_bracket - 1):\n\t\t\t\t\tleftover_classes * (classes_per_age_bracket) + \\\n\t\t\t\t\t (i - leftover_classes) * (classes_per_age_bracket - 1) + \\\n\t\t\t\t\t classes_per_age_bracket - 1]\n\t\n\t# invert dict for easier use\n\tage_bracket_map_inv = {}\n\tfor age_bracket, classes in age_bracket_map.items():\n\t\tfor c in classes:\n\t\t\tage_bracket_map_inv.update({c:age_bracket}) \n\t\t\t\t\n\treturn age_bracket_map_inv", "def omission_index(n, sample_size):\n \n \"randomly pick some subset of sample_size agents\"\n index = np.sort(np.random.choice(n,sample_size,replace=False))\n \"double up index to choose x and y positions columns. both are used.\"\n index2 = np.repeat(2*index,2) \n \"nudge every second item to take the ith+1 column (y coordinate corresponding to chosen x)\"\n index2[1::2] += 1\n return index, index2", "def _get_taxa_richness_map(self):\n taxa = self.get_taxon_objects(extant_at_latest_time=True)\n\n if taxa:\n masks = np.stack([taxon.range_mask for taxon in taxa])\n richness_mask = sum(masks).astype(int)\n else:\n richness_mask = np.zeros(self._grid.number_of_nodes, dtype=int)\n\n return richness_mask", "def get_training_array(self, window_size: str = None):\n self.window_size = window_size or self.window_size\n y_index = {\n 'A': 0, \n 'C': 1,\n 'G': 2, \n 'T': 3,\n '0/1': 4, '1/0': 4, # heterozygous\n '1/1': 5, # homozygous\n '0/0': 6, # non-variant :: assigned where alignments are not found to be variants. 
Need to finish populating with bam file.\n # 7 :: complex/non-snp :: assigned to be a variant that is an indel, but not an SNP\n }\n y = [0, 0, 0, 0, 0, 0, 0, 0] # ['A', 'C', 'T', 'G', hom-ref, het, hom-alt, complex-dump]\n Y = {}\n X_initial = []\n Y_initial = []\n position_array = []\n left_offset = math.floor(self.window_size / 2)\n right_offset = math.ceil(self.window_size / 2)\n if not self.window_size % 2: print('shit man, the window needs to be odd; needs to have a middle position')\n if self.bed_file:\n focus_regions = pd.read_csv(self.bed_file, delimiter='\\t', header=None)[[1, 2]].apply(tuple, axis=1).tolist()\n focus_regions = pd.arrays.IntervalArray.from_tuples(focus_regions, closed='both')\n count = 0\n too_complex = set()\n self.variants_called = set()\n if self.vcf_file:\n vcf = pd.read_vcf(self.vcf_file) # Should only have one sample\n if len(vcf.columns) > 10:\n exit(f'ERROR :: VCF file has too many samples')\n vpos = -float('inf')\n for row in vcf.itertuples():\n # if not self.variant_calls.get(row.POS):\n # continue\n if self.bed_file: \n if not any(focus_regions.contains(row.POS-1)): # bed file 0-index\n count += 1\n continue\n y_vec = y[:] # ['A', 'C', 'T', 'G', het, hom, non, complex]\n # get genotype call. default to non-variant\n genotype = row[-1]['GT'].replace('|', '/')\n genotype_index = y_index.get(genotype)\n if not genotype_index:\n continue\n try:\n # HETEROZYGOUS\n if genotype_index == 4:\n y_vec[y_index[row.REF[0]]] = .5\n y_vec[y_index[row.ALT[0]]] = .5\n # y_vec[y_index[row.REF[0]]] = 1\n # y_vec[y_index[row.ALT[0]]] = 1\n # HOMOZYGOUS\n elif genotype_index == 5:\n y_vec[y_index[row.ALT[0]]] = 1\n # y_vec[y_index[row.ALT[0]]] = 1\n # NON-VARIANT\n elif genotype_index == 6:\n y_vec[y_index[row.REF[0]]] = 1\n # y_vec[y_index[row.REF[0]]] = 1\n # COMPLEX\n elif genotype_index == 7:\n # todo: this shouldnt be always in favor of alt\n y_vec[y_index[row.ALT[0]]] = 1 # todo: maybe take avgs if this messes with the output\n # makes sure we get the proper het base call before changing the gt to complex.\n if len(row.REF) > 1 or len(row.ALT) > 1:\n genotype_index = 7\n except:\n # TODO: iupac not supported yet, too much of a slow down.\n continue\n # if abs(row.POS - vpos) < self.minimum_variant_radius:\n # genotype_index = 7\n # try:\n # Y[vpos][4] = 0\n # Y[vpos][5] = 0\n # Y[vpos][6] = 0\n # Y[vpos][7] = 1\n # except:\n # pass\n # if len(row.REF) > 5 or len(row.ALT) > 5:\n # too_complex.add(row.POS)\n # vpos = row.POS\n # continue\n vpos = row.POS\n y_vec[genotype_index] = 1\n Y[row.POS] = y_vec \n self.variants_called.add(row.POS)\n count = 0\n for position in sorted(Y):\n if self.bed_file: \n if not any(focus_regions.contains(position)): # bed file 0-index\n count += 1\n continue\n tp = position - self.contig_start - 1\n if tp < 0: # calls before contig :: incase a bed file was used \n continue\n tensor_stack = np.stack([tensor[tp-left_offset:tp+right_offset] for tensor in self.tensors], axis=2)\n if tensor_stack.size == 0: # calls after contig :: incase a bed file was used\n break \n position_array.append(position)\n X_initial.append(tensor_stack)\n Y_initial.append(Y[position])\n # print('vc skipped', count)\n # false_positives = sorted(set(self.variant_calls) - (set(Y) | too_complex))\n # self.false_positives = false_positives\n # ref_seq_seg = self.ref_seq[self.contig_start-1:self.contig_end]\n # print('false-p', len(false_positives))\n # for position in false_positives[:]:\n else:\n outside, size_catch, fp, amb_base,total=0,0,0,0,0\n for position in 
sorted(set(self.variant_calls) - self.variants_called):\n total+=1\n p = position - self.contig_start - 1 # numpy array 0-index\n if self.bed_file: \n if not any(focus_regions.contains(position)): # bed file 0-index \n outside+=1\n continue\n y = [0, 0, 0, 0, 0, 0, 1, 0]\n # TODO\n # base_position = y_index.get(self.variant_calls[position]['ref_base'])\n base_position = y_index.get(str(self.ref_seq[position-1])) # bypthon 0-index\n # p = position + self.contig_start\n if base_position != None:\n if p - left_offset < 0: # TODO: circularize if for plasmids\n print('wall hit!')\n continue\n tensor_stack = np.stack([tensor[p-left_offset:p+right_offset] for tensor in self.tensors], axis=2)\n vec = np.transpose(tensor_stack, axes=(0,2,1))\n # if sum(vec[7,:,0]) < 5:\n # size_catch+=1\n # continue\n if tensor_stack.size == 0:\n print(position, 'WARNING ::: contig past end; this should not happen!')\n break\n y[base_position] = 1\n fp+=1\n position_array.append(position)\n Y_initial.append(y) # like this incase we want to modify the base \n X_initial.append(tensor_stack)\n else:\n amb_base += 1\n # print(position, base_position, str(self.ref_seq[position-1]))\n # print('ambygous base catches:', amb_base)\n # print('bed catches:', outside)\n # print('size catches', size_catch)\n # print('fp total', fp)\n # print('total', total)\n Xarray = np.stack(X_initial).astype('float64')\n Yarray = np.stack(Y_initial).astype('float64')\n return Xarray, Yarray, position_array # Xarray, Yarray", "def pentakis(self):\n return self.nlegomena(5)", "def _cohorts(self):\n return ['parentsALL']", "def count_umbiguous_bases(sequence):\n sequence = sequence.upper()\n amb = ['N', 'R', 'Y', 'W', 'S', 'K', 'M']\n return sum({base: sequence.count(base) for base in amb}.values())", "def best_genomes(self, n):\n def key(g):\n return g.fitness\n\n return sorted(self.most_fit_genomes, key=key, reverse=True)[:n]", "def hapax(self):\n return self.nlegomena(1)", "def random(self) -> np.ndarray:\n return random_attitudes(1)", "def n(self):\n return len(self.marks)", "def test_taxonomy(n=5):\n ecoli_file = join(this_dir, \"e_coli_core.xml.gz\")\n ids = [\"Escherichia_coli_{}\".format(i) for i in range(1, n + 1)]\n taxa = pd.DataFrame({\"id\": ids})\n taxa[\"genus\"] = \"Escherichia\"\n taxa[\"species\"] = \"Eschericia coli\"\n taxa[\"reactions\"] = 95\n taxa[\"metabolites\"] = 72\n taxa[\"file\"] = ecoli_file\n return taxa", "def __addNuclideBases():\n with open(os.path.join(context.RES, \"nuclides.dat\")) as f:\n for line in f:\n # Skip header lines\n if line.startswith(\"#\") or line.startswith(\"Z\"):\n continue\n lineData = line.split()\n _z = int(lineData[0])\n _n = int(lineData[1])\n a = int(lineData[2])\n state = int(lineData[3])\n sym = lineData[4].upper()\n mass = float(lineData[5])\n abun = float(lineData[6])\n halflife = lineData[7]\n if halflife == \"inf\":\n halflife = numpy.inf\n else:\n halflife = float(halflife)\n nuSF = float(lineData[8])\n\n element = elements.bySymbol[sym]\n nb = NuclideBase(element, a, mass, abun, state, halflife)\n nb.nuSF = nuSF", "def calculate_occupancy(self):\n # TODO will need to be fixed now that using a dict and changed thresholds\n self.occupancy = [list(x > self.t for x in y) for y in self.counts]\n return self.occupancy", "def ancestry_iris(self):\n return list(self._class_types)", "def chromosomes(self):\n chromosomes_set = set()\n chromosomes = []\n for region in self.regions:\n if region.chromosome not in chromosomes_set:\n chromosomes_set.add(region.chromosome)\n 
chromosomes.append(region.chromosome)\n return chromosomes", "def evaluate ( self , genome ) :\n\n\t\tassert isinstance( genome , Genome ), 'genome supplied must be of type cc3dtools.Genome!'\n\t\tloci = genome.get_mutated_loci()\n\t\tmatched_phenotypes = []\n\t\tphenotypes = self.phenotypes.items()\n\n\t\tfor locus in loci:\n\t\t\tfor phenotype, region in phenotypes:\n\t\t\t\t# check if the locus is in the region\n\t\t\t\t# 'locus.locus' to get the float value of that mutation rather \n\t\t\t\t# than an object!\n\t\t\t\tif locus.locus > region[0] and locus.locus < region[1]:\n\t\t\t\t\tmatched_phenotypes.append( phenotype )\n\t\treturn Counter( matched_phenotypes )", "def animal_ages(self):\n herb_ages = []\n carn_ages = []\n for cell in self.land_cells.values():\n for herb in cell.herbivores:\n herb_ages.append(herb.age)\n for carn in cell.carnivores:\n carn_ages.append(carn.age)\n if not herb_ages:\n return [carn_ages]\n elif not carn_ages:\n return [herb_ages]\n else:\n return [herb_ages, carn_ages]", "def get_occupant(self):\n\t\tpass", "def _build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x", "def get_random_baselines(n_trials=5):\n if not os.path.exists(\"random-baseline/\"):\n os.makedirs(\"random-baseline/\")\n r = 0.3\n ori_sents = joblib.load('data/train-isw-sentences.pkl')\n ori_labels = joblib.load('data/train-isw-labels.pkl')\n assert len(ori_sents) ==len(ori_labels)\n\n # Zip the sents and its tags\n train_set = []\n for sent, label in zip(ori_sents, ori_labels):\n train_set.append((sent, label))\n assert len(train_set) == len(ori_sents)\n\n len_subsample = int(len(train_set)*r)\n\n # Save random samples\n for i in range(n_trials):\n random_set = choices(train_set, k=len_subsample)\n joblib.dump(random_set, 'random-baseline/random-train-r{}.pkl'.format(i))\n\n # Train and eval random baseline models\n for i in range(n_trials):\n train_script = \"python run_ner.py --output_dir random-baseline/trial{}_model/ --max_seq_length 128 --do_train --do_subtrain --subtrain_dir random-baseline/random-train-r{}.pkl\".format(i, i)\n os.system(train_script)\n\n eval_script = \"python run_ner.py --output_dir random-baseline/trial{}_model/ --do_eval --eval_on test --eval_dir random-baseline/eval_monitor/ --it_prefix {}\".format(i, i)\n os.system(eval_script)", "def getInnocents():\n\n cams = CAMS\n for cam in cams:\n image = cam.getImage()\n bloons, sizes = Vision_Processing.GetBalloon.getFriends(image)\n points = []\n for bloon in bloons:\n points.append(np.array([bloon[0], bloon[1]]))\n for i in range(len(bloons)):\n cam.addTarget(points[i])\n cam.addSize(sizes[i])\n targets = triangulate()\n # targets = cartesianToSpheric(targets, place, orientation)\n return targets", "def 
Nlevels(self):\n return self._nlevels", "def specified_unchanging_attributes(self) -> List[int]:\n indices = []\n\n for idx, (cpi, epi) in enumerate(zip(self.condition, self.effect)):\n if isinstance(epi, ProbabilityEnhancedAttribute):\n if cpi != self.cfg.classifier_wildcard and \\\n epi.does_contain(cpi):\n indices.append(idx)\n else:\n if cpi != self.cfg.classifier_wildcard and \\\n epi == self.cfg.classifier_wildcard:\n indices.append(idx)\n\n return indices", "def _get_ndarray(self, coords):\n with h5py.File(self.hdf5_path, \"r\") as reader:\n profiles = np.stack([\n self._get_profile(chrom, start, end, reader) \\\n for chrom, start, end in coords\n ])\n return profiles", "def get_coverage(self):\n coverage = np.zeros(self.Set.Shape, dtype=np.int8)\n for ig in self.Set:\n igram = self.load_ma(ig)\n coverage[~igram.mask] += 1\n\n return coverage", "def get_uniprot_clusters():\n\tjson_str = \"\"\n\tfor line in open(gpcr_tree_path, 'r'):\n\t\tjson_str += line \n\tgpcr_tree = json.loads(json_str)\n\n\tuniprot_clusters = []\n\t\n\tfor a in gpcr_tree[\"children\"]:\n\t\tfor b in a[\"children\"]:\n\t\t\tcluster = []\n\t\t\tfor uniprot in b[\"children\"]:\n\t\t\t\tuniprot_id = (str(uniprot['name']) + \"_human\").upper()\n\t\t\t\tcluster.append(uniprot_id)\n\t\t\tuniprot_clusters.append(cluster)\n\n\treturn uniprot_clusters", "def __getBaselineList(self):\n\n # cumulative baseline selections do not reflect on the msselectedindices()\n if self._msTool is None:\n self.__selectMS()\n\n \n # If there are any previous antenna selections, use it\n if self._arg['antenna'] != '':\n baselineSelection = {'baseline':self._arg['antenna']}\n try:\n self._msTool.msselect(baselineSelection, onlyparse=False)\n # IMPORTANT: msselectedindices() will always say there are auto-correlation\n # baselines, even when there aren't. In the MMS case, the SubMS creation will\n # issue a MSSelectionNullSelection and not be created. 
\n baselinelist = self._msTool.msselectedindices()['baselines']\n except:\n baselinelist = []\n else:\n md = msmdtool()\n md.open(self._arg['vis'])\n baselines = md.baselines()\n md.close()\n import numpy as np\n baselinelist = np.vstack(np.where(np.triu(baselines))).T \n \n\n return baselinelist.tolist()", "def vnEy(self):\n if self.dim < 2:\n return None\n return np.array(\n [x for x in [self.nNx, self.nCy, self.nNz] if x is not None],\n dtype=int\n )", "def n_classes(self):\n raise NotImplementedError", "def n_classes(self):\n raise NotImplementedError", "def _get_majority_baseline(self) -> np.ndarray:\n\n self._majority_label = max(set(self.y_training_set_all_),\n key=self.y_training_set_all_.count)\n return np.array([self._majority_label]*len(self.y_training_set_all_))", "def getLevels():", "def nPositions(self, int_max=None):\n\n return np.array(list(map(\n lambda time0: self.getPositions(time0),\n self._time0(int_max=int_max))))", "def get_maximum_y_coordinate_from_generation(self, x: int, z: int) -> int:\n raise NotImplementedError", "def octant_occupancy(x, y, z):\n\n octants = np.zeros(8, dtype=\"int\")\n\n for a,b,c in zip(x,y,z):\n\n if a < 0 and b < 0 and c < 0:\n octants[0] += 1\n elif a < 0 and b < 0 and c > 0:\n octants[1] += 1\n elif a < 0 and b > 0 and c < 0:\n octants[2] += 1\n elif a < 0 and b > 0 and c > 0:\n octants[3] += 1\n elif a > 0 and b < 0 and c < 0:\n octants[4] += 1\n elif a > 0 and b < 0 and c > 0:\n octants[5] += 1\n elif a > 0 and b > 0 and c < 0:\n octants[6] += 1\n elif a > 0 and b > 0 and c > 0:\n octants[7] += 1\n else:\n # Possible because of edge cases, shouldn't come up in calibration\n pass\n\n return octants", "def base_composition(reads, base):\n assert base.upper() in set(\"ACGT\")\n\n \"\"\" Reports nucelotide frequencies at each position in the\n sam sequences\n \"\"\"\n # DNA_Alphabet=[\"A\",\"C\",\"T\",\"G\",\"N\"]\n all_nucs = []\n for read in reads:\n nucs = {} # Dictionary to store nucleotide data.\n seq = read[9]\n for i in range(0, len(seq)):\n nucs[str(i + 1)] = seq[i]\n all_nucs.append(nucs)\n all_items = []\n counts = []\n for dicts in all_nucs:\n for item in dicts.items():\n all_items.append(item)\n all_items.sort(key=operator.itemgetter(0))\n groups = [map(operator.itemgetter(1), list(group))\n for key, group in itertools.groupby(\n all_items, operator.itemgetter(0))]\n for group in groups:\n counts.append(group.count(base))\n\n pos = range(1, len(seq) + 1)\n\n # Create plot.\n plt.figure(1, figsize=(8, 8))\n plt.axes([0.1, 0.1, 0.8, 0.8])\n plt.bar(pos, counts, facecolor='g')\n plt.xlabel(\"Position\")\n plt.ylabel(\"number of mapped reads\")\n plt.title(base)\n plt.show()", "def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if 
not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults", "def classmx_ptcls(classmx,n):\n\n\tif isinstance(classmx,str) : classmx=EMData(classmx,0)\n\n\tplist=[i.y for i in classmx.find_pixels_with_value(float(n))]\n\n\treturn plist", "def lower_taxonomy(self):\n return self.metadata.groupby(['Higher Taxonomy', 'Lower Taxonomy']\n ).size().reset_index().rename(columns={0: 'Organisms'})", "def get_array_of_children(self):\n children = [self.posXposYposZ,self.posXposYnegZ,self.posXnegYposZ,self.posXposYnegZ,self.negXposYposZ,self.negXposYnegZ,self.negXnegYposZ,self.negXnegYnegZ ] \n return children", "def nb_leafy_rameau(x):\r\n return sum([nb_leafy_rameau_cat(x, cat) for cat in ['small', 'medium', 'large']])", "def test_ontology_term_inheritance(ontology):\n\tassert len(ontology.inherited(\"TO:0000001\")) == 1\n\tassert len(ontology.inherited(\"TO:0000002\")) == 2\n\tassert len(ontology.inherited(\"TO:0000003\")) == 2\n\tassert len(ontology.inherited(\"TO:0000004\")) == 4\n\tassert len(ontology.inherited(\"TO:0000005\")) == 5\n\tassert len(ontology.inherited(\"TO:0000006\")) == 5\n\tassert len(ontology.inherited(\"TO:0000007\")) == 2\n\tassert len(ontology.inherited(\"TO:0000008\")) == 3\n\tassert len(ontology.inherited(\"TO:0000009\")) == 4\n\n\tassert len(ontology.inherited([\"TO:0000002\",\"TO:0000003\"])) == 3\n\tassert len(ontology.inherited([\"TO:0000009\",\"TO:0000005\"])) == 8\n\tassert len(ontology.inherited([\"TO:0000004\",\"TO:0000003\"])) == 4\n\tassert len(ontology.inherited([\"TO:0000002\"])) == 2\n\tassert len(ontology.inherited([])) == 0", "def get_coverage_profile(bam_file, window):\n try:\n bam = HTSeq.BAM_Reader(bam_file)\n coverage = HTSeq.GenomicArray(\"auto\", stranded=False, typecode=\"i\")\n try:\n for alnmt in bam[window]:\n if alnmt.aligned:\n coverage[alnmt.iv]+=1\n except ValueError:\n sys.exit(\"\"\"Exception when reading the BAM file.\n This is common for two situations:\n 1: There is no .bai file for your BAM file \n 2: It is possible that your BAM file and GTF file do not have the same chromosome names. \n Check the chromosome names in the sam or index file and those in the GTF for agreement (and fix as necessary).\"\"\") \n #we now have coverage, which is a generator for tuples. \n #Each tuple has a GenomicInterval and an integer for the read-depth. 
To eventually plot, we need to make this into a numpy array\n cvg_list = []\n it = coverage.steps() #an iterator\n try:\n step = it.next() #get the first object from the iterator so we can enter the while loop\n while step:\n if step[0].start<=window.end and step[0].end>=window.start: #if step overlaps with window\n if step[0].start<=window.start and step[0].end>=window.end: #if the step is longer than the window (unlikely, but possible)\n cvg_list.extend(window.length*[step[1]])\n elif step[0].start<=window.start:\n cvg_list.extend(abs(step[0].end-window.start)*[step[1]]) \n elif step[0].end>=window.end:\n cvg_list.extend(abs(window.end-step[0].start)*[step[1]])\n else:\n cvg_list.extend(step[0].length*[step[1]]) \n step = it.next()\n except StopIteration: #when the generator is done, it throws an exception, which we catch and move on\n pass\n if len(cvg_list) == 0:\n sys.exit(\"Could not find any coverage data for the genomic region: \"+str(window)+\" in BAM file: \"+str(bam_file))\n cvg = np.array(cvg_list)\n return cvg\n except IOError:\n sys.exit(\"Could not locate the BAM file at \"+str(bam_file))", "def n_levels(self):\n return len(self.scales)", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def get_levels(std0, slope, nsigma):\n nslope = nsigma * slope\n levels = [0]\n while levels[-1] <= 1:\n levels.append((levels[-1] * (1 + nslope) + 2 * nsigma * std0) / (1 - nslope))\n levels.pop()\n return levels", "def get_champion(self):\n return max(self.genomes)", "def get_age_bounds(input_dir):\r\n ages = pd.read_csv(os.path.join(input_dir, \"age_bounds.csv\"))\r\n return ages", "def n_y(self, level):\n resolution = self.resolution(level)\n return (self.y_extent // resolution + 63) // 64", "def find_lms_suffixes(t, n):\n pos = array(\"l\")\n for i in range(n):\n if t[i] == S_TYPE and t[i - 1] == L_TYPE:\n pos.append(i)\n return pos", "def getMostFitChromosome(self, chromosomes: ChromList) -> Chrom:\n raise NotImplementedError", "def create_array( n ):", "def random_chromosome(self):\n genes = []\n for i in range(self.chromosome_length):\n genes.append(self.random_gene())\n\n return genes", "def getNextGeneration(self, chromosomes: ChromList) -> ChromList:\n parents = self.select(chromosomes)\n offspring = self.crossover(parents)\n offspring = self.mutate(offspring)\n return parents + offspring", "def _endx(self, parents):\n ALPHA = (1.-2*0.35**2)**0.5/2.\n BETA = 0.35/(self.n_gene-1)**0.5\n\n child = np.empty(self.n_gene+1)\n\n t1 = (parents[1, :self.n_gene]-parents[0, :self.n_gene]) / 2.\n t2 = np.random.normal(scale=ALPHA) * (\n parents[1, :self.n_gene] - parents[0, :self.n_gene]\n )\n t3 = np.sum(\n np.random.normal(scale=BETA, size=self.n_gene)[:, np.newaxis]\n * (\n parents[2:, :self.n_gene] - (\n np.sum(parents[2:, :self.n_gene], axis=0) / self.n_gene\n )\n ), axis=0\n )\n child[:self.n_gene] = t1 + t2 + t3\n\n return child", "def get_classes(self):\n return list(range(self.num_clss))", "def return_occupancies(self, line):\n\n to_send = []\n if line == Line.LINE_GREEN:\n for i in range(len(self.blocks_green_arr)):\n to_send.append(self.blocks_green_arr[i].occupied)\n elif line == Line.LINE_RED:\n for i in range(len(self.blocks_red_arr)):\n to_send.append(self.blocks_red_arr[i].occupied)\n else:\n raise Exception('CTC : TrainSystem.return_occupancies recieved an erronious input')\n return to_send", "def ANA(self):\n MITs = []\n dbx: und.Db = und.open(self.udb_path)\n filter2 = \"Java 
Class ~Unresolved ~Unknown ~TypeVariable ~Anonymous ~Enum, Java Interface\"\n known_class_entities = dbx.ents(filter2)\n\n for class_entity in known_class_entities:\n if \"Interface\" in class_entity.kindname():\n continue\n mit = class_entity.metric(['MaxInheritanceTree'])['MaxInheritanceTree']\n MITs.append(mit)\n\n dbx.close()\n return sum(MITs) / len(MITs) if len(MITs) > 0 else 0.", "def animal_fitness(self):\n herb_fits = []\n carn_fits = []\n for cell in self.land_cells.values():\n for herb in cell.herbivores:\n herb_fits.append(herb.fitness)\n for carn in cell.carnivores:\n carn_fits.append(carn.fitness)\n if not herb_fits:\n return [carn_fits]\n elif not carn_fits:\n return [herb_fits]\n else:\n return [herb_fits, carn_fits]", "def get_coverage_as_array(self, length):\n coverage = np.array(zeros(length))\n for i in range(0,length):\n coverage[i] = self.coverage(sequence, i)", "def how_many(cls):\n #cls.population equivalent to Robot.population\n print(\"We have {:d} robots.\".format(cls.population))", "def get_basestrings(self):\n baseStrs = set()\n for x in self.xvals():\n for y in self.yvals():\n p = self.get_plaquette(x, y)\n if p is not None and p.base is not None:\n baseStrs.add(p.base)\n return list(baseStrs)", "def all_larceny(x): \n for elem in x:\n if 'Larceny' in elem:\n return 1\n return 0", "def n_classes(self):\n raise NotImplementedError()", "def get_base_frequencies(self):\n return get_base_frequencies(self._dna)", "def subtype_occurences(self):\n\n subtype_counts = Counter()\n\n for seqkey,seqs in self.seqs.iteritems():\n for seq,seqentry in seqs.iteritems():\n\n subtype_counts[seqentry['subtype']] += 1\n\n return subtype_counts", "def get_lineage(self, taxon):\n # important to reinit the second argument to []\n taxon = int(taxon)\n lineage = self._gen_lineage_and_rank(taxon, [])\n lineage = [x[0] for x in lineage]\n return lineage" ]
[ "0.68316793", "0.52946764", "0.52098745", "0.50238895", "0.50112236", "0.499121", "0.4984106", "0.4927073", "0.49246916", "0.487069", "0.48361552", "0.48348838", "0.4824314", "0.48227447", "0.48155507", "0.4785766", "0.47687185", "0.47678024", "0.47588527", "0.4755671", "0.47510064", "0.47273716", "0.46986634", "0.46986634", "0.4692934", "0.46920365", "0.46718374", "0.46690854", "0.46611673", "0.4652881", "0.46513662", "0.4648115", "0.46316427", "0.462962", "0.4618815", "0.46171865", "0.4610829", "0.46076727", "0.4604728", "0.45856422", "0.45805353", "0.45770237", "0.45609143", "0.45591158", "0.45577246", "0.45539272", "0.4551757", "0.45506215", "0.45427075", "0.45384917", "0.45380694", "0.4537676", "0.45354703", "0.45299777", "0.45200768", "0.45117173", "0.4505969", "0.4494983", "0.4491938", "0.448727", "0.44848904", "0.44822457", "0.44822457", "0.44803578", "0.4472762", "0.44701585", "0.44626868", "0.4461295", "0.44555703", "0.44520843", "0.4445169", "0.44439638", "0.44413516", "0.44324666", "0.4432347", "0.44318163", "0.44273108", "0.44207117", "0.44194838", "0.44175237", "0.4417309", "0.44151172", "0.44143963", "0.44112208", "0.44102937", "0.4409669", "0.44040817", "0.44000313", "0.4399394", "0.43920007", "0.43888414", "0.4386277", "0.43821973", "0.4381886", "0.4381323", "0.43811512", "0.4379909", "0.43765667", "0.4374669", "0.43705165" ]
0.7611592
0
Compatible index groups 0/1 (A1,A2,B1,B2) and 2/3 (A3,A4,B3,B4).
To determine the appropriate marker state, we will count the number of alleles (during the diploid phase) belonging to index group 1.

Gr. 1 count   Steril. marker   Explanation
0 or 4        S0               no allele involved in incompatibilities, fully fertile (1)
2             S1               all 4 alleles involved in incompatibilities, sterile (1h)
1 or 3        S2               3 alleles involved in incompatibilities, partially sterile (1dh)
def offspring_fertility(n1=4, n2=4):
    ary = np.zeros((n1, n2, n1, n2, 3), float)
    for i in range(n1):
        for j in range(n2):
            for k in range(n1):
                for l in range(n2):
                    # set group counter to zero (one counter is sufficient)
                    gc1 = 0
                    for index in [i, j, k, l]:
                        if index in [0, 1]:
                            gc1 += 1
                    if gc1 == 0 or gc1 == 4:
                        ary[i, j, k, l, 0] = 1.  # set mark at S0
                    elif gc1 == 1 or gc1 == 3:
                        ary[i, j, k, l, 2] = 1.  # set mark at S2
                    else:
                        ary[i, j, k, l, 1] = 1.  # set mark at S1
    return ary
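A minimal usage sketch of the function above, assuming numpy is imported as np (the function presupposes it) and the default allele indexing where indices 0 and 1 form group 1:

import numpy as np

ary = offspring_fertility()            # default n1=n2=4 -> array of shape (4, 4, 4, 4, 3)
# (0, 1, 0, 1): group-1 count is 4 -> S0, fully fertile
assert ary[0, 1, 0, 1, 0] == 1.0
# (0, 1, 2, 3): group-1 count is 2 -> S1, sterile
assert ary[0, 1, 2, 3, 1] == 1.0
# (0, 2, 2, 3): group-1 count is 1 -> S2, partially sterile
assert ary[0, 2, 2, 3, 2] == 1.0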
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def giniIndex(self, groups):\n n = sum([len(g) for g in groups])\n gini = 0.0\n for g in groups:\n if len(g) == 0: continue\n score = 0\n for c in self.classes:\n p = [r[-1] for r in g].count(c) / len(g)\n score += p * p\n gini += (1 - score) * len(g) / n\n return gini", "def _get_flagging(flagging_file, flag_ant_file, num_integ, n_chan, baseline_names, bad_chan_pct_count):\n\n # Inner: 1-6\n # Mid: 7-30\n # Outer: 31 - 36\n base_idx_map = _build_baseline_index(baseline_names)\n\n # Finding out which antenna has been flagged completely.\n all_ant1, all_ant2, all_flag = [], [], []\n per_integ_flag = []\n baseline_count, baseline_flag = np.zeros((len(baseline_names))), np.zeros((len(baseline_names)))\n integ_ant1, integ_ant2, integ_flag = [], [], []\n integ_num_inner, integ_flag_inner, integ_num_outer, integ_flag_outer = 0, 0, 0, 0\n integ_baseline_count, integ_baseline_flag = np.zeros((len(baseline_names))), np.zeros((len(baseline_names)))\n num_integ_flagged = 0\n print ('Processing ', flagging_file)\n with open(flagging_file, 'r') as f:\n for line in f:\n if \"#\" not in line: # grep -v \"#\"\n if line.startswith('Flagged a total of'):\n flag_total_line = line\n continue\n if \"Flagged\" in line: # grep -v \"Flagged\"\n continue\n tokens = line.split()\n if len(tokens) < 7: # Skip by-channel summaries at the end of the file\n continue\n ant1 = int(tokens[3])\n ant2 = int(tokens[4])\n flag = float(tokens[6])\n if (ant1 < ant2) and (flag == 100): \n # extract non-correlated antenna pairs with 100 percent flagging\n integ_ant1.append(ant1)\n integ_ant2.append(ant2)\n integ_flag.append(flag)\n if ant1 < ant2:\n # Record flagging for each baseline\n base_name = '{}-{}'.format(ant1+1, ant2+1)\n base_idx = base_idx_map[base_name]\n integ_baseline_count[base_idx] += 1\n integ_baseline_flag[base_idx] += flag\n bad_chan_pct_count[int(flag)] += 1\n elif \"# Integration Number:\" in line:\n tokens = line.split()\n integ_num = int(tokens[3])\n flag = float(tokens[5])\n per_integ_flag.append(flag)\n if flag == 100:\n num_integ_flagged += 1\n # totally flagged so don't count individual flagging\n else:\n all_ant1.extend(integ_ant1)\n all_ant2.extend(integ_ant2)\n all_flag.extend(integ_flag)\n baseline_count += integ_baseline_count\n baseline_flag += integ_baseline_flag\n # Reset the integration details ready for the enxt integration (if any)\n integ_ant1, integ_ant2, integ_flag = [], [], []\n integ_baseline_count, integ_baseline_flag = np.zeros((len(baseline_names))), np.zeros((len(baseline_names)))\n\n\n exp_count = (num_integ - num_integ_flagged) * 35 # Number of unflagged integrations times number of non-autocorrelation baselines\n\n # Analyse the flagging data\n ant1, ant2, flag = np.asarray(all_ant1), np.asarray(all_ant2), np.asarray(all_flag)\n\n ant_names = []\n for x in range(0,36):\n count1 = np.count_nonzero(ant1 == x)\n count2 = np.count_nonzero(ant2 == x)\n total_count = count1 + count2\n if total_count == exp_count:\n ant_num = x+1\n ant_name = 'ak{:02d}'.format(ant_num)\n ant_names.append(ant_name)\n\n total_flagged_ant = len(ant_names)\n\n with open(flag_ant_file,'a') as ffile:\n ffile.write(flagging_file[-24:-18])\n if total_flagged_ant > 0:\n ffile.write('\\n')\n for item in ant_names:\n ffile.write(item)\n ffile.write('\\n')\n else:\n ffile.write('\\n none \\n')\n ffile.write('\\n')\n \n flag_pct_integ = 0 if num_integ == 0 else 100* num_integ_flagged / num_integ\n baseline_flag_pct = baseline_flag / baseline_count\n\n # Getting data flagged percentage from the last line of the 
summary\n str_line = flag_total_line\n if isinstance(str_line, bytes):\n str_line = str_line.decode('utf-8')\n tokens = str_line.split()\n total_flagged_pct = float(tokens[-2]) #data+autocorrelation\n total_uv = float(tokens[7])\n autocorr_flagged_pct = (36 * num_integ * n_chan / total_uv)*100.0\n data_flagged_pct = round(total_flagged_pct - autocorr_flagged_pct, 3)\n\n return data_flagged_pct, total_flagged_ant, flag_ant_file, ant_names, flag_pct_integ, baseline_flag_pct, per_integ_flag", "def simple_de_matching(adata, markers, n_genes=100):\n gene_groups = adata.uns['rank_genes_groups']\n de_genes = pd.DataFrame(data=gene_groups['names']).head(n_genes)\n #print(de_genes.head(10))\n\n matches = check_markers(de_genes, markers)\n for key, value in matches.items():\n print(f'-- cluster {key} --')\n print(value)\n\n return de_genes", "def compute_detection_counts(kinds, valid_mask, aoi_mask, scene_counts):\n scene_counts = np.maximum(scene_counts, 1)\n if len(kinds):\n pairs = (kinds == 'pair_trawlers')\n singles = (kinds == 'single_trawler')\n scales = (kinds == 'pair_trawlers') * 2 + (kinds == 'single_trawler')\n aoi_pts = round((scales * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n aoi_pairs = round((pairs * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n else:\n aoi_pts = aoi_pairs = 0\n return aoi_pts, aoi_pairs", "def _get_indices_2(image_set, min_groups, num_labels=2, num_protected=2):\r\n indices = [[[] for _ in range(num_protected)] for _ in range(num_labels)]\r\n for i, (_, label, _, index) in enumerate(image_set):\r\n indices[label][int(image_set.samples[i][0].split(\"/\")[-1] in min_groups[label])].append(index)\r\n\r\n return indices", "def fetch_group_lasso_datasets():\n\n # helper functions\n\n def find_interaction_index(seq, subseq,\n alphabet=\"ATGC\",\n all_possible_len_n_interactions=None):\n n = len(subseq)\n alphabet_interactions = \\\n [set(p) for\n p in list(itertools.combinations_with_replacement(alphabet, n))]\n\n num_interactions = len(alphabet_interactions)\n if all_possible_len_n_interactions is None:\n all_possible_len_n_interactions = \\\n [set(interaction) for\n interaction in\n list(itertools.combinations_with_replacement(seq, n))]\n\n subseq = set(subseq)\n\n group_index = num_interactions * \\\n all_possible_len_n_interactions.index(subseq)\n value_index = alphabet_interactions.index(subseq)\n\n final_index = group_index + value_index\n return final_index\n\n def create_group_indicies_list(seqlength=7,\n alphabet=\"ATGC\",\n interactions=[1, 2, 3],\n include_extra=True):\n alphabet_length = len(alphabet)\n index_groups = []\n if include_extra:\n index_groups.append(0)\n group_count = 1\n for inter in interactions:\n n_interactions = comb(seqlength, inter)\n n_alphabet_combos = comb(alphabet_length,\n inter,\n repetition=True)\n\n for x1 in range(int(n_interactions)):\n for x2 in range(int(n_alphabet_combos)):\n index_groups.append(int(group_count))\n\n group_count += 1\n return index_groups\n\n def create_feature_vector_for_sequence(seq,\n alphabet=\"ATGC\",\n interactions=[1, 2, 3]):\n feature_vector_length = \\\n sum([comb(len(seq), inter) *\n comb(len(alphabet), inter, repetition=True)\n for inter in interactions]) + 1\n\n feature_vector = np.zeros(int(feature_vector_length))\n feature_vector[0] = 1.0\n for inter in interactions:\n # interactions at the current level\n cur_interactions = \\\n [set(p) for p in list(itertools.combinations(seq, inter))]\n interaction_idxs = \\\n [find_interaction_index(\n seq, cur_inter,\n 
all_possible_len_n_interactions=cur_interactions) + 1\n for cur_inter in cur_interactions]\n feature_vector[interaction_idxs] = 1.0\n\n return feature_vector\n\n positive_url = \\\n \"http://genes.mit.edu/burgelab/maxent/ssdata/MEMset/train5_hs\"\n negative_url = \\\n \"http://genes.mit.edu/burgelab/maxent/ssdata/MEMset/train0_5_hs\"\n\n pos_file = tempfile.NamedTemporaryFile() #bufsize=0)\n neg_file = tempfile.NamedTemporaryFile() #bufsize=0)\n\n posreq = urllib.request.Request(positive_url)\n with urllib.request.urlopen(posreq) as posresponse:\n pos_page = posresponse.read().decode(\"utf-8\")\n\n negreq = urllib.request.Request(negative_url)\n with urllib.request.urlopen(negreq) as negresponse:\n neg_page = negresponse.read().decode(\"utf-8\")\n\n positive_sequences = [str(line.strip().upper()) for idx, line in\n enumerate(pos_page.strip().split('\\n'))\n if \">\" not in line and idx < 2 * 8000]\n\n negative_sequences = [str(line.strip().upper()) for idx, line in\n enumerate(neg_page.strip().split('\\n'))\n if \">\" not in line and\n idx < 2 * len(positive_sequences)]\n\n assert len(positive_sequences) == len(negative_sequences), \\\n \"lengths were not the same: p={pos} n={neg}\" \\\n .format(pos=len(positive_sequences), neg=len(negative_sequences))\n\n positive_vector_matrix = np.array([create_feature_vector_for_sequence(s)\n for s in positive_sequences])\n negative_vector_matrix = np.array([create_feature_vector_for_sequence(s)\n for s in negative_sequences])\n\n df = pd.DataFrame(data=np.vstack((positive_vector_matrix,\n negative_vector_matrix)))\n df.loc[0:positive_vector_matrix.shape[0], \"Label\"] = 1.0\n df.loc[positive_vector_matrix.shape[0]:, \"Label\"] = 0.0\n\n design_matrix = df\n groups = create_group_indicies_list()\n\n return design_matrix, groups", "def test_can_traverse_wide_grid(self):\n grid = [[\"1\", \"0\", \"1\", \"1\", \"0\", \"1\", \"0\", \"0\", \"1\", \"0\"]]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def _get_valid_index(lons_side1, lons_side2, lons_side3, lons_side4,\n lats_side1, lats_side2, lats_side3, lats_side4,\n lons, lats, radius_of_influence):\n\n # Coarse reduction of data based on extrema analysis of the boundary\n # lon lat values of the target grid\n illegal_lons = (((lons_side1 < -180) | (lons_side1 > 180)).any() or\n ((lons_side2 < -180) | (lons_side2 > 180)).any() or\n ((lons_side3 < -180) | (lons_side3 > 180)).any() or\n ((lons_side4 < -180) | (lons_side4 > 180)).any())\n\n illegal_lats = (((lats_side1 < -90) | (lats_side1 > 90)).any() or\n ((lats_side2 < -90) | (lats_side2 > 90)).any() or\n ((lats_side3 < -90) | (lats_side3 > 90)).any() or\n ((lats_side4 < -90) | (lats_side4 > 90)).any())\n\n if illegal_lons or illegal_lats:\n # Grid boundaries are not safe to operate on\n return np.ones(lons.size, dtype=np.bool)\n\n # Find sum angle sum of grid boundary\n angle_sum = 0\n for side in (lons_side1, lons_side2, lons_side3, lons_side4):\n prev = None\n side_sum = 0\n for lon in side:\n if prev:\n delta = lon - prev\n if abs(delta) > 180:\n delta = (abs(delta) - 360) * (delta // abs(delta))\n angle_sum += delta\n side_sum += delta\n prev = lon\n\n # Buffer min and max lon and lat of interest with radius of interest\n lat_min = min(lats_side1.min(), lats_side2.min(), lats_side3.min(),\n lats_side4.min())\n lat_min_buffered = lat_min - float(radius_of_influence) / R\n lat_max = max(lats_side1.max(), lats_side2.max(), lats_side3.max(),\n lats_side4.max())\n lat_max_buffered = lat_max + float(radius_of_influence) / R\n\n max_angle_s2 
= max(abs(lats_side2.max()), abs(lats_side2.min()))\n max_angle_s4 = max(abs(lats_side4.max()), abs(lats_side4.min()))\n lon_min_buffered = (lons_side4.min() -\n float(radius_of_influence) /\n (np.sin(np.radians(max_angle_s4)) * R))\n\n lon_max_buffered = (lons_side2.max() +\n float(radius_of_influence) /\n (np.sin(np.radians(max_angle_s2)) * R))\n\n # From the winding number theorem follows:\n # angle_sum possiblilities:\n # -360: area covers north pole\n # 360: area covers south pole\n # 0: area covers no poles\n # else: area covers both poles\n if round(angle_sum) == -360:\n # Covers NP\n valid_index = (lats >= lat_min_buffered)\n elif round(angle_sum) == 360:\n # Covers SP\n valid_index = (lats <= lat_max_buffered)\n elif round(angle_sum) == 0:\n # Covers no poles\n valid_lats = (lats >= lat_min_buffered) * (lats <= lat_max_buffered)\n\n if lons_side2.min() > lons_side4.max():\n # No date line crossing\n valid_lons = (lons >= lon_min_buffered) * \\\n (lons <= lon_max_buffered)\n else:\n # Date line crossing\n seg1 = (lons >= lon_min_buffered) * (lons <= 180)\n seg2 = (lons <= lon_max_buffered) * (lons >= -180)\n valid_lons = seg1 + seg2\n\n valid_index = valid_lats * valid_lons\n else:\n # Covers both poles don't reduce\n valid_index = np.ones(lons.size, dtype=np.bool)\n\n return valid_index", "def getAlleleCountDict(rec,idx_list=None):\n alleles = defaultdict(int)\n total_sites = 0\n missing_inds = 0\n if idx_list is None:\n idx_list = range(len(rec.samples))\n for j in idx_list:\n samp = rec.samples[j]\n if None in samp.alleles:\n alleles['N'] += len(samp.alleles)\n #missing_inds += 1\n for k in range(len(samp.alleles)):\n b = samp.alleles[k]\n if b is not None:\n alleles[b] += 1\n total_sites+=1\n return alleles", "def test_returns_correct_number_of_islands(self):\n grid = [\n [\"1\", \"1\", \"1\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"1\"],\n [\"1\", \"0\", \"1\", \"0\", \"1\"],\n [\"0\", \"1\", \"1\", \"1\", \"1\"],\n [\"1\", \"0\", \"1\", \"1\", \"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 3)", "def compute_binary_set_mappings(indexing, counts): \n ret = np.zeros_like(indexing)-1\n for vertex,index in enumerate(indexing):\n if counts[index] == 2:\n if ret[index] == -1:\n ret[index] = vertex\n return ret", "def _group_index_correction(group_index):\n if len(group_index)==1:\n #just to make sure 2 letter group_index is available\n group_index=group_index+group_index\n if group_index[1]=='I':\n #Is I for intermediate[not standard] or is it actually L\n group_index=group_index[0]+'L'\n if not group_index[0] in ['S','M','G','C','P','O']:\n group_index=group_index[1]+group_index[1]\n if not group_index[0] in ['S','M','G','C','P','O']:\n #cannot determine make it clay\n #@TODO add fail here\n group_index='C'+group_index[1]\n return group_index", "def GetBondMasks(i, N, Bonds23, Bonds4):\n j = i + 1\n MinID = i*N - (i+1)*(i+2)/2 + j\n MaxID = MinID + N - j - 1\n b23 = Bonds23[logical_and(Bonds23 >= MinID, Bonds23 <= MaxID)] - MinID + j\n b4 = Bonds4[logical_and(Bonds4 >= MinID, Bonds4 <= MaxID)] - MinID + j\n MaskNot23 = ones(N, bool)\n MaskNot23[b23] = False\n Mask4 = zeros(N, bool)\n Mask4[b4] = True\n Mask4[b23] = False\n return MaskNot23, Mask4", "def n_calib_groups(self):\n return None if self.calib_bitmask is None else self.calib_bitmask.nbits", "def part2a_0():\n xs = exampleInput\n phi = Counter({('-BEGIN-', '-FEAT-'): 1.0, ('-FEAT-', 'Beautiful'): 1.0, ('-FEAT-', 'PREV:-BEGIN-'): 1.0, ('-FEAT-', 'NEXT:2'): 1.0, ('-FEAT-', '-CAPITALIZED-'): 1.0, 
('-FEAT-', '-POST-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(0, '-BEGIN-', '-FEAT-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n\n phi = Counter({('-FEAT-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:Beautiful'): 1.0, ('-SIZE-', 'NEXT:bedroom'): 1.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 1.0, ('-SIZE-', '2'): 1.0, ('-SIZE-', '-POST-CAPITALIZED-'): 0.0, ('-SIZE-', '-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(1, '-FEAT-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n \n phi = Counter({('-SIZE-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:2'): 1.0, ('-SIZE-', 'bedroom'): 1.0, ('-SIZE-', 'NEXT:-END-'): 1.0, ('-SIZE-', '-CAPITALIZED-'): 0.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(2, '-SIZE-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )", "def main():\n n = 34\n # create the adjacency matrix\n stripped_lines = Util.get_stripped_lines(g_karate_data.splitlines())\n string_rows = [line.split() for line in stripped_lines if line]\n assert len(string_rows) == n\n for row in string_rows:\n assert len(row) == n\n data_rows = [[float(x) for x in string_row] for string_row in string_rows]\n A = np.array(data_rows)\n # create the ordered module indices\n first_cluster_one_based_indices = [1, 3, 4, 14, 2, 8, 20, 18, 22, 13, 12, 6, 7, 17, 5, 11]\n second_cluster_one_based_indices = [25, 32, 26, 29, 24, 28, 9, 34, 33, 19, 16, 31, 15, 10, 23, 30, 21, 27]\n assert len(first_cluster_one_based_indices + second_cluster_one_based_indices) == n\n assert list(sorted(first_cluster_one_based_indices + second_cluster_one_based_indices)) == range(1, n+1)\n ordered_module_indices = []\n for i in range(n):\n if i+1 in first_cluster_one_based_indices:\n ordered_module_indices.append(0)\n else:\n ordered_module_indices.append(1)\n # print the modularity\n Q = get_modularity_other_b(A, ordered_module_indices)\n print 'modularity calculated using my interpretation of the method of the paper', Q\n Q = get_modularity_other_b2(A, ordered_module_indices)\n print 'modularity calculated using a modification of my interpretation of the method of the paper', Q\n Q = get_modularity_other_c(A, ordered_module_indices)\n print 'modularity calculated using the method on wikipedia', Q\n Q = get_eric_modularity(A, ordered_module_indices)\n print 'modularity calculated using the method eric used:', Q\n print 'expected modularity: .375 +/- .025'", "def allele_match_counts(self, map_hit):\n if map_hit.NM == 0:\n l = self.allele_end - self.allele_start + 1\n return l, l\n\n # If there are mismatches, then we can use the extended cigar string to\n # work out how many of those mismatches are in the allele.\n # Example cigar to remind which way round I and D are:\n # read: AGT--TGATCAAGTAC\n # ref: AGTGATGATC----AC\n # cigar: 3M2D5M4I2M\n probe_pos = map_hit.q_st\n total_positions = 0\n matches = 0\n\n if map_hit.strand == -1:\n map_hit.cigar.reverse()\n\n for length, operator in map_hit.cigar:\n if probe_pos > self.allele_end:\n break\n\n if operator == 7 or operator == 8: # 7,8 are \"=\",\"X\" == match/mismatch\n for i in range(length):\n if self.allele_start <= probe_pos <= self.allele_end:\n if operator == 7:\n matches += 1\n total_positions += 1\n probe_pos += 1\n if probe_pos > self.allele_end:\n break\n elif operator == 1: # 1 = I = insertion\n if self.allele_start <= probe_pos <= self.allele_end:\n total_positions += length\n probe_pos += length\n elif operator == 2: # 2 = D = deletion\n if 
self.allele_start <= probe_pos <= self.allele_end:\n total_positions += length\n else:\n raise RuntimeError(\n f\"Unexpected cigar operator number {operator} with length {length} from cigar\"\n )\n\n if map_hit.strand == -1:\n map_hit.cigar.reverse()\n\n return matches, total_positions", "def test_G_ind(self):\r\n a = array([[29, 11], [273, 191], [8, 31], [64, 64]])\r\n self.assertFloatEqual(G_ind(a)[0], 28.59642)\r\n self.assertFloatEqual(G_ind(a, True)[0], 28.31244)", "def step_count(group_idx):\n cmp_pos = 0\n steps = 1\n if len(group_idx) < 1:\n return 0\n for i in range(len(group_idx)):\n if group_idx[cmp_pos] != group_idx[i]:\n cmp_pos = i\n steps += 1\n return steps", "def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults", "def _extract_geographical_patterns(self):\n # take onehot encoding of zipcodes\n onehot = pd.get_dummies(self.df_transaction['zipcode'], prefix='zipcode')\n rider_id = pd.DataFrame(data={'riderID': self.df_transaction['riderID']})\n frames = [rider_id, onehot]\n df_onehot = pd.concat(frames, axis=1)\n\n # count zipcodes\n df_rider_geo_count = df_onehot.groupby(['riderID'])[list(onehot.columns.values)].sum().reset_index()\n df_rider_geo_count['geo_row_sum'] = df_rider_geo_count.iloc[:, 1:].sum(axis=1)\n\n return df_rider_geo_count", "def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])", "def getcolorcodeALA15(ramapath, N, ssize=5):\n\n from analyse_ala_15 import AngleCategorizer\n\n nResidues = 15\n #angles = np.loadtxt('rama_dataset_ala_15.xvg', 
skiprows=32, usecols=range(0, 2), delimiter=' ')\n angles = np.loadtxt(os.path.join(ramapath, 'rama_dataset_ala_15_1500.xvg'), skiprows=32, usecols=range(0, 2), delimiter=' ')\n nSamples = angles.shape[0]/15\n angles.resize(nSamples, nResidues, 2)\n angCat = AngleCategorizer(angles)\n angCat.categorize()\n angCat.countConfigurations()\n colInd = angCat.getColorMatrix()\n alphaInd = angCat.getAlphaVals()\n\n marker = list()\n patchlist = list()\n\n marker.append('o')\n marker.append('o')\n marker.append('o')\n\n import matplotlib.patches as mpatches\n patchlist.append(mpatches.Patch(color='black', label=r'$\\alpha$'))\n patchlist.append(mpatches.Patch(color='blue', label=r'$\\beta$-1'))\n patchlist.append(mpatches.Patch(color='red', label=r'$\\beta$-2'))\n\n alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\\alpha$')\n beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\\beta\\textnormal{-}1$')\n beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\\beta\\textnormal{-}2$')\n plt.close()\n\n patchlist = [alpha, beta1, beta2]\n\n return colInd, marker, patchlist, alphaInd", "def _find_max_number_of_grouping(cls, reserved_seats, k):\n # print(reserved_seats)\n n = len(reserved_seats)\n count_groups = 0\n count_empty_contigous_seats = 0\n i = 0\n while i < n:\n if reserved_seats[i] != 0:\n # print('continue', i)\n count_empty_contigous_seats = 0\n i += 1\n continue\n\n count_empty_contigous_seats += 1\n # print('empty', i, count_empty_contigous_seats)\n if count_empty_contigous_seats >= k:\n count_groups += 1\n # print('found', i, count_groups)\n\n if ((i + 1) % len(cls._PLANE_ROW)) == 0:\n # print('new row', i)\n count_empty_contigous_seats = 0\n\n i += 1\n\n return count_groups", "def get_out_idx():\n exacz = pd.read_csv(f'{home}/ref/exac/exac_zscore_mimssense+stopgain_gn_checked.txt', sep='\\t')\n exacz = exacz[['gn', 'conseq', 'exac_z', 'exac_zrank']]\n\n gdi = pd.read_csv(f'{home}/work/generisk/gdi/gdi_score_pnas_gn_checked.txt', sep=\"\\t\")\n gdi = gdi[['gn', 'gdi', 'gdi_phred_raw']]\n gdi['gdi_rank'] = 100 - round(gdi['gdi'].rank() / len(gdi.index) * 100, 2)\n\n rvis = pd.read_csv(f\"{home}/ref/rvis/rvis_lite.txt\", sep='\\t')\n\n out_idx = pd.merge(exacz, gdi, on='gn', how='outer')\n out_idx = pd.merge(out_idx, rvis, on='gn', how='outer')\n\n # merge with omim\n omim = pd.read_csv(f\"{home}/ref/omim/omim_dedup.tsv\", sep='\\t', usecols='gn,inher'.split(\",\"))\n out_idx = pd.merge(out_idx, omim, on='gn', how='left')\n out_idx['inher'] = out_idx['inher'].fillna('na')\n\n # 18090\n out_idx = out_idx.loc[out_idx.conseq == 'missense_variant'].drop('conseq', axis=1)\n\n out_idx.to_pickle(f'{home}/gr/final/out_idx.pk')\n\n return out_idx", "def count_match_mismatch(data, index):\n\tsubjects = set()\n\tnew_data = []\n\tfor line in data:\n\t\tif line[\"mark_name\"] == \"match_3D7_\" + str(index):\n\t\t\tsubjects.add(line[\"subject\"])\n\t\t\tnew_data.append(line)\n\tmatch_map = {}\n\tmismatch_map = {}\t\n\tfor subject in subjects:\n\t\tmatch_map[subject] = 0\n\t\tmismatch_map[subject] = 0\t\t\n\t\tfor line in new_data:\n\t\t\tif line[\"subject\"] == subject and line[\"mark_value\"] == \"1\":\n\t\t\t\tmatch_map[subject] = 1\n\t\t\tif line[\"subject\"] == subject and line[\"mark_value\"] == \"0\":\n\t\t\t\tmismatch_map[subject] = 1\t\t\t\t\n\tmatch_count = 0\n\tmismatch_count = 0\t\n\tfor subject, value in match_map.items():\n\t\tmatch_count += value\n\tfor subject, value in mismatch_map.items():\n\t\tmismatch_count += value\t\t\n\treturn 
(match_count, mismatch_count)", "def test_3_2_4D_rec_splits(self):\n check = [(-2.0, 3.0, -1.0, 3.0), (9.0, 10.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 3.0), (9.0, 10.0, -1.0, 3.0),\n (9.0, 10.0, 1.0, 3.0), (9.0, 10.0, -1.0, 5.0),\n (9.0, 3.0, 1.0, 3.0), (9.0, 3.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 5.0), (-2.0, 10.0, -1.0, 3.0),\n (-2.0, 10.0, 1.0, 3.0), (-2.0, 10.0, 1.0, 5.0),\n (-2.0, 10.0, -1.0, 5.0), (-2.0, 3.0, 1.0, 3.0),\n (-2.0, 3.0, 1.0, 5.0), (-2.0, 3.0, -1.0, 5.0),\n (3.5, 6.5, 0.0, 4.0), (-2.0, 6.5, 0.0, 4.0),\n (-2.0, 3.0, 0.0, 4.0), (-2.0, 3.0, -1.0, 4.0),\n (-2.0, 3.0, 0.0, 3.0), (-2.0, 6.5, -1.0, 4.0),\n (-2.0, 6.5, -1.0, 3.0), (-2.0, 6.5, 0.0, 3.0),\n (3.5, 3.0, 0.0, 4.0), (3.5, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 3.0), (3.5, 3.0, 0.0, 3.0),\n (3.5, 6.5, -1.0, 4.0), (3.5, 6.5, -1.0, 3.0),\n (3.5, 6.5, 0.0, 3.0), (0.75, 4.75, -0.5, 3.5),\n (9.0, 6.5, 0.0, 4.0), (9.0, 10.0, 0.0, 4.0),\n (9.0, 10.0, 1.0, 4.0), (9.0, 10.0, 0.0, 5.0),\n (9.0, 6.5, 1.0, 4.0), (9.0, 6.5, 1.0, 5.0),\n (9.0, 6.5, 0.0, 5.0), (3.5, 10.0, 0.0, 4.0),\n (3.5, 10.0, 1.0, 4.0), (3.5, 10.0, 1.0, 5.0),\n (3.5, 10.0, 0.0, 5.0), (3.5, 6.5, 1.0, 4.0),\n (3.5, 6.5, 1.0, 5.0), (3.5, 6.5, 0.0, 5.0),\n (6.25, 8.25, 0.5, 4.5), (9.0, 3.0, 0.0, 4.0),\n (9.0, 3.0, -1.0, 4.0), (9.0, 3.0, 0.0, 3.0),\n (9.0, 6.5, -1.0, 4.0), (9.0, 6.5, -1.0, 3.0),\n (9.0, 6.5, 0.0, 3.0), (6.25, 4.75, -0.5, 3.5),\n (9.0, 10.0, -1.0, 4.0), (9.0, 10.0, 0.0, 3.0),\n (3.5, 10.0, -1.0, 4.0), (3.5, 10.0, -1.0, 3.0),\n (3.5, 10.0, 0.0, 3.0), (6.25, 8.25, -0.5, 3.5),\n (9.0, 6.5, 1.0, 3.0), (3.5, 10.0, 1.0, 3.0),\n (3.5, 6.5, 1.0, 3.0), (6.25, 8.25, 0.5, 3.5),\n (9.0, 6.5, -1.0, 5.0), (3.5, 10.0, -1.0, 5.0),\n (3.5, 6.5, -1.0, 5.0), (6.25, 8.25, -0.5, 4.5),\n (9.0, 3.0, 1.0, 4.0), (3.5, 3.0, 1.0, 4.0),\n (3.5, 3.0, 1.0, 3.0), (6.25, 4.75, 0.5, 3.5),\n (9.0, 3.0, 0.0, 5.0), (3.5, 3.0, 1.0, 5.0),\n (3.5, 3.0, 0.0, 5.0), (6.25, 4.75, 0.5, 4.5),\n (3.5, 3.0, -1.0, 5.0), (6.25, 4.75, -0.5, 4.5),\n (-2.0, 10.0, 0.0, 4.0), (-2.0, 10.0, -1.0, 4.0),\n (-2.0, 10.0, 0.0, 3.0), (0.75, 8.25, -0.5, 3.5),\n (-2.0, 10.0, 1.0, 4.0), (-2.0, 6.5, 1.0, 4.0),\n (-2.0, 6.5, 1.0, 3.0), (0.75, 8.25, 0.5, 3.5),\n (-2.0, 10.0, 0.0, 5.0), (-2.0, 6.5, 1.0, 5.0),\n (-2.0, 6.5, 0.0, 5.0), (0.75, 8.25, 0.5, 4.5),\n (-2.0, 6.5, -1.0, 5.0), (0.75, 8.25, -0.5, 4.5),\n (-2.0, 3.0, 1.0, 4.0), (0.75, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.0, 5.0), (0.75, 4.75, 0.5, 4.5),\n (0.75, 4.75, -0.5, 4.5), (3.5, 4.75, -0.5, 3.5),\n (3.5, 6.5, -0.5, 3.5), (3.5, 6.5, 0.0, 3.5),\n (3.5, 6.5, -0.5, 4.0), (3.5, 4.75, 0.0, 3.5),\n (3.5, 4.75, 0.0, 4.0), (3.5, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 3.5), (0.75, 6.5, 0.0, 3.5),\n (0.75, 6.5, 0.0, 4.0), (0.75, 6.5, -0.5, 4.0),\n (0.75, 4.75, 0.0, 3.5), (0.75, 4.75, 0.0, 4.0),\n (0.75, 4.75, -0.5, 4.0), (2.125, 5.625, -0.25, 3.75),\n (-2.0, 4.75, -0.5, 3.5), (-2.0, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 3.5), (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.5), (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -0.5, 3.0), (0.75, 3.0, -0.5, 3.5),\n (0.75, 3.0, -1.0, 3.5), (0.75, 3.0, -1.0, 3.0),\n (0.75, 3.0, -0.5, 3.0), (0.75, 4.75, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0), (0.75, 4.75, -0.5, 3.0),\n (-0.625, 3.875, -0.75, 3.25), (-2.0, 6.5, -0.5, 3.5),\n (-2.0, 6.5, 0.0, 3.5), (-2.0, 6.5, -0.5, 4.0),\n (-2.0, 4.75, 0.0, 3.5), (-2.0, 4.75, 0.0, 4.0),\n (-2.0, 4.75, -0.5, 4.0), (-0.625, 5.625, -0.25, 3.75),\n (-2.0, 3.0, 0.0, 3.5), (-2.0, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 3.5), (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.0), (-0.625, 3.875, -0.25, 3.75),\n (-2.0, 4.75, -1.0, 4.0), 
(0.75, 3.0, -1.0, 4.0),\n (0.75, 4.75, -1.0, 4.0), (-0.625, 3.875, -0.75, 3.75),\n (-2.0, 4.75, 0.0, 3.0), (0.75, 3.0, 0.0, 3.0),\n (0.75, 4.75, 0.0, 3.0), (-0.625, 3.875, -0.25, 3.25),\n (-2.0, 6.5, -1.0, 3.5), (0.75, 6.5, -1.0, 3.5),\n (0.75, 6.5, -1.0, 4.0), (-0.625, 5.625, -0.75, 3.75),\n (-2.0, 6.5, -0.5, 3.0), (0.75, 6.5, -1.0, 3.0),\n (0.75, 6.5, -0.5, 3.0), (-0.625, 5.625, -0.75, 3.25),\n (0.75, 6.5, 0.0, 3.0), (-0.625, 5.625, -0.25, 3.25),\n (3.5, 3.0, -0.5, 3.5), (3.5, 3.0, 0.0, 3.5),\n (3.5, 3.0, -0.5, 4.0), (2.125, 3.875, -0.25, 3.75),\n (3.5, 3.0, -1.0, 3.5), (3.5, 4.75, -1.0, 3.5),\n (3.5, 4.75, -1.0, 4.0), (2.125, 3.875, -0.75, 3.75),\n (3.5, 3.0, -0.5, 3.0), (3.5, 4.75, -1.0, 3.0),\n (3.5, 4.75, -0.5, 3.0), (2.125, 3.875, -0.75, 3.25),\n (3.5, 4.75, 0.0, 3.0), (2.125, 3.875, -0.25, 3.25),\n (3.5, 6.5, -1.0, 3.5), (2.125, 5.625, -0.75, 3.75),\n (3.5, 6.5, -0.5, 3.0), (2.125, 5.625, -0.75, 3.25),\n (2.125, 5.625, -0.25, 3.25), (3.5, 8.25, 0.5, 4.5),\n (3.5, 6.5, 0.5, 4.5), (3.5, 6.5, 0.0, 4.5),\n (3.5, 6.5, 0.5, 4.0), (3.5, 8.25, 0.0, 4.5),\n (3.5, 8.25, 0.0, 4.0), (3.5, 8.25, 0.5, 4.0),\n (6.25, 6.5, 0.5, 4.5), (6.25, 6.5, 0.0, 4.5),\n (6.25, 6.5, 0.0, 4.0), (6.25, 6.5, 0.5, 4.0),\n (6.25, 8.25, 0.0, 4.5), (6.25, 8.25, 0.0, 4.0),\n (6.25, 8.25, 0.5, 4.0), (4.875, 7.375, 0.25, 4.25),\n (9.0, 8.25, 0.5, 4.5), (9.0, 10.0, 0.5, 4.5),\n (9.0, 10.0, 1.0, 4.5), (9.0, 10.0, 0.5, 5.0),\n (9.0, 8.25, 1.0, 4.5), (9.0, 8.25, 1.0, 5.0),\n (9.0, 8.25, 0.5, 5.0), (6.25, 10.0, 0.5, 4.5),\n (6.25, 10.0, 1.0, 4.5), (6.25, 10.0, 1.0, 5.0),\n (6.25, 10.0, 0.5, 5.0), (6.25, 8.25, 1.0, 4.5),\n (6.25, 8.25, 1.0, 5.0), (6.25, 8.25, 0.5, 5.0),\n (7.625, 9.125, 0.75, 4.75), (9.0, 6.5, 0.5, 4.5),\n (9.0, 6.5, 0.0, 4.5), (9.0, 6.5, 0.5, 4.0),\n (9.0, 8.25, 0.0, 4.5), (9.0, 8.25, 0.0, 4.0),\n (9.0, 8.25, 0.5, 4.0), (7.625, 7.375, 0.25, 4.25),\n (9.0, 10.0, 0.0, 4.5), (9.0, 10.0, 0.5, 4.0),\n (6.25, 10.0, 0.0, 4.5), (6.25, 10.0, 0.0, 4.0),\n (6.25, 10.0, 0.5, 4.0), (7.625, 9.125, 0.25, 4.25),\n (9.0, 8.25, 1.0, 4.0), (6.25, 10.0, 1.0, 4.0),\n (6.25, 8.25, 1.0, 4.0), (7.625, 9.125, 0.75, 4.25),\n (9.0, 8.25, 0.0, 5.0), (6.25, 10.0, 0.0, 5.0),\n (6.25, 8.25, 0.0, 5.0), (7.625, 9.125, 0.25, 4.75),\n (9.0, 6.5, 1.0, 4.5), (6.25, 6.5, 1.0, 4.5),\n (6.25, 6.5, 1.0, 4.0), (7.625, 7.375, 0.75, 4.25),\n (9.0, 6.5, 0.5, 5.0), (6.25, 6.5, 1.0, 5.0),\n (6.25, 6.5, 0.5, 5.0), (7.625, 7.375, 0.75, 4.75),\n (6.25, 6.5, 0.0, 5.0), (7.625, 7.375, 0.25, 4.75),\n (3.5, 10.0, 0.5, 4.5), (3.5, 10.0, 0.0, 4.5),\n (3.5, 10.0, 0.5, 4.0), (4.875, 9.125, 0.25, 4.25),\n (3.5, 10.0, 1.0, 4.5), (3.5, 8.25, 1.0, 4.5),\n (3.5, 8.25, 1.0, 4.0), (4.875, 9.125, 0.75, 4.25),\n (3.5, 10.0, 0.5, 5.0), (3.5, 8.25, 1.0, 5.0),\n (3.5, 8.25, 0.5, 5.0), (4.875, 9.125, 0.75, 4.75),\n (3.5, 8.25, 0.0, 5.0), (4.875, 9.125, 0.25, 4.75),\n (3.5, 6.5, 1.0, 4.5), (4.875, 7.375, 0.75, 4.25),\n (3.5, 6.5, 0.5, 5.0), (4.875, 7.375, 0.75, 4.75),\n (4.875, 7.375, 0.25, 4.75), (6.25, 6.5, -0.5, 3.5),\n (6.25, 6.5, 0.0, 3.5), (6.25, 6.5, -0.5, 4.0),\n (6.25, 4.75, 0.0, 3.5), (6.25, 4.75, 0.0, 4.0),\n (6.25, 4.75, -0.5, 4.0), (4.875, 5.625, -0.25, 3.75),\n (9.0, 4.75, -0.5, 3.5), (9.0, 3.0, -0.5, 3.5),\n (9.0, 3.0, -1.0, 3.5), (9.0, 3.0, -0.5, 3.0),\n (9.0, 4.75, -1.0, 3.5), (9.0, 4.75, -1.0, 3.0),\n (9.0, 4.75, -0.5, 3.0), (6.25, 3.0, -0.5, 3.5),\n (6.25, 3.0, -1.0, 3.5), (6.25, 3.0, -1.0, 3.0),\n (6.25, 3.0, -0.5, 3.0), (6.25, 4.75, -1.0, 3.5),\n (6.25, 4.75, -1.0, 3.0), (6.25, 4.75, -0.5, 3.0),\n (7.625, 3.875, -0.75, 3.25), (9.0, 6.5, -0.5, 3.5),\n (9.0, 
6.5, 0.0, 3.5), (9.0, 6.5, -0.5, 4.0),\n (9.0, 4.75, 0.0, 3.5), (9.0, 4.75, 0.0, 4.0),\n (9.0, 4.75, -0.5, 4.0), (7.625, 5.625, -0.25, 3.75),\n (9.0, 3.0, 0.0, 3.5), (9.0, 3.0, -0.5, 4.0),\n (6.25, 3.0, 0.0, 3.5), (6.25, 3.0, 0.0, 4.0),\n (6.25, 3.0, -0.5, 4.0), (7.625, 3.875, -0.25, 3.75),\n (9.0, 4.75, -1.0, 4.0), (6.25, 3.0, -1.0, 4.0),\n (6.25, 4.75, -1.0, 4.0), (7.625, 3.875, -0.75, 3.75),\n (9.0, 4.75, 0.0, 3.0), (6.25, 3.0, 0.0, 3.0),\n (6.25, 4.75, 0.0, 3.0), (7.625, 3.875, -0.25, 3.25),\n (9.0, 6.5, -1.0, 3.5), (6.25, 6.5, -1.0, 3.5),\n (6.25, 6.5, -1.0, 4.0), (7.625, 5.625, -0.75, 3.75),\n (9.0, 6.5, -0.5, 3.0), (6.25, 6.5, -1.0, 3.0),\n (6.25, 6.5, -0.5, 3.0), (7.625, 5.625, -0.75, 3.25),\n (6.25, 6.5, 0.0, 3.0), (7.625, 5.625, -0.25, 3.25),\n (4.875, 3.875, -0.25, 3.75), (4.875, 3.875, -0.75, 3.75),\n (4.875, 3.875, -0.75, 3.25), (4.875, 3.875, -0.25, 3.25),\n (4.875, 5.625, -0.75, 3.75), (4.875, 5.625, -0.75, 3.25),\n (4.875, 5.625, -0.25, 3.25), (3.5, 8.25, -0.5, 3.5),\n (3.5, 8.25, 0.0, 3.5), (3.5, 8.25, -0.5, 4.0),\n (6.25, 8.25, 0.0, 3.5), (6.25, 8.25, -0.5, 4.0),\n (4.875, 7.375, -0.25, 3.75), (9.0, 8.25, -0.5, 3.5),\n (9.0, 10.0, -0.5, 3.5), (9.0, 10.0, -1.0, 3.5),\n (9.0, 10.0, -0.5, 3.0), (9.0, 8.25, -1.0, 3.5),\n (9.0, 8.25, -1.0, 3.0), (9.0, 8.25, -0.5, 3.0),\n (6.25, 10.0, -0.5, 3.5), (6.25, 10.0, -1.0, 3.5),\n (6.25, 10.0, -1.0, 3.0), (6.25, 10.0, -0.5, 3.0),\n (6.25, 8.25, -1.0, 3.5), (6.25, 8.25, -1.0, 3.0),\n (6.25, 8.25, -0.5, 3.0), (7.625, 9.125, -0.75, 3.25),\n (9.0, 8.25, 0.0, 3.5), (9.0, 8.25, -0.5, 4.0),\n (7.625, 7.375, -0.25, 3.75), (9.0, 10.0, 0.0, 3.5),\n (9.0, 10.0, -0.5, 4.0), (6.25, 10.0, 0.0, 3.5),\n (6.25, 10.0, -0.5, 4.0), (7.625, 9.125, -0.25, 3.75),\n (9.0, 8.25, -1.0, 4.0), (6.25, 10.0, -1.0, 4.0),\n (6.25, 8.25, -1.0, 4.0), (7.625, 9.125, -0.75, 3.75),\n (9.0, 8.25, 0.0, 3.0), (6.25, 10.0, 0.0, 3.0),\n (6.25, 8.25, 0.0, 3.0), (7.625, 9.125, -0.25, 3.25),\n (7.625, 7.375, -0.75, 3.75), (7.625, 7.375, -0.75, 3.25),\n (7.625, 7.375, -0.25, 3.25), (3.5, 10.0, -0.5, 3.5),\n (3.5, 10.0, 0.0, 3.5), (3.5, 10.0, -0.5, 4.0),\n (4.875, 9.125, -0.25, 3.75), (3.5, 10.0, -1.0, 3.5),\n (3.5, 8.25, -1.0, 3.5), (3.5, 8.25, -1.0, 4.0),\n (4.875, 9.125, -0.75, 3.75), (3.5, 10.0, -0.5, 3.0),\n (3.5, 8.25, -1.0, 3.0), (3.5, 8.25, -0.5, 3.0),\n (4.875, 9.125, -0.75, 3.25), (3.5, 8.25, 0.0, 3.0),\n (4.875, 9.125, -0.25, 3.25), (4.875, 7.375, -0.75, 3.75),\n (4.875, 7.375, -0.75, 3.25), (4.875, 7.375, -0.25, 3.25),\n (3.5, 8.25, 0.5, 3.5), (3.5, 6.5, 0.5, 3.5),\n (6.25, 6.5, 0.5, 3.5), (4.875, 7.375, 0.25, 3.75),\n (9.0, 8.25, 0.5, 3.5), (9.0, 10.0, 0.5, 3.5),\n (9.0, 10.0, 1.0, 3.5), (9.0, 10.0, 0.5, 3.0),\n (9.0, 8.25, 1.0, 3.5), (9.0, 8.25, 1.0, 3.0),\n (9.0, 8.25, 0.5, 3.0), (6.25, 10.0, 0.5, 3.5),\n (6.25, 10.0, 1.0, 3.5), (6.25, 10.0, 1.0, 3.0),\n (6.25, 10.0, 0.5, 3.0), (6.25, 8.25, 1.0, 3.5),\n (6.25, 8.25, 1.0, 3.0), (6.25, 8.25, 0.5, 3.0),\n (7.625, 9.125, 0.75, 3.25), (9.0, 6.5, 0.5, 3.5),\n (7.625, 7.375, 0.25, 3.75), (7.625, 9.125, 0.25, 3.75),\n (7.625, 9.125, 0.75, 3.75), (7.625, 9.125, 0.25, 3.25),\n (9.0, 6.5, 1.0, 3.5), (6.25, 6.5, 1.0, 3.5),\n (7.625, 7.375, 0.75, 3.75), (9.0, 6.5, 0.5, 3.0),\n (6.25, 6.5, 1.0, 3.0), (6.25, 6.5, 0.5, 3.0),\n (7.625, 7.375, 0.75, 3.25), (7.625, 7.375, 0.25, 3.25),\n (3.5, 10.0, 0.5, 3.5), (4.875, 9.125, 0.25, 3.75),\n (3.5, 10.0, 1.0, 3.5), (3.5, 8.25, 1.0, 3.5),\n (4.875, 9.125, 0.75, 3.75), (3.5, 10.0, 0.5, 3.0),\n (3.5, 8.25, 1.0, 3.0), (3.5, 8.25, 0.5, 3.0),\n (4.875, 9.125, 0.75, 3.25), (4.875, 9.125, 0.25, 
3.25),\n (3.5, 6.5, 1.0, 3.5), (4.875, 7.375, 0.75, 3.75),\n (3.5, 6.5, 0.5, 3.0), (4.875, 7.375, 0.75, 3.25),\n (4.875, 7.375, 0.25, 3.25), (3.5, 8.25, -0.5, 4.5),\n (3.5, 6.5, -0.5, 4.5), (6.25, 6.5, -0.5, 4.5),\n (4.875, 7.375, -0.25, 4.25), (9.0, 8.25, -0.5, 4.5),\n (9.0, 10.0, -0.5, 4.5), (9.0, 10.0, -1.0, 4.5),\n (9.0, 10.0, -0.5, 5.0), (9.0, 8.25, -1.0, 4.5),\n (9.0, 8.25, -1.0, 5.0), (9.0, 8.25, -0.5, 5.0),\n (6.25, 10.0, -0.5, 4.5), (6.25, 10.0, -1.0, 4.5),\n (6.25, 10.0, -1.0, 5.0), (6.25, 10.0, -0.5, 5.0),\n (6.25, 8.25, -1.0, 4.5), (6.25, 8.25, -1.0, 5.0),\n (6.25, 8.25, -0.5, 5.0), (7.625, 9.125, -0.75, 4.75),\n (9.0, 6.5, -0.5, 4.5), (7.625, 7.375, -0.25, 4.25),\n (7.625, 9.125, -0.25, 4.25), (7.625, 9.125, -0.75, 4.25),\n (7.625, 9.125, -0.25, 4.75), (9.0, 6.5, -1.0, 4.5),\n (6.25, 6.5, -1.0, 4.5), (7.625, 7.375, -0.75, 4.25),\n (9.0, 6.5, -0.5, 5.0), (6.25, 6.5, -1.0, 5.0),\n (6.25, 6.5, -0.5, 5.0), (7.625, 7.375, -0.75, 4.75),\n (7.625, 7.375, -0.25, 4.75), (3.5, 10.0, -0.5, 4.5),\n (4.875, 9.125, -0.25, 4.25), (3.5, 10.0, -1.0, 4.5),\n (3.5, 8.25, -1.0, 4.5), (4.875, 9.125, -0.75, 4.25),\n (3.5, 10.0, -0.5, 5.0), (3.5, 8.25, -1.0, 5.0),\n (3.5, 8.25, -0.5, 5.0), (4.875, 9.125, -0.75, 4.75),\n (4.875, 9.125, -0.25, 4.75), (3.5, 6.5, -1.0, 4.5),\n (4.875, 7.375, -0.75, 4.25), (3.5, 6.5, -0.5, 5.0),\n (4.875, 7.375, -0.75, 4.75), (4.875, 7.375, -0.25, 4.75),\n (3.5, 4.75, 0.5, 3.5), (3.5, 4.75, 0.5, 4.0),\n (6.25, 4.75, 0.5, 4.0), (4.875, 5.625, 0.25, 3.75),\n (9.0, 4.75, 0.5, 3.5), (9.0, 3.0, 0.5, 3.5),\n (9.0, 3.0, 1.0, 3.5), (9.0, 3.0, 0.5, 3.0),\n (9.0, 4.75, 1.0, 3.5), (9.0, 4.75, 1.0, 3.0),\n (9.0, 4.75, 0.5, 3.0), (6.25, 3.0, 0.5, 3.5),\n (6.25, 3.0, 1.0, 3.5), (6.25, 3.0, 1.0, 3.0),\n (6.25, 3.0, 0.5, 3.0), (6.25, 4.75, 1.0, 3.5),\n (6.25, 4.75, 1.0, 3.0), (6.25, 4.75, 0.5, 3.0),\n (7.625, 3.875, 0.75, 3.25), (9.0, 4.75, 0.5, 4.0),\n (7.625, 5.625, 0.25, 3.75), (9.0, 3.0, 0.5, 4.0),\n (6.25, 3.0, 0.5, 4.0), (7.625, 3.875, 0.25, 3.75),\n (9.0, 4.75, 1.0, 4.0), (6.25, 3.0, 1.0, 4.0),\n (6.25, 4.75, 1.0, 4.0), (7.625, 3.875, 0.75, 3.75),\n (7.625, 3.875, 0.25, 3.25), (7.625, 5.625, 0.75, 3.75),\n (7.625, 5.625, 0.75, 3.25), (7.625, 5.625, 0.25, 3.25),\n (3.5, 3.0, 0.5, 3.5), (3.5, 3.0, 0.5, 4.0),\n (4.875, 3.875, 0.25, 3.75), (3.5, 3.0, 1.0, 3.5),\n (3.5, 4.75, 1.0, 3.5), (3.5, 4.75, 1.0, 4.0),\n (4.875, 3.875, 0.75, 3.75), (3.5, 3.0, 0.5, 3.0),\n (3.5, 4.75, 1.0, 3.0), (3.5, 4.75, 0.5, 3.0),\n (4.875, 3.875, 0.75, 3.25), (4.875, 3.875, 0.25, 3.25),\n (4.875, 5.625, 0.75, 3.75), (4.875, 5.625, 0.75, 3.25),\n (4.875, 5.625, 0.25, 3.25), (3.5, 4.75, 0.5, 4.5),\n (3.5, 4.75, 0.0, 4.5), (6.25, 4.75, 0.0, 4.5),\n (4.875, 5.625, 0.25, 4.25), (9.0, 4.75, 0.5, 4.5),\n (9.0, 3.0, 0.5, 4.5), (9.0, 3.0, 1.0, 4.5),\n (9.0, 3.0, 0.5, 5.0), (9.0, 4.75, 1.0, 4.5),\n (9.0, 4.75, 1.0, 5.0), (9.0, 4.75, 0.5, 5.0),\n (6.25, 3.0, 0.5, 4.5), (6.25, 3.0, 1.0, 4.5),\n (6.25, 3.0, 1.0, 5.0), (6.25, 3.0, 0.5, 5.0),\n (6.25, 4.75, 1.0, 4.5), (6.25, 4.75, 1.0, 5.0),\n (6.25, 4.75, 0.5, 5.0), (7.625, 3.875, 0.75, 4.75),\n (9.0, 4.75, 0.0, 4.5), (7.625, 5.625, 0.25, 4.25),\n (9.0, 3.0, 0.0, 4.5), (6.25, 3.0, 0.0, 4.5),\n (7.625, 3.875, 0.25, 4.25), (7.625, 3.875, 0.75, 4.25),\n (9.0, 4.75, 0.0, 5.0), (6.25, 3.0, 0.0, 5.0),\n (6.25, 4.75, 0.0, 5.0), (7.625, 3.875, 0.25, 4.75),\n (7.625, 5.625, 0.75, 4.25), (7.625, 5.625, 0.75, 4.75),\n (7.625, 5.625, 0.25, 4.75), (3.5, 3.0, 0.5, 4.5),\n (3.5, 3.0, 0.0, 4.5), (4.875, 3.875, 0.25, 4.25),\n (3.5, 3.0, 1.0, 4.5), (3.5, 4.75, 1.0, 4.5),\n (4.875, 3.875, 
0.75, 4.25), (3.5, 3.0, 0.5, 5.0),\n (3.5, 4.75, 1.0, 5.0), (3.5, 4.75, 0.5, 5.0),\n (4.875, 3.875, 0.75, 4.75), (3.5, 4.75, 0.0, 5.0),\n (4.875, 3.875, 0.25, 4.75), (4.875, 5.625, 0.75, 4.25),\n (4.875, 5.625, 0.75, 4.75), (4.875, 5.625, 0.25, 4.75),\n (3.5, 4.75, -0.5, 4.5), (4.875, 5.625, -0.25, 4.25),\n (9.0, 4.75, -0.5, 4.5), (9.0, 3.0, -0.5, 4.5),\n (9.0, 3.0, -1.0, 4.5), (9.0, 3.0, -0.5, 5.0),\n (9.0, 4.75, -1.0, 4.5), (9.0, 4.75, -1.0, 5.0),\n (9.0, 4.75, -0.5, 5.0), (6.25, 3.0, -0.5, 4.5),\n (6.25, 3.0, -1.0, 4.5), (6.25, 3.0, -1.0, 5.0),\n (6.25, 3.0, -0.5, 5.0), (6.25, 4.75, -1.0, 4.5),\n (6.25, 4.75, -1.0, 5.0), (6.25, 4.75, -0.5, 5.0),\n (7.625, 3.875, -0.75, 4.75), (7.625, 5.625, -0.25, 4.25),\n (7.625, 3.875, -0.25, 4.25), (7.625, 3.875, -0.75, 4.25),\n (7.625, 3.875, -0.25, 4.75), (7.625, 5.625, -0.75, 4.25),\n (7.625, 5.625, -0.75, 4.75), (7.625, 5.625, -0.25, 4.75),\n (3.5, 3.0, -0.5, 4.5), (4.875, 3.875, -0.25, 4.25),\n (3.5, 3.0, -1.0, 4.5), (3.5, 4.75, -1.0, 4.5),\n (4.875, 3.875, -0.75, 4.25), (3.5, 3.0, -0.5, 5.0),\n (3.5, 4.75, -1.0, 5.0), (3.5, 4.75, -0.5, 5.0),\n (4.875, 3.875, -0.75, 4.75), (4.875, 3.875, -0.25, 4.75),\n (4.875, 5.625, -0.75, 4.25), (4.875, 5.625, -0.75, 4.75),\n (4.875, 5.625, -0.25, 4.75), (0.75, 8.25, 0.0, 3.5),\n (0.75, 8.25, 0.0, 4.0), (0.75, 8.25, -0.5, 4.0),\n (2.125, 7.375, -0.25, 3.75), (-2.0, 8.25, -0.5, 3.5),\n (-2.0, 10.0, -0.5, 3.5), (-2.0, 10.0, -1.0, 3.5),\n (-2.0, 10.0, -0.5, 3.0), (-2.0, 8.25, -1.0, 3.5),\n (-2.0, 8.25, -1.0, 3.0), (-2.0, 8.25, -0.5, 3.0),\n (0.75, 10.0, -0.5, 3.5), (0.75, 10.0, -1.0, 3.5),\n (0.75, 10.0, -1.0, 3.0), (0.75, 10.0, -0.5, 3.0),\n (0.75, 8.25, -1.0, 3.5), (0.75, 8.25, -1.0, 3.0),\n (0.75, 8.25, -0.5, 3.0), (-0.625, 9.125, -0.75, 3.25),\n (-2.0, 8.25, 0.0, 3.5), (-2.0, 8.25, 0.0, 4.0),\n (-2.0, 8.25, -0.5, 4.0), (-0.625, 7.375, -0.25, 3.75),\n (-2.0, 10.0, 0.0, 3.5), (-2.0, 10.0, -0.5, 4.0),\n (0.75, 10.0, 0.0, 3.5), (0.75, 10.0, 0.0, 4.0),\n (0.75, 10.0, -0.5, 4.0), (-0.625, 9.125, -0.25, 3.75),\n (-2.0, 8.25, -1.0, 4.0), (0.75, 10.0, -1.0, 4.0),\n (0.75, 8.25, -1.0, 4.0), (-0.625, 9.125, -0.75, 3.75),\n (-2.0, 8.25, 0.0, 3.0), (0.75, 10.0, 0.0, 3.0),\n (0.75, 8.25, 0.0, 3.0), (-0.625, 9.125, -0.25, 3.25),\n (-0.625, 7.375, -0.75, 3.75), (-0.625, 7.375, -0.75, 3.25),\n (-0.625, 7.375, -0.25, 3.25), (2.125, 9.125, -0.25, 3.75),\n (2.125, 9.125, -0.75, 3.75), (2.125, 9.125, -0.75, 3.25),\n (2.125, 9.125, -0.25, 3.25), (2.125, 7.375, -0.75, 3.75),\n (2.125, 7.375, -0.75, 3.25), (2.125, 7.375, -0.25, 3.25),\n (0.75, 6.5, 0.5, 3.5), (0.75, 6.5, 0.5, 4.0),\n (0.75, 8.25, 0.5, 4.0), (2.125, 7.375, 0.25, 3.75),\n (-2.0, 8.25, 0.5, 3.5), (-2.0, 10.0, 0.5, 3.5),\n (-2.0, 10.0, 1.0, 3.5), (-2.0, 10.0, 0.5, 3.0),\n (-2.0, 8.25, 1.0, 3.5), (-2.0, 8.25, 1.0, 3.0),\n (-2.0, 8.25, 0.5, 3.0), (0.75, 10.0, 0.5, 3.5),\n (0.75, 10.0, 1.0, 3.5), (0.75, 10.0, 1.0, 3.0),\n (0.75, 10.0, 0.5, 3.0), (0.75, 8.25, 1.0, 3.5),\n (0.75, 8.25, 1.0, 3.0), (0.75, 8.25, 0.5, 3.0),\n (-0.625, 9.125, 0.75, 3.25), (-2.0, 6.5, 0.5, 3.5),\n (-2.0, 6.5, 0.5, 4.0), (-2.0, 8.25, 0.5, 4.0),\n (-0.625, 7.375, 0.25, 3.75), (-2.0, 10.0, 0.5, 4.0),\n (0.75, 10.0, 0.5, 4.0), (-0.625, 9.125, 0.25, 3.75),\n (-2.0, 8.25, 1.0, 4.0), (0.75, 10.0, 1.0, 4.0),\n (0.75, 8.25, 1.0, 4.0), (-0.625, 9.125, 0.75, 3.75),\n (-0.625, 9.125, 0.25, 3.25), (-2.0, 6.5, 1.0, 3.5),\n (0.75, 6.5, 1.0, 3.5), (0.75, 6.5, 1.0, 4.0),\n (-0.625, 7.375, 0.75, 3.75), (-2.0, 6.5, 0.5, 3.0),\n (0.75, 6.5, 1.0, 3.0), (0.75, 6.5, 0.5, 3.0),\n (-0.625, 7.375, 0.75, 3.25), (-0.625, 
7.375, 0.25, 3.25),\n (2.125, 9.125, 0.25, 3.75), (2.125, 9.125, 0.75, 3.75),\n (2.125, 9.125, 0.75, 3.25), (2.125, 9.125, 0.25, 3.25),\n (2.125, 7.375, 0.75, 3.75), (2.125, 7.375, 0.75, 3.25),\n (2.125, 7.375, 0.25, 3.25), (0.75, 6.5, 0.5, 4.5),\n (0.75, 6.5, 0.0, 4.5), (0.75, 8.25, 0.0, 4.5),\n (2.125, 7.375, 0.25, 4.25), (-2.0, 8.25, 0.5, 4.5),\n (-2.0, 10.0, 0.5, 4.5), (-2.0, 10.0, 1.0, 4.5),\n (-2.0, 10.0, 0.5, 5.0), (-2.0, 8.25, 1.0, 4.5),\n (-2.0, 8.25, 1.0, 5.0), (-2.0, 8.25, 0.5, 5.0),\n (0.75, 10.0, 0.5, 4.5), (0.75, 10.0, 1.0, 4.5),\n (0.75, 10.0, 1.0, 5.0), (0.75, 10.0, 0.5, 5.0),\n (0.75, 8.25, 1.0, 4.5), (0.75, 8.25, 1.0, 5.0),\n (0.75, 8.25, 0.5, 5.0), (-0.625, 9.125, 0.75, 4.75),\n (-2.0, 6.5, 0.5, 4.5), (-2.0, 6.5, 0.0, 4.5),\n (-2.0, 8.25, 0.0, 4.5), (-0.625, 7.375, 0.25, 4.25),\n (-2.0, 10.0, 0.0, 4.5), (0.75, 10.0, 0.0, 4.5),\n (-0.625, 9.125, 0.25, 4.25), (-0.625, 9.125, 0.75, 4.25),\n (-2.0, 8.25, 0.0, 5.0), (0.75, 10.0, 0.0, 5.0),\n (0.75, 8.25, 0.0, 5.0), (-0.625, 9.125, 0.25, 4.75),\n (-2.0, 6.5, 1.0, 4.5), (0.75, 6.5, 1.0, 4.5),\n (-0.625, 7.375, 0.75, 4.25), (-2.0, 6.5, 0.5, 5.0),\n (0.75, 6.5, 1.0, 5.0), (0.75, 6.5, 0.5, 5.0),\n (-0.625, 7.375, 0.75, 4.75), (0.75, 6.5, 0.0, 5.0),\n (-0.625, 7.375, 0.25, 4.75), (2.125, 9.125, 0.25, 4.25),\n (2.125, 9.125, 0.75, 4.25), (2.125, 9.125, 0.75, 4.75),\n (2.125, 9.125, 0.25, 4.75), (2.125, 7.375, 0.75, 4.25),\n (2.125, 7.375, 0.75, 4.75), (2.125, 7.375, 0.25, 4.75),\n (0.75, 6.5, -0.5, 4.5), (2.125, 7.375, -0.25, 4.25),\n (-2.0, 8.25, -0.5, 4.5), (-2.0, 10.0, -0.5, 4.5),\n (-2.0, 10.0, -1.0, 4.5), (-2.0, 10.0, -0.5, 5.0),\n (-2.0, 8.25, -1.0, 4.5), (-2.0, 8.25, -1.0, 5.0),\n (-2.0, 8.25, -0.5, 5.0), (0.75, 10.0, -0.5, 4.5),\n (0.75, 10.0, -1.0, 4.5), (0.75, 10.0, -1.0, 5.0),\n (0.75, 10.0, -0.5, 5.0), (0.75, 8.25, -1.0, 4.5),\n (0.75, 8.25, -1.0, 5.0), (0.75, 8.25, -0.5, 5.0),\n (-0.625, 9.125, -0.75, 4.75), (-2.0, 6.5, -0.5, 4.5),\n (-0.625, 7.375, -0.25, 4.25), (-0.625, 9.125, -0.25, 4.25),\n (-0.625, 9.125, -0.75, 4.25), (-0.625, 9.125, -0.25, 4.75),\n (-2.0, 6.5, -1.0, 4.5), (0.75, 6.5, -1.0, 4.5),\n (-0.625, 7.375, -0.75, 4.25), (-2.0, 6.5, -0.5, 5.0),\n (0.75, 6.5, -1.0, 5.0), (0.75, 6.5, -0.5, 5.0),\n (-0.625, 7.375, -0.75, 4.75), (-0.625, 7.375, -0.25, 4.75),\n (2.125, 9.125, -0.25, 4.25), (2.125, 9.125, -0.75, 4.25),\n (2.125, 9.125, -0.75, 4.75), (2.125, 9.125, -0.25, 4.75),\n (2.125, 7.375, -0.75, 4.25), (2.125, 7.375, -0.75, 4.75),\n (2.125, 7.375, -0.25, 4.75), (0.75, 4.75, 0.5, 4.0),\n (2.125, 5.625, 0.25, 3.75), (-2.0, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.5, 3.5), (-2.0, 3.0, 1.0, 3.5),\n (-2.0, 3.0, 0.5, 3.0), (-2.0, 4.75, 1.0, 3.5),\n (-2.0, 4.75, 1.0, 3.0), (-2.0, 4.75, 0.5, 3.0),\n (0.75, 3.0, 0.5, 3.5), (0.75, 3.0, 1.0, 3.5),\n (0.75, 3.0, 1.0, 3.0), (0.75, 3.0, 0.5, 3.0),\n (0.75, 4.75, 1.0, 3.5), (0.75, 4.75, 1.0, 3.0),\n (0.75, 4.75, 0.5, 3.0), (-0.625, 3.875, 0.75, 3.25),\n (-2.0, 4.75, 0.5, 4.0), (-0.625, 5.625, 0.25, 3.75),\n (-2.0, 3.0, 0.5, 4.0), (0.75, 3.0, 0.5, 4.0),\n (-0.625, 3.875, 0.25, 3.75), (-2.0, 4.75, 1.0, 4.0),\n (0.75, 3.0, 1.0, 4.0), (0.75, 4.75, 1.0, 4.0),\n (-0.625, 3.875, 0.75, 3.75), (-0.625, 3.875, 0.25, 3.25),\n (-0.625, 5.625, 0.75, 3.75), (-0.625, 5.625, 0.75, 3.25),\n (-0.625, 5.625, 0.25, 3.25), (2.125, 3.875, 0.25, 3.75),\n (2.125, 3.875, 0.75, 3.75), (2.125, 3.875, 0.75, 3.25),\n (2.125, 3.875, 0.25, 3.25), (2.125, 5.625, 0.75, 3.75),\n (2.125, 5.625, 0.75, 3.25), (2.125, 5.625, 0.25, 3.25),\n (0.75, 4.75, 0.0, 4.5), (2.125, 5.625, 0.25, 4.25),\n (-2.0, 4.75, 0.5, 4.5), 
(-2.0, 3.0, 0.5, 4.5),\n (-2.0, 3.0, 1.0, 4.5), (-2.0, 3.0, 0.5, 5.0),\n (-2.0, 4.75, 1.0, 4.5), (-2.0, 4.75, 1.0, 5.0),\n (-2.0, 4.75, 0.5, 5.0), (0.75, 3.0, 0.5, 4.5),\n (0.75, 3.0, 1.0, 4.5), (0.75, 3.0, 1.0, 5.0),\n (0.75, 3.0, 0.5, 5.0), (0.75, 4.75, 1.0, 4.5),\n (0.75, 4.75, 1.0, 5.0), (0.75, 4.75, 0.5, 5.0),\n (-0.625, 3.875, 0.75, 4.75), (-2.0, 4.75, 0.0, 4.5),\n (-0.625, 5.625, 0.25, 4.25), (-2.0, 3.0, 0.0, 4.5),\n (0.75, 3.0, 0.0, 4.5), (-0.625, 3.875, 0.25, 4.25),\n (-0.625, 3.875, 0.75, 4.25), (-2.0, 4.75, 0.0, 5.0),\n (0.75, 3.0, 0.0, 5.0), (0.75, 4.75, 0.0, 5.0),\n (-0.625, 3.875, 0.25, 4.75), (-0.625, 5.625, 0.75, 4.25),\n (-0.625, 5.625, 0.75, 4.75), (-0.625, 5.625, 0.25, 4.75),\n (2.125, 3.875, 0.25, 4.25), (2.125, 3.875, 0.75, 4.25),\n (2.125, 3.875, 0.75, 4.75), (2.125, 3.875, 0.25, 4.75),\n (2.125, 5.625, 0.75, 4.25), (2.125, 5.625, 0.75, 4.75),\n (2.125, 5.625, 0.25, 4.75), (2.125, 5.625, -0.25, 4.25),\n (-2.0, 4.75, -0.5, 4.5), (-2.0, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -1.0, 4.5), (-2.0, 3.0, -0.5, 5.0),\n (-2.0, 4.75, -1.0, 4.5), (-2.0, 4.75, -1.0, 5.0),\n (-2.0, 4.75, -0.5, 5.0), (0.75, 3.0, -0.5, 4.5),\n (0.75, 3.0, -1.0, 4.5), (0.75, 3.0, -1.0, 5.0),\n (0.75, 3.0, -0.5, 5.0), (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 5.0), (0.75, 4.75, -0.5, 5.0),\n (-0.625, 3.875, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.25),\n (-0.625, 3.875, -0.25, 4.25), (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.25, 4.75), (-0.625, 5.625, -0.75, 4.25),\n (-0.625, 5.625, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.75),\n (2.125, 3.875, -0.25, 4.25), (2.125, 3.875, -0.75, 4.25),\n (2.125, 3.875, -0.75, 4.75), (2.125, 3.875, -0.25, 4.75),\n (2.125, 5.625, -0.75, 4.25), (2.125, 5.625, -0.75, 4.75),\n (2.125, 5.625, -0.25, 4.75)]\n nn_checks = {(0.75, 3.0, -0.5, 4.0): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (2.125, 3.875, -0.25, 3.75),\n (2.125, 3.875, -0.75, 3.75),\n (2.125, 3.875, -0.25, 4.25),\n (2.125, 3.875, -0.75, 4.25),\n (3.5, 3.0, 0.0, 4.0),\n (3.5, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -0.5, 4.0),\n (-2.0, 3.0, 0.0, 4.0),\n (-0.625, 3.875, -0.25, 4.25),\n (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.75, 3.75),\n (0.75, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 4.0),\n (0.75, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 4.0),\n (0.75, 4.75, -0.5, 3.5),\n (-0.625, 3.875, -0.25, 3.75)],\n (-2.0, 3.0, -1.0, 3.0): [(-0.625, 3.875, -0.75, 3.25),\n (0.75, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.0),\n (0.75, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0),\n (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 3.0, -0.5, 3.5),\n (0.75, 4.75, -0.5, 3.0),\n (-2.0, 3.0, -1.0, 3.5),\n (-2.0, 4.75, -0.5, 3.5),\n (-2.0, 4.75, -0.5, 3.0),\n (0.75, 3.0, -0.5, 3.5)],\n (-0.625, 5.625, -0.75, 4.25): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (-2.0, 6.5, -1.0, 4.5),\n (-2.0, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.5),\n (-2.0, 4.75, -0.5, 4.5),\n (-2.0, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.0)]}\n\n init_triangulation(4, 2, check, nn_checks,\n bounds=[(-2, 9), (3, 10), (-1, 1), (3, 5)])", "def test_align_idx(self):\n self.amp4.rotateAng([5, 5, 5], ang='deg')\n al = align(self.amp3, self.amp4, mv=[0, 1, 2, 3], sv=[0, 1, 2, 3], method='idxPoints')\n 
all(self.assertAlmostEqual(al.m.vert[i, 0], al.s.vert[i, 0], delta=0.1) for i in range(al.s.vert.shape[0]))", "def get_Flagging(flagging_file, n_Rec, nChan, exp_count):\n\n line = subprocess.check_output(['grep','Flagged', flagging_file]) # grab the summary line\n str_line = line.decode('utf-8')\n TOKS = str_line.split()\n total_flagged_pct = float(TOKS[-2]) #data+autocorrelation\n total_uv = float(TOKS[7])\n\n # Getting data flagged percentage\n \n autocorr_flagged_pct = (36 * n_Rec * n_Chan / total_uv)*100.0\n data_flagged_pct = round(total_flagged_pct - autocorr_flagged_pct, 3)\n\n # Finding out which antenna has been flagged completely.\n ANT1, ANT2, FLAG = [], [], [] \n with open(flagging_file, 'r') as f:\n for line in f:\n if \"#\" not in line: # grep -v \"#\"\n if \"Flagged\" not in line: # grep -v \"Flagged\"\n if len(line.split())>2: # avoid new channel-wise summaries at end of flagSummary file\n TOKS=line.split()\n ant1 = int(TOKS[3])\n ant2 = int(TOKS[4])\n flag = float(TOKS[6])\n if (ant1 < ant2) and (flag == 100): # extract non-correlated antenna pairs with 100 percent flagging\n ANT1.append(ant1)\n ANT2.append(ant2)\n FLAG.append(flag)\n\n ant1, ant2, flag = np.asarray(ANT1), np.asarray(ANT2), np.asarray(FLAG)\n \n ANT_NAME = []\n for x in range(0,36):\n count1 = np.count_nonzero(ant1 == x)\n count2 = np.count_nonzero(ant2 == x)\n total_count = count1 + count2\n if total_count == exp_count:\n ant_num = x+1\n ant_name = 'ak'+ str(ant_num)\n ANT_NAME.append(ant_name)\n\n total_flagged_ant = len(ANT_NAME)\n \n flag_ant_file = 'flagged_antenna.txt'\n ffile = open(fig_dir + '/'+ flag_ant_file,'a')\n \n if total_flagged_ant > 1:\n ffile.write(flagging_file[-24:-18])\n ffile.write('\\n')\n for item in ANT_NAME:\n ffile.write(item)\n ffile.write('\\n')\n else:\n ffile.write(flagging_file[-24:-18])\n ffile.write('\\n none \\n')\n\n ffile.close()\n \n return data_flagged_pct, total_flagged_ant, flag_ant_file", "def test_can_traverse_tall_grid(self):\n grid = [\n [\"0\"],\n [\"1\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def count_accuracy(G_true, G):\n B_true = G_true != 0# nx.to_numpy_array(G_true) != 0\n B = G != 0# nx.to_numpy_array(G) != 0\n d = B.shape[0]\n # linear index of nonzeros\n pred = np.flatnonzero(B)\n cond = np.flatnonzero(B_true)\n cond_reversed = np.flatnonzero(B_true.T)\n cond_skeleton = np.concatenate([cond, cond_reversed])\n # true pos\n true_pos = np.intersect1d(pred, cond, assume_unique=True)\n\n # false pos\n false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)\n # reverse\n extra = np.setdiff1d(pred, cond, assume_unique=True)\n reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)\n # compute ratio\n pred_size = len(pred)\n cond_neg_size = 0.5 * d * (d - 1) - len(cond)\n fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)\n tpr = float(len(true_pos)) / max(len(cond), 1)\n fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)\n # structural hamming distance\n B_lower = np.tril(B + B.T)\n pred_lower = np.flatnonzero(B_lower)\n cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))\n extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)\n missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)\n shd = len(extra_lower) + len(missing_lower) + len(reverse)\n return shd, tpr, fpr, fdr, pred_size", "def visitspec(load,plate,mjd,fiber,gridfile='apg_rvsynthgrid',apstar=False) :\n grid 
= fits.open(os.environ['APOGEE_DIR']+'/data/synthgrid/'+gridfile+'.fits')\n if gridfile == 'apg_rvsynthgrid' : hdu=1\n elif gridfile == 'apg_rvsynthgrid_v2': hdu=0\n elif apstar : hdu=2\n else : hdu=1\n gridspec=grid[hdu].data\n gridwave = 10.**spectra.fits2vector(grid[hdu].header,2)\n griderr = np.ones(gridspec.shape[0])\n #for ispec in range(gridspec.shape[1]) :\n # cont = norm.cont(gridspec[:,ispec],griderr)\n # gridspec[:,ispec] /= cont\n\n data = load.apVisit(plate,mjd,fiber)\n\n # compare with DR14 \n comp(a,b,domatch=False,out='plots/dr14all')\n grid.append(['dr14all_1.png',''])\n xtit.append('all stars: DR14 (dotted) and test DR16 (solid)')\n\n comp(a,b,domatch=True,out='plots/dr14match')\n grid.append(['dr14match_1.png','dr14match_2.png'])\n xtit.append('same stars: DR14 (dotted) and test DR16 (solid)')\n # set bad pixels to nan\n shape=data[1].data.shape\n spec = copy.copy(data[1].data).flatten()\n specerr = copy.copy(data[2].data)\n specwave=data[4].data\n pixmask=bitmask.PixelBitMask()\n bd = np.where( ((data[3].data & pixmask.badval()) > 0) | \n ((data[3].data & pixmask.getval('SIG_SKYLINE')) > 0) ) [0]\n spec[bd] = np.nan\n spec = spec.reshape(shape)\n\n # continuum normalize and sample to grid\n outspec = np.full(len(gridwave),np.nan)\n if not apstar :\n # apVisit wavelengths are reversed\n spec=np.flip(spec)\n specwave=np.flip(specwave)\n specerr=np.flip(specerr)\n for ichip in range(3) :\n cont = norm.cont(spec[ichip,:],specerr[ichip,:])\n spec[ichip,:] /= cont\n gd=np.where(np.isfinite(spec[ichip,:]))[0]\n ip= interpolate.InterpolatedUnivariateSpline(specwave[ichip,gd],spec[ichip,gd],k=3)\n out = ip(gridwave)\n gd = np.where( (gridwave > specwave[ichip,0]) & (gridwave < specwave[ichip,-1]) )[0]\n outspec[gd] = out[gd]\n plt.plot(specwave[ichip,:],spec[ichip,:])\n plt.plot(gridwave[gd],out[gd])\n plt.show()\n\n for ispec in range(gridspec.shape[1]) :\n print(ispec)\n bd=np.where(np.isnan(outspec))\n outspec[bd]=1.\n out=correlate(outspec,gridspec[:,ispec])\n pdb.set_trace()", "def check_group(group):\n # Get the true classification from the longest reads\n true_species = group[group['file'].eq(f'OG_reads_{sample_letter}')]['classification'].iloc[0]\n print(true_species)\n # return a 1 if it's true across the group and 0 if not\n group['positives']= np.where(group['classification']==true_species, 1,0)\n # add our calcualtions to the results dictionary\n for row in group[['positives', 'file']].to_dict(orient=\"records\"):\n positive = row[\"positives\"]\n if positive:\n results[row[\"file\"]][0] += 1\n else:\n results[row[\"file\"]][1] += 1", "def count_islands(grid):\n grid_copy = list(grid)\n count = 0\n for i in range(0, len(grid_copy)):\n for j in range (0, len(grid_copy[0])):\n if grid[i][j] and grid_copy[i][j]:\n _dfs(grid_copy, i, j)\n count += 1\n return count", "def pairData_NumAntPerStat(input_files, num_ant_per_stat=1, bad_antennas=[]):\n num_stations = len(input_files)\n even_bad_antennas = [A[0] for A in bad_antennas]\n \n antennas = []\n for file_index in range(num_stations):\n ant_names = input_files[file_index].get_antenna_names()\n num_antennas_in_station = len( ant_names )\n for x in range(num_ant_per_stat):\n if x*2 < num_antennas_in_station and ant_names[x*2] not in even_bad_antennas:\n antennas.append( [file_index,x*2] )\n \n return np.array(antennas, dtype=int)", "def construct_indu_index_mapping(df):\n industries_to_index = {}\n industries = df[\"ggroup\"].dropna().astype(int).unique()\n industries = industries.tolist()\n quarters = 
(df[\"year\"].astype(\"str\") + \" q\" + df[\"quarter\"].astype(\"str\")).unique()\n for i in range(df.shape[0]):\n row = df.iloc[i, :]\n if math.isnan(row[\"ggroup\"]):\n continue\n industries_to_index[int(row[\"ggroup\"])] = industries_to_index.get(int(row[\"ggroup\"]), set())\n industries_to_index[int(row[\"ggroup\"])].add(i)\n return industries_to_index", "def _init_index(self):\n\n if self._check_idx:\n self._index = bamnostic.bai.Bai(self._index_path)\n self.__nocoordinate = self._index.n_no_coor\n self.__mapped = sum(self._index.unmapped[mapped].n_mapped for mapped in self._index.unmapped) + self.nocoordinate\n self.__unmapped = sum(self._index.unmapped[unmapped].n_unmapped for unmapped in self._index.unmapped) + self.nocoordinate", "def index_valid_star_entries(star_catalog,target,tol,log,valid_cat=False):\n\n idx1 = np.where(star_catalog['cal_ref_mag_ip'] > 0.0)[0]\n idx2 = np.where(star_catalog['cal_ref_mag_ip'] <= 22.0)[0]\n idx3 = np.where(star_catalog['cal_ref_mag_rp'] > 0.0)[0]\n idx4 = np.where(star_catalog['cal_ref_mag_rp'] <= 22.0)[0]\n idx5 = np.where(star_catalog['cal_ref_mag_gp'] > 0.0)[0]\n idx6 = np.where(star_catalog['cal_ref_mag_gp'] <= 22.0)[0]\n\n det_idx = set(idx1).intersection(set(idx2))\n det_idx = det_idx.intersection(set(idx3))\n det_idx = det_idx.intersection(set(idx4))\n det_idx = det_idx.intersection(set(idx5))\n det_idx = det_idx.intersection(set(idx6))\n\n log.info('Identified '+str(len(det_idx))+\\\n ' detected stars with valid measurements in gri')\n\n if valid_cat == False:\n return list(det_idx), None, None\n\n idx4 = np.where(star_catalog['imag'] > 0.0)[0]\n idx5 = np.where(star_catalog['rmag'] > 0.0)[0]\n idx6 = np.where(star_catalog['gmag'] > 0.0)[0]\n\n cat_idx = det_idx.intersection(set(idx4))\n cat_idx = cat_idx.intersection(set(idx5))\n cat_idx = list(cat_idx.intersection(set(idx6)))\n det_idx = list(det_idx)\n\n log.info('Identified '+str(len(cat_idx))+\\\n ' detected stars with valid catalogue entries in gri')\n\n close_idx = find_stars_close_to_target(star_catalog, target, tol, log)\n\n close_cat_idx = list(set(cat_idx).intersection(set(close_idx)))\n\n log.info('Identified '+str(len(close_cat_idx))+\\\n ' stars close to the target with valid catalogue entries in gri')\n\n return det_idx, cat_idx, close_cat_idx", "def _marker_diag(self, auto_replace = False):\n\t\t\n\n\t\ttry:\n\t\t\tself.parameters['conditions']\n\t\texcept:\n\t\t\tprint('THere are no sorting parameters ... you must run self._sort()')\n\t\t\treturn\n\n\t\tnum_missing_markers = (self.parameters['conditions']*self.parameters['trials']) - self.markers.size \n\t\t\n\t\tif self.parameters['conditions']*self.parameters['trials'] == (self.markers.size):\n\t\t\tprint(\n\t\t\t\t'Stimulus parameters appear to be appropriate. Running this function is '\n\t\t\t\t'unlikely to be necessary. 
If anomalies are found, timing errors during the '\n\t\t\t\t'experimen are likely to be the culprit, and perhaps irretrivable.')\n\t\t\n\t\toutput = {}\n\n\t\tmarker_diff = np.r_[0.0, ( np.subtract( self.markers[1:], self.markers[:-1]))]\n\t\t\n\t\tmultiple = np.around((marker_diff / float(np.median(marker_diff))) - 1, 1)\n\t\n\t\tanomaly = np.greater(marker_diff, np.ones_like(marker_diff) * 1.2 * np.median(marker_diff))\n\t\t\n\t\ttable = pd.DataFrame({'markers': self.markers, 'multiple': multiple, \n\t\t\t\t\t\t\t 'difference': marker_diff, 'anomalous': anomaly})\n\n\t\t\n\t\t# Anomalous markers, require insertion\n\t\t\n\t\tanomalies = table[table['anomalous'] == True]\n\t\toutput['bad_marks'] = anomalies\n\t\toutput['bad_marks_index'] = anomalies.index\n\t\toutput['num_mark_found'] = np.around(anomalies['multiple'].sum(), 1)\n\t\toutput['num_missing_mark'] = num_missing_markers\n\t\toutput['num_missing_mark_at_beg/end'] = max(output['num_missing_mark']-output['num_mark_found'], 0)\n\t\toutput['maximum_marer_diff'] = marker_diff.max()\n\t\toutput['median_marker_diff'] = np.median(marker_diff)\n\n\t\tif output['num_missing_mark_at_beg/end'] > 0:\n\t\t\toutput['spikes_before_beg'] = self.spikes[self.spikes < self.markers[0]]\n\n\t\t\toutput['spikes_after_end'] = self.spikes[self.spikes > self.markers[-1]]\n\t\t\t\n#==============================================================================\n# Add:\n# \n# Auto Replace function. Create a parameter reflecting, and do not touch the original\n# file.\n# Capacity to tell whether marker missing from front or end of experiment.\n# Use average spike rates to test, or require feedback from teh original data file. \n#==============================================================================\n\t\t\t\n\t\treturn output", "def degree_of_map(self):\n if 2*self.component1.genus == self.space.genus and self.space.n == 0:\n return 2\n else:\n return 1", "def calc_gt_indices(batch_keys_gt, batch_assignments_gt):\n _, indices_gt = torch.max(\n batch_assignments_gt, 2\n ) # get ground truth matches from source to target\n indices_gt += (\n 1\n ) # remember that indices start counting from 1 for 0 is used to store empty key points\n mask_gt = (batch_keys_gt[:, :, 2] > 0).long() # get the valid key point masks\n indices_gt = indices_gt * mask_gt\n key_num_gt = mask_gt.sum(dim=1).float()\n return indices_gt, key_num_gt", "def test_toplevel_query_lat_mappings(self):\n\n patient = Semiology('Aphasia', Laterality.NEUTRAL, Laterality.NEUTRAL)\n patient.data_frame = self.df\n all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n self.assertIs(type(all_combined_gifs), pd.DataFrame)\n assert not all_combined_gifs.empty\n\n labels = ['Gif Parcellations', 'pt #s']\n all_combined_gifs = all_combined_gifs.astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n new_all_combined_gifindexed = all_combined_gifs.loc[:, labels]\n\n new_all_combined_gifindexed.set_index(\n 'Gif Parcellations', inplace=True)\n\n # new_all_combined_gifindexed.to_csv(r'D:\\aphasia_fixture.csv')\n # load fixture:\n fixture = pd.read_excel(\n dummy_data_path,\n header=0,\n usecols='A:B',\n sheet_name='fixture_aphasia',\n index_col=0,\n engine=\"openpyxl\",\n )\n # fixture.sort_index(inplace=True)\n assert((new_all_combined_gifindexed.shape) == (fixture.shape))\n# print('new_all_combined_gifindexed.shape is: ',\n# new_all_combined_gifindexed.shape)\n# print('fixture.shape.shape is: ', fixture.shape)\n\n assert(new_all_combined_gifindexed.index == fixture.index).all()\n 
assert(new_all_combined_gifindexed.values == fixture.values).all()", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def testCondition(df, indexCol, dmatDf, gbCol, gbValues=None, countCol='Cells', min_count=3):\n\n if gbValues is None:\n gbValues = sorted(df[gbCol].unique())\n\n cnts = df.groupby([indexCol, gbCol])[countCol].agg(np.sum).unstack(gbCol, fill_value=0)[gbValues]\n uIndices = list(df[indexCol].dropna().unique())\n dmat = dmatDf.loc[:, uIndices].loc[uIndices, :]\n compressedDmat = distance.squareform(dmat.values)\n Z = sch.linkage(compressedDmat, method='complete')\n members = getClusterMembers(Z)\n resDf = testHClusters(cnts, members, gbValues, min_count=min_count)\n return Z, resDf, np.array(uIndices)", "def step_indices(group_idx):\n ilen = step_count(group_idx) + 1\n indices = np.empty(ilen, np.int64)\n indices[0] = 0\n indices[-1] = group_idx.size\n cmp_pos = 0\n ri = 1\n for i in range(len(group_idx)):\n if group_idx[cmp_pos] != group_idx[i]:\n cmp_pos = i\n indices[ri] = i\n ri += 1\n return indices", "def get_lig_dihedrals(np_xyz, lig_ndx, close_ndxs, inp):\n n_at1, n_at2 = np.sum(inp.lig1_n_per_bead), np.sum(inp.lig2_n_per_bead)\n n_core = int(len(np_xyz) - inp.lig1_num*n_at1 - inp.lig2_num*n_at2)\n core_xyz = np_xyz[:n_core]\n\n lig1_dihedrals, lig2_dihedrals = [], []\n\n if n_at1 >= 3:\n for i in range(inp.lig1_num):\n ndx0 = n_core + i*n_at1\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[0][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig1_dihedrals.append(dihedral)\n for j in range(n_at1-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig1_dihedrals.append(dihedral)\n\n if n_at2 >= 3:\n for i in range(inp.lig2_num):\n ndx0 = n_core + n_at1*inp.lig1_num + i*n_at2\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[1][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig2_dihedrals.append(dihedral)\n for j in range(n_at2-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig2_dihedrals.append(dihedral)\n\n return (lig1_dihedrals, lig2_dihedrals)", "def _create_two_group_jackknife_indexes(x0, x1, is_paired):\n\n if is_paired and len(x0) == len(x1):\n out = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_jackknife_indexes(x1)]\n )\n )\n else:\n jackknife_c = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_repeated_indexes(x1)]\n )\n )\n\n jackknife_t = list(zip([i for i in create_repeated_indexes(x0)],\n [j for j in create_jackknife_indexes(x1)]\n )\n )\n out = jackknife_c + jackknife_t\n del jackknife_c\n del jackknife_t\n\n return out", "def get_indices(max_passes: int) -> Iterable[Tuple[int, int]]:\n base_indices = (0, max_passes)\n pw_indices = (max_passes, max_passes * 2)\n ip_indices = (max_passes * 2, max_passes * 3)\n strand_indices = (max_passes * 3, max_passes * 4)\n ccs_indices = (max_passes * 4, max_passes * 4 + 1)\n sn_indices = (max_passes * 4 + 1, max_passes * 4 + 5)\n return base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices", 
"def annotate_ISM(data_df, REFERENCE, position_list, reference_genbank_name=\"data/covid-19-genbank.gb\"):\n seq_list = data_df['sequence'].values.tolist()\n \n seq_index = []\n index = 0\n for base in REFERENCE[1]:\n if base == '-':\n seq_index.append(index)\n else:\n index += 1\n seq_index.append(index)\n reference_local_index_map = np.array(seq_index)\n mapped_reference_index = []\n for index, entropy in position_list:\n mapped_reference_index.append((index, reference_local_index_map[index], entropy))\n REFERENCE_ISM = ''.join([REFERENCE[1][item[0]] for item in position_list])\n logging.info('Reference ISM: {}.'.format(REFERENCE_ISM))\n \n gene_dict = load_gene_dict(reference_genbank_name)\n reference_raw = REFERENCE[1].replace('-', '')\n res = OrderedDict()\n res['Ref position'] = []\n res['Entropy'] = []\n res['Gene'] = []\n res['Is silent'] = []\n res['AA position'] = []\n for align_index, ref_index, entropy in mapped_reference_index:\n codon, codon_idx, name, codon_pos = find_SNP(ref_index, gene_dict, reference_raw)\n base_freq = Counter([item[align_index] for item in seq_list]).most_common()\n for alt_base, count in base_freq:\n if alt_base != reference_raw[ref_index-1]:\n break\n if codon is None:\n if_silence = True\n else:\n alt_codon = list(codon)\n alt_codon[codon_idx] = alt_base\n alt_codon = ''.join(alt_codon)\n ref_aa = translate(codon)\n ism_aa = translate(alt_codon)\n if ref_aa == ism_aa:\n if_silence = True\n else:\n if_silence = False\n res['Ref position'].append(ref_index)\n res['Entropy'].append(entropy)\n if name is None:\n name = 'Non-coding'\n res['Gene'].append(name)\n res['Is silent'].append(if_silence)\n if codon_pos is None:\n res['AA position'].append('NaN')\n else:\n res['AA position'].append('{}{}{}'.format(ref_aa, codon_pos, ism_aa))\n annotation_df = pd.DataFrame.from_dict(res)\n return annotation_df", "def group_liberties(self, index):\n group = self.get(index).group\n liberties = set()\n for i in group.members:\n cardinal_indices = self.cardinal_indices(i)\n for ci in cardinal_indices:\n if self.get(ci) is None:\n liberties.add(ci)\n return len(liberties)", "def _update_farness_map(self,ind):", "def rank_chanels():\r\n \r\n all_paths = [['data_bci\\\\row_data\\\\subject1\\\\'], ['data_bci\\\\row_data\\\\subject2\\\\'],['data_bci\\\\row_data\\\\subject3\\\\']]\r\n\r\n train_subjects = ['01']\r\n test_subject = '02'\r\n freq = 512\r\n\r\n cutoff_beggining = 0\r\n columns_to_read = ['Fp1', 'AF3' ,'F7', 'F3', 'FC1', 'FC5', 'T7', 'C3', 'CP1', 'CP5',\r\n 'P7', 'P3', 'Pz', 'PO3', 'O1', 'Oz', 'O2', 'PO4', 'P4', 'P8', 'CP6',\r\n 'CP2', 'C4', 'T8', 'FC6', 'FC2', 'F4', 'F8', 'AF4', 'Fp2', 'Fz', 'Cz','class']\r\n seq_len = 0\r\n cut_step = 0\r\n num_perseg = freq\r\n num_overlap = int(num_perseg/2)\r\n min_freq=8\r\n max_freq=45\r\n k = 3\r\n\r\n First_iter = True\r\n for path in all_paths:\r\n train_full_data, train_full_data_filtered, train_full_anots, test_full_data, test_sliced_full_filtered, test_full_annoations = read_filter(path, train_subjects,test_subject, columns_to_read, cutoff_beggining, seq_len, cut_step)\r\n\r\n psd_signals = eval_psd_not_modulated(train_full_data, num_perseg, num_overlap, freq, min_freq, max_freq) \r\n chanels_acc = iterate_over_chanels(psd_signals, train_full_anots, k)\r\n if First_iter:\r\n accuracy = chanels_acc\r\n First_iter = False\r\n else:\r\n accuracy += chanels_acc\r\n accuracy = accuracy/len(all_paths)\r\n sorted_indexies = np.argsort(accuracy)[::-1]\r\n\r\n\r\n #indexis_above_treshohld = [idx for idx in sorted_indexies if 
accuracy[idx]> min_accary]\r\n return sorted_indexies", "def key_klifs_residues(numbering):\n if numbering == None:\n print(\"The structure was not found in the klifs database.\")\n key_res = None\n return key_res\n\n key_res = dict() #initialize key_res (which read from the 0-based numbering list)\n for i in range(5):\n key_res[f'group{i}'] = list()\n ## feature group 0: A-loop backbone dihedrals\n key_res['group0'].append(numbering[83]) # start of A-loop\n\n ## feature group 1: P-loop backbone dihedrals\n key_res['group1'].append(numbering[3]) # res0 in P-loop\n key_res['group1'].append(numbering[4]) # res1 in P-loop\n key_res['group1'].append(numbering[5]) # res2 in P-loop\n key_res['group1'].append(numbering[6]) # res3 in P-loop\n key_res['group1'].append(numbering[7]) # res4 in P-loop\n key_res['group1'].append(numbering[8]) # res5 in P-loop\n\n ## feature group 2: aC-related features\n #angle between aC and aE helices\n key_res['group2'].append(numbering[19]) # res0 in aC\n key_res['group2'].append(numbering[29]) # res10 in aC\n key_res['group2'].append(numbering[62]) # end of aE\n\n # key salt bridge\n key_res['group2'].append(numbering[16]) # K in beta III\n key_res['group2'].append(numbering[23]) # E in aC\n\n ## feature group 3: DFG-related features\n key_res['group3'].append(numbering[79]) # X-DFG\n key_res['group3'].append(numbering[80]) # DFG-Asp\n key_res['group3'].append(numbering[81]) # DFG-Phe\n key_res['group3'].append(numbering[27]) # ExxxX\n\n ## feature group 4: the FRET distance\n # not in the list of 85 (equivalent to Aura\"S284\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 120)\n\n # not in the list of 85 (equivalent to Aura\"L225\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 61)\n\n return key_res", "def pairData_NumAntPerStat_PolO(input_files, num_ant_per_stat=1, bad_antennas=[]):\n num_stations = len(input_files)\n even_bad_antennas = [A[0] for A in bad_antennas]\n \n antennas = []\n for file_index in range(num_stations):\n ant_names = input_files[file_index].get_antenna_names()\n num_antennas_in_station = len( ant_names )\n for x in range(num_ant_per_stat):\n if x*2 < num_antennas_in_station and ant_names[x*2] not in even_bad_antennas:\n antennas.append( [file_index,(x*2)+1] )\n \n return np.array(antennas, dtype=int)", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def check_k4(list_of_mats, n):\n four_list = list(itertools.combinations([i for i in range(n)], 4))\n coordinate_list = [list(itertools.combinations(four, 2)) for four in four_list]\n possible_mats = []\n for mats in list_of_mats:\n flag = 0\n for coordinate in coordinate_list:\n if sum([mats[cor[0]][cor[1]] for cor in coordinate]) == 6:\n flag = 1\n break\n if flag == 0:\n possible_mats.append(mats)\n return possible_mats", "def culggroup_thickestdonecount(As, Rps, group, dones):\n pairs = sorted(((get_culg_dimension(As, Rps, l), dones[l], l)\n for l in group),\n reverse=True)\n count = len(tuple(itt.takewhile(lambda p: p[1], pairs)))\n return count", "def _sort_index(self):\n\n allAltPos = np.array(sorted(list(set(list(self.data['altitude'])))))[::-1]\n allAziPos = 
np.array(sorted(list(set(list(self.data['azimuth'])))))\n\n indON = [[None for azi in allAziPos] for alt in allAltPos]; indOFF = [[None for azi in allAziPos] for alt in allAltPos]\n\n for i, traceItem in enumerate(self.data):\n alt = traceItem['altitude'];azi = traceItem['azimuth'];sign = traceItem['sign']\n for j, altPos in enumerate(allAltPos):\n for k, aziPos in enumerate(allAziPos):\n if alt==altPos and azi==aziPos:\n if sign==1:\n if indON[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign: 1!')\n else: indON[j][k]=i\n\n if sign==-1:\n if indOFF[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign:-1!')\n else: indOFF[j][k]=i\n\n indON = np.array([np.array(x) for x in indON]); indOFF = np.array([np.array(x) for x in indOFF])\n\n return indON,indOFF,allAltPos,allAziPos", "def identify_expressed_gRNA_families(gRNAs, mRNAs, init_seq_len):\n gRNA_families = {'family_no':[], 'family_end':[], 'family_id':[]}\n strand_name = {'coding':'', 'template':'t'}\n index = []\n\n gRNAs['gene_mRNA_end'] = gRNAs['mRNA_end']+gRNAs['rel_pos'].apply(lambda x: 0 if x is pd.NA else x)\n gRNAs['gene_mRNA_end'] = gRNAs['gene_mRNA_end'].astype('Int32')\n gRNAs['tmp'] = gRNAs.apply(lambda x: x['cassette_label']+strand_name[x['strand']], axis=1)\n\n for mRNA_name, mRNA in sorted(mRNAs.items()):\n # get all gRNAs with an init_pos for this mRNA\n # nonexpressed gRNAs can be in an editing group if they have a init_seq. this is because\n # they have transcripts in the init_position but not enough to be called expressed\n # gRNAs without an init_seq have no transcripts within the initiation site\n # these are added to a group below\n mask1 = gRNAs['mRNA_name'] == mRNA_name\n mask2 = gRNAs['init_seq'].notnull()\n g = gRNAs[mask1 & mask2]\n\n # positions where the start of expressed gRNAs align to mRNA\n a = np.zeros(mRNA['length']+100)\n i = np.array(g['gene_mRNA_end']-1, dtype=int)\n for ii in range(init_seq_len):\n a[i-ii] = 1\n a = ''.join([str(int(i)) for i in a])\n g_end = 'gene_mRNA_end'\n\n tmp_g = []\n family_no = 0\n\n # find regions where groups of gRNAs anchor to mRNA starting from 3' end of edited mRNA\n for m in re.finditer('1+', a):\n s, e = m.start(0), m.end(0)\n # get all gRNAs that anchor at this region\n anchor_group = g[(g[g_end] >= s) & (g[g_end] <= e)]\n\n if len(anchor_group) == 0:\n continue\n\n # for each cassette position of these gRNAs create a dictionary of cassette position and editing position\n cas_pos = {}\n for _, gRNA in anchor_group.iterrows():\n pos = gRNA['tmp']\n if pos not in cas_pos:\n cas_pos[pos] = gRNA[g_end]\n cas_pos[pos] = max(gRNA[g_end], cas_pos[pos])\n\n # group gRNAs with the same cassette position ordered by editing position\n for pos, end in sorted(cas_pos.items(), key=lambda kv: kv[1]):\n group = anchor_group.query('tmp == @pos')\n index.extend(group.index.values)\n gRNA_families['family_no'].extend([family_no]*len(group))\n gRNA_families['family_end'].extend([end]*len(group))\n gRNA_families['family_id'].extend([f'{mRNA_name}-{pos}-{int(end)}']*len(group))\n tmp_g.append((family_no, end, f'{mRNA_name}-{pos}-{int(end)}'))\n family_no += 1\n\n # gRNAs without an init_seq\n mask2 = gRNAs['init_seq'].isnull()\n unknown = gRNAs[mask1 & mask2]\n # for each unknown gRNA\n for idx, gRNA in unknown.iterrows():\n # search for a group that ends just after mRNA_end of this unknown gRMA\n for f_no, gene_mRNA_end, family_id in sorted(tmp_g, key=itemgetter(1)):\n 
[g_mRNA_name, g_pos, g_end] = family_id.split('-')\n if g_mRNA_name == mRNA_name and gRNA['mRNA_end']-1 <= gene_mRNA_end and gRNA['cassette_label'] == g_pos:\n index.append(idx)\n gRNA_families['family_no'].append(f_no)\n gRNA_families['family_end'].append(gene_mRNA_end)\n gRNA_families['family_id'].append(f'{family_id}')\n break\n else:\n # no suitable gRNA found, so make a unique family for this non-expressed gRNA \n index.append(idx)\n gRNA_families['family_no'].append(family_no)\n gRNA_families['family_end'].append(gRNA['mRNA_end'])\n gRNA_families['family_id'].append(f'{mRNA_name}-{gRNA[\"cassette_label\"]}-{gRNA[\"mRNA_end\"]}')\n family_no += 1\n\n gRNAs = gRNAs.drop(['tmp'], axis=1)\n gRNAs = gRNAs.join(pd.DataFrame(gRNA_families, index=index))\n gRNAs['family_no'] = gRNAs['family_no'].astype('Int64')\n gRNAs['family_end'] = gRNAs['family_end'].astype('Int64')\n return gRNAs", "def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders", "def showRecommendations(self):\n\t\t#rn_im_index = np.where( df_index == 10561)[0][0] #similar color but no similar shape\n\t\t\n\t\t#rn_im_index = np.where( df_index == 22472)[0][0] # similar color but no similar shape\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(61706, 0.16241728944546732), (94073, 0.15613203034271395), (61836, 0.15494992784841455), (61835, 0.15494992784841452), (61825, 0.15163383319000062), (61745, 0.15031672266647675), (26848, 0.14479933826475058), (61760, 0.14353241349060006)]\n\n\t\tOutput Color\n\t\t[(22492, 0.72863097869032856), (22482, 0.66834821692729429), (3351, 0.45135804324105538), (29982, 0.40733726762782918), (85603, 0.40595375826379132), (22502, 0.38204339162468243), (29913, 0.36735985661014864), (29581, 0.3669268043422747)]\n\n\t\t\"\"\"\n\t\t\n\t\t#rn_im_index = np.where( df_index == 26746)[0][0] #Similar shape and similar color\n\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(27380, 0.1817530749164192), (29457, 0.1353165149065198), (1336, 0.12885937891206711), (27355, 0.12241573468787358), (29704, 0.12009259771972887), (29603, 0.11196184515165516), (29594, 0.11196184515165516), (26809, 0.11097441686854403)]\n\n\t\tOutput Color\n\t\t[(26809, 0.80634030626051745), (27380, 0.79789790693763663), (27355, 0.79542468562323521), (27018, 0.74331190002098657), (27197, 0.73454915804315535), (26913, 0.73410853271216192), (26905, 0.73410853271216192), (27617, 0.73098284820738935)]\n\n\t\t\"\"\"\n\n\t\t#rn_im_index = np.where( df_index == 27288)[0][0] #blurry image\n\t\t#rn_im_index = np.where( df_index == 27294)[0][0] # Similar Color and similar shape\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(27133, 0.35485652442453264), (27128, 0.32115384345167203), (27151, 0.25627343126278629), (27145, 0.25366123246450772), (27237, 0.25131923154633229), (27303, 0.22385072157466906), (27139, 0.22229444866797674), (27299, 0.22049959456469045)]\n\n\t\tOutput Color\n\t\t[(27133, 
0.96240728970715483), (27128, 0.96009243888171958), (27145, 0.94268324228267275), (27303, 0.93286490646887354), (27139, 0.9244608465512546), (27237, 0.87199166625029467), (27049, 0.86531150055386774), (27066, 0.86139090244063599)]\n\n\t\t\"\"\"\n\n\t\t#rn_im_index = np.where( df_index == 52528)[0][0] # some have similar shape and some have similar color\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(93975, 0.31989999912901967), (61835, 0.31528273207820834), (61836, 0.31528273207820828), (61745, 0.31261425625988493), (61825, 0.31226105280375738), (61706, 0.31006537435901937), (61760, 0.29497111365575518), (94073, 0.28643748527418661)]\n\t\t\n\t\tOutput Color\n\t\t[(52542, 0.7633360888150692), (27402, 0.7582411610565466), (59301, 0.71242045321505865), (27329, 0.69968585913071302), (52539, 0.6996578131078881), (27335, 0.69215065941368603), (52469, 0.69152133535379212), (52473, 0.68799897765402473)]\n\n\t\tOutput c2d\n\t\t[(85620, 39705.292103093299), (52469, 38947.56038916672), (93975, 37706.480789897578), (52542, 37604.001320837888), (27402, 36709.321927197598), (27118, 36164.067396937884), (63718, 35906.648243400079), (63709, 35906.648243400079)]\n\t\n\n\t\t\"\"\"\n\t\t# Similar in color but dissimilar in shape\n\t\t#rn_im_index = np.where( df_index == 94380)[0][0] # Similar with color. Similar with shape. Very good with shape. Good Recommendations 52469(Shape) 94383 (color)\n\t\t\n\t\t\"\"\"\n\t\tOutput shape\n\t\t[(52469, 0.22380221768394279), (61836, 0.17343131445222859), (61835, 0.17343131445222859), (61825, 0.1713416617900273), (61745, 0.16700001977657994), (35922, 0.16614680579871874), (61715, 0.16380442450621885), (61706, 0.16194776280945139)]\n\t\t\n\t\tOutput Color\n\t\t[(94383, 0.69238692936637536), (26960, 0.58939898313472816), (26957, 0.58939898313472816), (29412, 0.58436143235370375), (29371, 0.58436143235370375), (29453, 0.5745231714319865), (29616, 0.57270906625007156), (29970, 0.57018718322031081)]\n\n\t\tOutput c2d\n\t\t[(94383, 37226.57203206882), (52558, 37007.251051234598), (26960, 36448.333956681076), (26957, 36448.333956681076), (1441, 36380.413117473567), (50197, 35994.006084886816), (94057, 35671.971168930344), (27533, 35061.385308567049)]\n\t\n\t\t\"\"\"\n\n\t\t#rn_im_index = np.where( df_index == 94080)[0][0] # some have similar shape and some have similar color\n\t\t\"\"\"\n\t\tOutput c2d\n\t\t[(57755, 29305.613736454678), (61797, 28828.064153886309), (61731, 28828.064153886309), (29417, 27874.375538422293), (63771, 27596.578857622582), (63765, 27596.578857622582), (63758, 27442.936837903482), (63750, 27442.936837903482)]\n\n\t\t\"\"\"\n\n\t\t# Completely random image that doesn't have similar images\n\t\t#rn_im_index = np.where( df_index == 1334)[0][0]\n\t\tdf = self.df\n\t\tdf_index = df.index.values\n\t\trn_im_index = random.randint(0, df.shape[0])\n\n\t\tprint \"random image index: {} id:{}\".format(rn_im_index, df_index[rn_im_index])\n\n\t\ti = rn_im_index\n\t\tindex_x = df.iloc[0:i,i].index\n\t\tindex_y = df.iloc[i,i:df.index.values.size].index\n\n\t\tvalues_x = df.iloc[0:i,i].values\n\t\tvalues_y = df.iloc[i,i:df.index.values.size].values\n\n\t\tindex = np.concatenate((index_x, index_y),axis=0)\n\t\tvalues = np.concatenate((values_x,values_y),axis=0)\n\n\t\tzipped = zip(index,values)\n\t\tzipped_sorted = sorted(zipped, key=lambda x: x[1])[::-1][0:8]\n\t\t#zipped_sorted = sorted(zipped, key=lambda x: x[1])[0:8]\n\t\tprint zipped_sorted\n\t\tindex , values = zip(*zipped_sorted)\n\t\t#print index\n\t\ttop_n_similar_images = map(int,list(index))\n\t\t#return df, 
duplicated_items\n\n\t\t# Filter out threshold less than 0.5\n\t\t#if self.mode == 'RGB':\n\t\tindex_aux = []\n\t\ti = 0\n\t\tfor im_id in top_n_similar_images:\n\t\t\tif self.mode == 'RGB' and values[i] > 0.5:\n\t\t\t\tindex_aux.append(im_id)\n\t\t\telif self.mode == 'L' and values[i] > 0.1:\n\t\t\t\tindex_aux.append(im_id)\n\t\t\ti += 1\n\n\t\ttop_n_similar_images = index_aux\n\n\t\tif len(top_n_similar_images) > 0 or self.mode == 'L':\n\t\t\n\t\t\t#print top_n_similar_images\n\t\t\ttop_n_similar_images = self.removeDuplicates(top_n_similar_images)\n\t\t\t#print top_n_similar_images\n\t\n\t\t\t#top_n_similar_images = df.sort_values(by=[rn_im_index],ascending = False).loc[:,rn_im_index][0:10].index.values\n\t\t\t\n\t\t\toutput = open(self.data_path + 'X_original.pkl', 'r')\n\t\t\tX_original = cPickle.load(output)\n\t\t\toutput.close()\n\t\t\t\n\t\t\t#print top_n_similar_images[0]\n\t\t\tindex = np.asarray(index,dtype='int64')\n\t\t\t\n\t\t\tif self.mode == 'RGB':\n\t\t\t\tself.reconstructImage(X_original[rn_im_index]).show()\n\t\t\telif self.mode == 'L':\n\t\t\t\tim_base = X_original[rn_im_index] * 256\n\t\t\t\tim_base = np.asarray(im_base, dtype='float64')\n\t\t\t\tim_base = filter.sobel(im_base)\n\t\n\t\t\t\tio.imshow(im_base)\n\t\t\t\tio.show()\t\n\n\t\t\tfor i in xrange(0,len(top_n_similar_images)):\n\t\t\t\tindex_i = np.where( df_index == top_n_similar_images[i])[0][0]\n\n\t\t\t\tif self.mode == 'L':\n\t\t\t\t\tim_i = X_original[index_i] * 256\n\t\t\t\t\tim_i = np.asarray(im_i, dtype='float64')\n\t\t\t\t\tim_i = filter.sobel(im_i)\n\t\n\t\t\t\t\tio.imshow(im_i)\n\t\t\t\t\tio.show()\n\n\t\t\t\telif self.mode == 'RGB':\n\t\t\t\t\tself.reconstructImage(X_original[index_i]).show()\n\t\telse:\n\t\t\tprint \"There are no image higher than the minimum threshold\"", "def build_label_mapping(\n grouped_targeted_labels: List[Set[str]],\n nontargeted_labels: Optional[Set[str]] = None,\n) -> Dict[str, int]:\n mapping = {\n label: i + 1\n for i, label_group in enumerate(grouped_targeted_labels)\n for label in label_group\n }\n\n if nontargeted_labels:\n mapping.update({label: 0 for label in nontargeted_labels})\n\n return mapping", "def _red_detect_(self, nslice = 0, thresh = 2.0):\n zk_1 = 's_' + format(nslice, '03d')\n zk_2 = 's_' + format(nslice+1, '03d')\n\n zf_1 = self.z_dense[zk_1]\n zf_2 = self.z_dense[zk_2]\n\n # extract the y and x coordinates\n y1 = zf_1[:,0]\n x1 = zf_1[:,1]\n\n y2 = zf_2[:,0]\n x2 = zf_2[:,1]\n\n\n # create a meshgrid\n [YC, YR] = np.meshgrid(y2, y1)\n [XC, XR] = np.meshgrid(x2, x1)\n\n\n dist_block = np.sqrt((YC-YR)**2 + (XC-XR)**2)\n red_pair = np.where(dist_block <= thresh) # find out where the distance between cell i in plane k and cell j in plane k+1 is below the threshold.\n\n ind1 = red_pair[0] # the indices in the first frame\n ind2 = red_pair[1] # the indices in the second frame\n\n\n # select those with markers > 0 and markers < 0\n marker_1 = zf_1[ind1, 3]\n\n\n new_idx = (marker_1 == 0) # select those with zero-markers, which are never counted before. These are new cells. 
marker_1 needs to be updated.\n pool_new = ind1[new_idx] # select the indices in the first frame where new redundancies are detected \n pool_new_cov = ind2[new_idx] # select the indices in the second frame where new redundancies are detected.\n\n\n pool_exist = ind1[~new_idx] # among the detected redundancies, find those already marked.\n pool_exist_cov = ind2[~new_idx] # correspondingly, find those already marked in the adjacent slice\n\n n_new = len(pool_new)\n n_exist = len(pool_exist)\n if self.verbose:\n print(n_new, \"new redundancies, \", n_exist, \"existing redundancies\")\n\n for n_count in np.arange(n_new):\n # build the new keys\n # also, we need to assign each new key an identity number which is unique.\n n_ind1 = pool_new[n_count] # find the indices in the first slice that contains new redundancies\n n_ind2 = pool_new_cov[n_count] # find the indices in the following slice \n pr_number = nslice * 1000 + n_ind1\n pr_key = 'sl_' + str(pr_number) # build a key \n new_sl = Simple_list(nslice) # create a simple list with z_marker = nslice, nslice is the index of the first z-slice \n new_sl.add([nslice, zf_1[n_ind1, 4]])\n new_sl.add([nslice+1, zf_2[n_ind2, 4]])\n zf_1[n_ind1, 3] = pr_number # assign the new pr_number to zf_1\n zf_2[n_ind2, 3] = pr_number # assigne the same new pr_number to zf_2\n\n self.redundancy_pool[pr_key] = new_sl # stored into the redundancy pool\n\n\n for n_count in np.arange(n_exist):\n # search for the existing keys\n n_ind1 = pool_exist[n_count]\n n_ind2 = pool_exist_cov[n_count]\n pr_number = int(zf_1[n_ind1, 3])# catch up the pr_number\n pr_key = 'sl_' + str(pr_number) # this pr_key should already exist in the pool. \n\n self.redundancy_pool[pr_key].add([nslice+1, zf_2[n_ind2, 4]])\n zf_2[n_ind2, 3] = pr_number # update the pr_number in the adjacent slice", "def test_3_1_4D_rec_init(self):\n check = [(-1.0, -15.0, 0.0, 3.0), (4.0, -10.0, 1.0, 5.0),\n (4.0, -15.0, 0.0, 3.0), (4.0, -10.0, 0.0, 3.0),\n (4.0, -10.0, 1.0, 3.0), (4.0, -10.0, 0.0, 5.0),\n (4.0, -15.0, 1.0, 3.0), (4.0, -15.0, 1.0, 5.0),\n (4.0, -15.0, 0.0, 5.0), (-1.0, -10.0, 0.0, 3.0),\n (-1.0, -10.0, 1.0, 3.0), (-1.0, -10.0, 1.0, 5.0),\n (-1.0, -10.0, 0.0, 5.0), (-1.0, -15.0, 1.0, 3.0),\n (-1.0, -15.0, 1.0, 5.0), (-1.0, -15.0, 0.0, 5.0),\n (1.5, -12.5, 0.5, 4.0)]\n nn_checks = {(4.0, -15.0, 1.0, 5.0): [(-1, -15, 1, 5), (4, -15, 0, 5),\n (-1, -15, 0, 3), (4, -15, 1, 3),\n (4, -10, 1, 5), (-1, -15, 0, 5),\n (4, -15, 0, 3), (-1, -15, 1, 3),\n (1.5, -12.5, 0.5, 4.0)],\n (4.0, -15.0, 0.0, 5.0): [(4, -15, 1, 5), (4, -10, 0, 5),\n (-1, -15, 0, 3), (4, -10, 1, 5),\n (-1, -15, 0, 5), (4, -15, 0, 3),\n (1.5, -12.5, 0.5, 4.0)],\n (1.5, -12.5, 0.5, 4.0): [(4, -15, 1, 5), (-1, -10, 0, 5),\n (4, -10, 0, 5), (-1, -15, 1, 5),\n (4, -10, 1, 3), (-1, -10, 1, 5),\n (4, -15, 0, 5), (-1, -15, 1, 3),\n (-1, -15, 0, 3), (4, -15, 1, 3),\n (-1, -10, 0, 3), (4, -10, 1, 5),\n (-1, -10, 1, 3), (-1, -15, 0, 5),\n (4, -15, 0, 3), (4, -10, 0, 3)]}\n\n init_triangulation(4, 0, check, nn_checks,\n bounds=[(-1, 4), (-15, -10), (0, 1), (3, 5)])", "def get_caseX_indices_matching_symbol_list(result,match_symbols = [\"NA\"]):\n #\n # Sub-function\n #\n def get_caseX_indices_matching_symbol(result,match_symbol = \"NA\"):\n \"\"\" get a list of indices of case4 (see: @caseX) elements from @result\n\n Args:\n result: @@result: an @uncollasped punnet square, which represents the @punnet imported as an R-like dataframe from: ../R/punnet.csv\n match_symbol: default: \"NA\", the string that represents elements in the @@punnet that have 
been killed, carrying a critically-short @telomere, T0\n Returns:\n @todo\n\n \"\"\"\n\n caseX_matching = np.in1d(result.ravel(),[match_symbol]).reshape(result.shape) # Case4: (_T0)&(__): critically short, lethal T0-carring diploid individuals die\n caseX_indices_tmp = np.where(caseX_matching) # locations (indicies)\n caseX_indices = zip(caseX_indices_tmp[0],caseX_indices_tmp[1])\n\n return caseX_indices\n #\n # Main\n #\n caseX_indices = [] # indexes are (x,y) coordinates on the punnet (result) that the symbol is found in, e.g. (2,6)\n\n symbol_to_index = {}\n\n for symbol in match_symbols:\n\n #..Generate matching indices on @Punnet-table (result) \n match_index = get_caseX_indices_matching_symbol(result,symbol)\n\n #..Multiple matching indices to symbol AND symbol is a gamete frequency \n if (len(match_index)>1) and (not symbol == \"NA\"):\n\n symbol_to_index[symbol] = match_index\n\n #..Multiple matching indices to symbol AND symbol==\"NA\" (i.e. death)\n elif (len(match_index)>1) and (symbol == \"NA\"):\n caseX_indices = match_index\n symbol_to_index[symbol] = match_index\n break\n\n #..Only one matching index to symbol\n else:\n caseX_indices.append(match_index)\n symbol_to_index[symbol] = match_index\n\n return caseX_indices, symbol_to_index", "def test_digi_scenarios(self):\n # -- data group has no digitizer devices ----\n _map = self.map\n self.assertBasics(_map)\n self.assertEqual(_map, {})\n\n # -- data group has all mappable devices ----\n self.f.add_module(\"SIS 3301\", {})\n self.f.add_module(\"SIS crate\", {})\n _map = self.map\n self.assertBasics(_map)\n\n # check all controls were mapped\n self.assertEqual(len(_map), 2)\n self.assertIn(\"SIS 3301\", _map)\n self.assertIn(\"SIS crate\", _map)\n\n # the data group has mappable and unknown digitizers ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS 3301\", {})\n self.f[\"Raw data + config\"].create_group(\"Not known\")\n _map = self.map\n self.assertBasics(_map)\n\n # check correct diagnostics were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS 3301\", _map)\n self.assertNotIn(\"Not known\", _map)\n\n # delete unknown group\n del self.f[\"Raw data + config/Not known\"]\n\n # the data group has a dataset ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS crate\", {})\n data = np.empty((2, 100), dtype=np.float32)\n self.f[\"Raw data + config\"].create_dataset(\"A dataset\", data=data)\n _map = self.map\n self.assertBasics(_map)\n\n # check correct diagnostics were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS crate\", _map)\n self.assertNotIn(\"A dataset\", _map)\n\n # delete dataset\n del self.f[\"Raw data + config/A dataset\"]\n\n # the data group has a mappable digitizer but ----\n # mapping fails ----\n self.f.remove_all_modules()\n self.f.add_module(\"SIS 3301\", {})\n self.f.add_module(\"SIS crate\", {})\n\n # remove a dataset from 'SIS 3301'\n # - this will cause mapping of 'Waveform' to fail\n #\n sis_group = self.f[\"Raw data + config/SIS 3301\"]\n for name in sis_group:\n if isinstance(sis_group[name], h5py.Dataset):\n del sis_group[name]\n\n # check map\n _map = self.map\n self.assertBasics(_map)\n\n # check correct controls were mapped\n self.assertEqual(len(_map), 1)\n self.assertIn(\"SIS crate\", _map)\n self.assertNotIn(\"SIS 3301\", _map)", "def count_automorphisms(g: Graph) -> int:\n\n def generate_mapping(g: Graph, h: Graph):\n \"\"\"\n Generates the corresponding mapping from vertex to vertex for the isomorphism between graphs g and h.\n We map g to h.\n 
:param g: A graph\n :param h: A graph\n :return: A permutation with the mapping from g to h\n \"\"\"\n mapping = [0] * len(g.vertices)\n for v_g in g:\n for v_h in h:\n if v_g.colornum == v_h.colornum:\n mapping[v_g.label] = v_h.label\n return permutation(len(mapping), mapping=mapping)\n\n def generate_automorphisms(g: Graph, h: Graph, d: list[Vertex], i: list[Vertex]):\n \"\"\"\n Is called recursively to traverse through the branching tree and to find all automorphisms.\n :param g: A copy of the original graph\n :param h: Another copy of the original graph\n :param d: A list with pre-colored vertices for graph g\n :param i: A list with pre-colored vertices for graph h\n \"\"\"\n\n # Refine the graphs g and h.\n color_refinement([g, h])\n\n # Make sure that the colors are balanced, and check for a bijection.\n if not is_balanced(g, h):\n return\n if is_bijection(g, h):\n\n # Generate the mapping from g -> h.\n p = generate_mapping(g, h)\n\n # If the permutation cannot be generated by this generating set, we need to add it.\n if not is_member(generating_set, p):\n generating_set.append(p)\n\n # We can now back to the last trivial ancestor nodes in the branching tree.\n while [v.label for v in d] != [v.label for v in i]:\n # We remove the vertices from d and i and mark them as 'used'.\n # This should prevent the algorithm from trying to re-explore a branch that may be skipped.\n # FIXME: This strategy seems too aggressive, the results are sometimes off by a factor 2 or 4\n d.pop().pre_labeled = True\n i.pop().pre_labeled = True\n\n return\n\n c, next_color = get_c([g, h])\n for v_g in g:\n if v_g.colornum == c:# and not v_g.pre_labeled:\n x = v_g\n break\n\n for v_h in h:\n if v_h.colornum == c and not v_h.pre_labeled:\n g1 = g + Graph(False)\n h1 = h + Graph(False)\n g1.vertices[g.vertices.index(x)].colornum = next_color\n h1.vertices[h.vertices.index(v_h)].colornum = next_color\n d.append(x)\n i.append(v_h)\n generate_automorphisms(g1, h1, d, i)\n\n generating_set = []\n graph_copy_1 = g + Graph(False)\n graph_copy_2 = g + Graph(False)\n for v in graph_copy_1.vertices:\n v.pre_labeled = False\n for v in graph_copy_2.vertices:\n v.pre_labeled = False\n generate_automorphisms(graph_copy_1, graph_copy_2, [], [])\n return compute_order(generating_set)", "def allign_alleles(df):\n d = {'A': 0, 'C': 1, 'G': 2, 'T': 3}\n a = [] # array of alleles\n for colname in ['A1_ref', 'A2_ref', 'A1_x', 'A2_x', 'A1_y', 'A2_y']:\n tmp = np.empty(len(df[colname]), dtype=int)\n for k, v in d.items():\n tmp[np.array(df[colname]) == k] = v\n a.append(tmp)\n matched_alleles_x = (((a[0] == a[2]) & (a[1] == a[3])) |\n ((a[0] == 3 - a[2]) & (a[1] == 3 - a[3])))\n reversed_alleles_x = (((a[0] == a[3]) & (a[1] == a[2])) |\n ((a[0] == 3 - a[3]) & (a[1] == 3 - a[2])))\n matched_alleles_y = (((a[0] == a[4]) & (a[1] == a[5])) |\n ((a[0] == 3 - a[4]) & (a[1] == 3 - a[5])))\n reversed_alleles_y = (((a[0] == a[5]) & (a[1] == a[4])) |\n ((a[0] == 3 - a[5]) & (a[1] == 3 - a[4])))\n df['Z_x'] *= -2 * reversed_alleles_x + 1\n df['Z_y'] *= -2 * reversed_alleles_y + 1\n df.where(pd.Series(((matched_alleles_x|reversed_alleles_x)&(matched_alleles_y|reversed_alleles_y))), inplace=True)\n df.dropna(inplace=True)", "def test_gleckler_index(self):\n\n # generate sample data\n # sample data\n tmp = np.zeros((5, 3, 1))\n tmp[:,0,0] = np.ones(5)*1.\n tmp[:,1,0] = np.ones(5)*2.\n tmp[:,2,0] = np.ones(5)*5.\n\n # The data is like ...\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n #| 1 | 2 | 5 |\n\n x = self.D.copy()\n 
x._temporal_subsetting(0, 4)\n\n x.data = np.ma.array(tmp, mask=tmp!=tmp)\n x.std = np.ones(x.data.shape)\n x.time[0] = pl.datestr2num('2000-02-15')\n x.time[1] = pl.datestr2num('2000-03-15')\n x.time[2] = pl.datestr2num('2000-04-15')\n x.time[3] = pl.datestr2num('2000-05-15')\n x.time[4] = pl.datestr2num('2000-06-15')\n\n y = self.D.copy()\n y._temporal_subsetting(0, 4)\n tmp = np.ones(x.data.shape) # sample data 2\n y.data = np.ma.array(tmp, mask=tmp!=tmp)\n y.time[0] = pl.datestr2num('2000-02-15')\n y.time[1] = pl.datestr2num('2000-03-15')\n y.time[2] = pl.datestr2num('2000-04-15')\n y.time[3] = pl.datestr2num('2000-05-15')\n y.time[4] = pl.datestr2num('2000-06-15')\n\n # Case 1: same area weights\n # cell area\n tmp = np.ones((3, 1))\n x.cell_area = tmp*1.\n\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #| 1-1 | 2-1 | 5-1 |\n #===================\n #| 0 | 5 | 5*4**2=5*16. = 80 |\n #==> E2 = sqrt(85./(15.))\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt(((85./15.) * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b')\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt(((85./15.) * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n\n\n # Case 2: Different area weights\n # cell area\n tmp = np.ones((3, 1))\n tmp[1, 0] = 2.\n x.cell_area = tmp*1.\n\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #| 1-1=0 | 2-1=1 | 5-1=16 |\n #--------------------------\n # w = 0.25 w = 0.5 w=0.25|\n #--------------------------\n\n # 0.25*0 + 0.5 * 1 + 0.25 * 16 = 0 + 0.5 + 4 = 4.5\n # the mean of that is 4.5 for each timestep\n # mean because the overall weights are calculated as such that\n # they give a total weight if 1\n\n # diagnostic\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt((4.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt((4.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n # Case 3: use different std\n x.std = np.ones(x.data.shape)\n x.std[:, 2, 0] = 0.5\n\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #| 1-1=0 | 2-1=1 | 5-1=16 / 0.5 |\n #--------------------------------\n # w = 0.25 w = 0.5 w=0.25|\n # 0 + 0.5 + 0.25*32 = 0.5 + 8 = 8.5\n\n D = GlecklerPlot()\n r = D.calc_index(x, y, 'a', 'b', time_weighting=False)\n\n wt = np.ones(5) / 5.\n ref = np.sqrt((8.5 * wt).sum())\n t = np.abs(1. - r / ref)\n self.assertLess(t, 0.000001) # relative error\n\n wt = np.asarray([29., 31., 30., 31., 30.])\n wt = wt / wt.sum()\n ref = np.sqrt((8.5 * wt).sum())\n t = np.abs(1. 
- r / ref)\n self.assertLess(t, 0.000001) # relative error", "def Group(self) -> _n_5_t_0:", "def Group(self) -> _n_5_t_0:", "def countMatches(g1, g2):\n if g1 is None or g2 is None or len(g1) == 0 or len(g1[0]) == 0: # sanity check\n return 0\n count = 0\n for i in range(len(g1)):\n for j in range(len(g1[0])):\n if g1[i][j] == g2[i][j] == 1 and search_grid(g1, g2, i, j):\n count = count + 1\n return count", "def stippled_countless2d(data):\n sections = []\n\n # This loop splits the 2D array apart into four arrays that are\n # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),\n # and (1,1) representing the A, B, C, and D positions from Figure 1.\n factor = (2, 2)\n for offset in np.ndindex(factor):\n part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]\n sections.append(part)\n\n a, b, c, d = sections\n\n ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization\n ab_ac |= b * (b == c) # PICK(B,C)\n nonzero = a + (a == 0) * (b + (b == 0) * c)\n return ab_ac + (ab_ac == 0) * (d +\n (d == 0) * nonzero) # AB || AC || BC || D", "def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts", "def calc_asymmetric_unit_cell_indexes(n_abc, full_symm_elems):\n n_a, n_b, n_c = n_abc[0], n_abc[1], n_abc[2]\n\n point_index = numpy.stack(numpy.meshgrid(\n numpy.arange(n_a), numpy.arange(n_b), numpy.arange(n_c),\n indexing=\"ij\"), axis=0)\n point_index = point_index.reshape(point_index.shape[0], numpy.prod(point_index.shape[1:]))\n \n elem_r = full_symm_elems[4:13]\n elem_b = full_symm_elems[:4]\n\n r_ind = calc_m_v(\n numpy.expand_dims(elem_r, axis=1),\n numpy.expand_dims(point_index, axis=2), flag_m=False, flag_v=False)[0]\n\n div, mod = numpy.divmod(numpy.expand_dims(n_abc, axis=1), numpy.expand_dims(elem_b[3], axis=0))\n if not(numpy.all(mod == 0)):\n raise KeyError(\"Symmetry elements do not match with number of points\")\n point_index_s = numpy.mod(r_ind + numpy.expand_dims(div * elem_b[:3], axis=1),\n numpy.expand_dims(numpy.expand_dims(n_abc, axis=1), axis=2))\n value_index_s = n_c*n_b*point_index_s[0] + n_c*point_index_s[1] + point_index_s[2]\n value_index_s_sorted = numpy.sort(value_index_s, axis=1)\n\n a, ind_a_u_c, counts_a_u_c = numpy.unique(\n value_index_s_sorted[:, 0], return_index=True, return_counts=True)\n\n point_index_s_a_u_c = point_index[:, ind_a_u_c]\n\n return point_index_s_a_u_c, counts_a_u_c", "def _series_merging_map(self, map_list, feature_option=\"sift\"):\n print(\" --- Start ---\")\n # Transform state into 3 specified values\n for i in range(len(map_list)):\n map_list[i] = cv2.cvtColor(map_list[i], cv2.COLOR_RGB2GRAY)\n map_list[i] = MF._transform_state(map_list[i])\n \n\n map_ref = map_list[0]\n for i in range(len(map_list)-1):\n map_align = map_list[i+1]\n\n \n if feature_option == \"orb\":\n orb = cv2.ORB_create()\n key_points_1, descriptor_1 = orb.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = orb.detectAndCompute(map_align, None)\n \n elif feature_option == \"surf\":\n surf = cv2.xfeatures2d.SURF_create(400)\n key_points_1, descriptor_1 = 
surf.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = surf.detectAndCompute(map_align, None)\n else:\n siftDetector = cv2.xfeatures2d.SIFT_create()\n key_points_1, descriptor_1 = siftDetector.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = siftDetector.detectAndCompute(map_align, None)\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(descriptor_1, descriptor_2, k=2)\n\n good = []\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n \n pts_1, pts_2 = [], []\n for i in good:\n query_idx = i.queryIdx\n train_idx = i.trainIdx\n\n pts_1.append([\n key_points_1[query_idx].pt[0],\n key_points_1[query_idx].pt[1],\n ])\n pts_2.append([\n key_points_2[train_idx].pt[0],\n key_points_2[train_idx].pt[1],\n ])\n \n pts1 = np.array(pts_1)\n pts2 = np.array(pts_2)\n\n # relation, value, _ = RMM._ransac_find_rotation_translation(pts_set_1=pts2, pts_set_2=pts1, sigma=0.5, max_iter=5000)\n # print(\"- Inlier Percent: %f\"%value)\n # # Because the coordinates between the maps and the SIFT features are different:\n # # SIFT Features: Right: +x, Down: +y\n # # Maps: Down: +x, Right: +y\n # # Hence the dx and dy should be changed.\n # dx = relation[1]\n # dy = relation[0]\n # dyaw = relation[2]\n # print(\"- (x, y, t): (%f, %f, %f)\"%(dx,dy,dyaw))\n\n # # index, agr, dis = RMM._similarity_index(x=[dy, dx, dyaw], map1=map_ref, map2=map_align)\n # # print(\"Similarity Index: %f\\nAgree Number: %f\\nDisargee Number: %f\"%(index, agr, dis))\n # index, agr, dis, _ = RMM._similarity_index_2(x=[dx, dy, dyaw], map1=map_ref, map2=map_align)\n # print(\"- Similarity Index: %f\\n- Agree Number: %f\\n- Disargee Number: %f\"%(index, agr, dis))\n \n # map_merged = MF._merging_map(dx=dx, dy=dy, dtheta=dyaw, map1=map_ref, map2=map_align)\n # map_ref = map_merged.astype(np.uint8)\n # map_ref = MF._modify_map_size(merged_map=map_ref)\n\n relation, value, _ = RANSAC_Map_Merging()._ransac_find_all(pts_set_1=pts2, pts_set_2=pts1, sigma=5, max_iter=2000)\n dx = relation[1]\n dy = relation[0]\n dyaw = relation[2]\n dr = relation[3]\n print(\"- Inlier Percent: %f\"%value)\n print(\"- (dx, dy, dyaw, dr) = %f, %f, %f, %f\"%(dx,dy,dyaw, dr))\n map_merged = MAP_Function()._merging_map_ratio(dx=dx, dy=dy, dtheta=dyaw, dr=dr, map1=map_ref, map2=map_align)\n map_ref = map_merged.astype(np.uint8)\n map_ref = MF._modify_map_size(merged_map=map_ref)\n\n # return map_ref, (dx, dy, dyaw)\n return map_ref, (dx, dy, dyaw, dr)", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def genumerate(projection, right_most_path, dfs_codes, min_label, db, mapper):\n\tpm_backward = {}\n\tpm_forward = {}\n\tp_graph = projection_to_graph(dfs_codes, mapper)\n\n\tfor p in projection:\t# holds number of graphs of this pattern f_gk = [0 0 1] |gk|\n\t\th = history()\n\t\th.build(p)\n\t\tpm_backward = get_backward(p, right_most_path, h, pm_backward, \n\t\t\t\t\t\tdfs_codes, db)\n\t\tpm_forward = get_first_forward(p, right_most_path, h, pm_forward,\n\t\t\t\t\t\tdfs_codes, db, min_label)\n\t\tpm_forward = get_other_forward(p, right_most_path, h, pm_forward,\n\t\t\t\t\t\tdfs_codes, db, min_label)\n\treturn pm_backward, pm_forward", "def kmer_frequencies(kmertable_all, kmertable_filtered, kmertable_nonDT_hi, kmertable_nonDT_lo, data_mm, codon_seqs):\n\n def codon_bgfreq(codon_seqs, data_mm):\n \"\"\"\n get codon background frequencies from mRNA seqs\n seqs: dictionary of yeast mRNA sequences\n data_mc: dictionary of multi-mapping boolean\n 
\"\"\"\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts\n\n\n def codonfreqs_kmerdf(kmertable):\n \"\"\"\n get codon frequencies from kmertable\n \"\"\" \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)\n\n #kmertable_threshold = kmertable_all[kmertable_all['threshold']==1]\n kmertable_all2 = kmertable_all[kmertable_all['threshold']==0]\n\n\n cc_bg = codon_bgfreq(codon_seqs, data_mm)\n cc_all = codonfreqs_kmerdf(kmertable_all2)\t\t\t# without hits\n cc_theta = codonfreqs_kmerdf(kmertable_filtered)\n cc_nDT_hi = codonfreqs_kmerdf(kmertable_nonDT_hi) # min 16 max 4 at 1090\n cc_nDT_lo = codonfreqs_kmerdf(kmertable_nonDT_lo) # min 16 max 4 at 1090\n\n output = pd.DataFrame({'codon': list(codons_nonstop), \n 'kmer_theta': list(cc_theta), \n 'redundant': list(cc_all), \n 'background': list(cc_bg),\n 'nDThi': list(cc_nDT_hi),\n 'nDTlo': list(cc_nDT_lo) } ) \n output.to_csv(\"../data/figures/figure3/kmer_frequencies.txt\", header=True, index=False, sep='\\t')\n\n return output", "def build_amg_index_sets(L_sizes):\n neqns = L_sizes[0][0]\n velocityDOF=[]\n for start in range(1,3):\n velocityDOF.append(np.arange(start=start,\n stop=1+neqns,\n step=3,\n dtype='i'))\n velocityDOF_full=np.vstack(velocityDOF).transpose().flatten()\n velocity_u_DOF = []\n velocity_u_DOF.append(np.arange(start=0,\n stop=2*neqns//3,\n step=2,\n dtype='i'))\n velocity_u_DOF_full = np.vstack(velocity_u_DOF).transpose().flatten()\n velocity_v_DOF = []\n velocity_v_DOF.append(np.arange(start=1,\n stop=1+2*neqns//3,\n step=2,\n dtype='i'))\n velocity_v_DOF_full = np.vstack(velocity_v_DOF).transpose().flatten()\n isvelocity = PETSc.IS()\n isvelocity.createGeneral(velocityDOF_full)\n isu = PETSc.IS()\n isu.createGeneral(velocity_u_DOF_full)\n isv = PETSc.IS()\n isv.createGeneral(velocity_v_DOF_full)\n return [isvelocity, isu, isv]", "def test_4d_two_index_freq():\n fmask = \"common_data/4d_pipe/ft_2index/test%02d%03d.ft4\"\n dic,data = ng.pipe.read_lowmem(fmask)\n\n fname = \"common_data/4d_pipe/ft_2index/test04005.ft4\"\n sdic,sdata = ng.pipe.read(fname)\n\n assert data.shape == (8, 16, 16, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2,3],2) == -2703.98\n assert round(data[5,9,11,891],2) == 5212.07\n check_ppm_limits(dic,data,0,[321.03, -65.77])\n check_ppm_limits(dic,data,1,[321.03, -93.40])\n check_ppm_limits(dic,data,2,[232.62, -16.04])\n check_ppm_limits(dic,data,3,[298.92, -98.83])\n \n # check the slice\n assert sdata.shape == (16, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 602.70\n assert round(sdata[12,900],2) == 2717.60\n check_ppm_limits(sdic,sdata,0,[232.62, -16.04])\n check_ppm_limits(sdic,sdata,1,[298.92, -98.83])\n\n # slice/data matching\n assert_array_equal(data[3,4],sdata)\n\n 
lowmem_write_readback_4D(dic,data)", "def _get_group_index(self, index):\n\n g_index = 0\n for group in self.groups:\n if group[0] == index:\n g_index = group[1]\n break\n return g_index", "def test_4_2_5D_rec_splits(self):\n check = [(0.3, -3.9, -1.5, -3.0, -9.5), (1.0, 11.8, -1.1, 5.0, 11000.5),\n (1.0, -3.9, -1.5, -3.0, -9.5), (1.0, 11.8, -1.5, -3.0, -9.5),\n (1.0, 11.8, -1.1, -3.0, -9.5), (1.0, 11.8, -1.1, 5.0, -9.5),\n (1.0, 11.8, -1.1, -3.0, 11000.5), (1.0, 11.8, -1.5, 5.0, -9.5),\n (1.0, 11.8, -1.5, 5.0, 11000.5),\n (1.0, 11.8, -1.5, -3.0, 11000.5),\n (1.0, -3.9, -1.1, -3.0, -9.5), (1.0, -3.9, -1.1, 5.0, -9.5),\n (1.0, -3.9, -1.1, 5.0, 11000.5),\n (1.0, -3.9, -1.1, -3.0, 11000.5), (1.0, -3.9, -1.5, 5.0, -9.5),\n (1.0, -3.9, -1.5, 5.0, 11000.5),\n (1.0, -3.9, -1.5, -3.0, 11000.5),\n (0.3, 11.8, -1.5, -3.0, -9.5), (0.3, 11.8, -1.1, -3.0, -9.5),\n (0.3, 11.8, -1.1, 5.0, -9.5), (0.3, 11.8, -1.1, 5.0, 11000.5),\n (0.3, 11.8, -1.1, -3.0, 11000.5), (0.3, 11.8, -1.5, 5.0, -9.5),\n (0.3, 11.8, -1.5, 5.0, 11000.5),\n (0.3, 11.8, -1.5, -3.0, 11000.5),\n (0.3, -3.9, -1.1, -3.0, -9.5), (0.3, -3.9, -1.1, 5.0, -9.5),\n (0.3, -3.9, -1.1, 5.0, 11000.5),\n (0.3, -3.9, -1.1, -3.0, 11000.5), (0.3, -3.9, -1.5, 5.0, -9.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, -3.9, -1.5, -3.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5), (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, -3.0, 5495.5), (0.3, -3.9, -1.5, 1.0, -9.5),\n (0.3, -3.9, -1.3, -3.0, 5495.5), (0.3, -3.9, -1.3, -3.0, -9.5),\n (0.3, -3.9, -1.3, 1.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.3, 3.95, -1.5, -3.0, 5495.5),\n (0.3, 3.95, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, 5495.5),\n (0.3, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.3, 1.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5),\n (0.65, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.5, -3.0, -9.5),\n (0.65, -3.9, -1.5, 1.0, -9.5),\n (0.65, -3.9, -1.3, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, -9.5),\n (0.65, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.3, -3.0, -9.5),\n (0.65, 3.95, -1.3, 1.0, -9.5),\n (0.475, 0.025000000000000133, -1.4, -1.0, 2743.0),\n (1.0, 3.95, -1.3, 1.0, 5495.5),\n (1.0, 11.8, -1.3, 1.0, 5495.5), (1.0, 11.8, -1.1, 1.0, 5495.5),\n (1.0, 11.8, -1.1, 5.0, 5495.5),\n (1.0, 11.8, -1.1, 1.0, 11000.5),\n (1.0, 11.8, -1.3, 5.0, 5495.5),\n (1.0, 11.8, -1.3, 5.0, 11000.5),\n (1.0, 11.8, -1.3, 1.0, 11000.5),\n (1.0, 3.95, -1.1, 1.0, 5495.5),\n (1.0, 3.95, -1.1, 5.0, 5495.5),\n (1.0, 3.95, -1.1, 5.0, 11000.5),\n (1.0, 3.95, -1.1, 1.0, 11000.5),\n (1.0, 3.95, -1.3, 5.0, 5495.5),\n (1.0, 3.95, -1.3, 5.0, 11000.5),\n (1.0, 3.95, -1.3, 1.0, 11000.5),\n (0.65, 11.8, -1.3, 1.0, 5495.5),\n (0.65, 11.8, -1.1, 1.0, 5495.5),\n (0.65, 11.8, -1.1, 5.0, 5495.5),\n (0.65, 11.8, -1.1, 5.0, 11000.5),\n (0.65, 11.8, -1.1, 1.0, 11000.5),\n (0.65, 11.8, -1.3, 5.0, 5495.5),\n (0.65, 11.8, -1.3, 5.0, 11000.5),\n (0.65, 11.8, -1.3, 1.0, 11000.5),\n (0.65, 3.95, -1.1, 1.0, 5495.5),\n (0.65, 3.95, -1.1, 5.0, 5495.5),\n (0.65, 3.95, -1.1, 5.0, 11000.5),\n (0.65, 3.95, -1.1, 1.0, 11000.5),\n (0.65, 3.95, -1.3, 5.0, 5495.5),\n (0.65, 3.95, -1.3, 5.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.825, 7.875, -1.2000000000000002, 3.0, 8248.0),\n (1.0, -3.9, -1.3, 1.0, 5495.5), (1.0, -3.9, -1.5, 
1.0, 5495.5),\n (1.0, -3.9, -1.5, -3.0, 5495.5), (1.0, -3.9, -1.5, 1.0, -9.5),\n (1.0, -3.9, -1.3, -3.0, 5495.5), (1.0, -3.9, -1.3, -3.0, -9.5),\n (1.0, -3.9, -1.3, 1.0, -9.5),\n (1.0, 3.95, -1.5, 1.0, 5495.5),\n (1.0, 3.95, -1.5, -3.0, 5495.5),\n (1.0, 3.95, -1.5, -3.0, -9.5),\n (1.0, 3.95, -1.5, 1.0, -9.5),\n (1.0, 3.95, -1.3, -3.0, 5495.5),\n (1.0, 3.95, -1.3, -3.0, -9.5),\n (1.0, 3.95, -1.3, 1.0, -9.5),\n (0.825, 0.025000000000000133, -1.4, -1.0, 2743.0),\n (1.0, 11.8, -1.5, 1.0, 5495.5),\n (1.0, 11.8, -1.5, -3.0, 5495.5), (1.0, 11.8, -1.5, 1.0, -9.5),\n (1.0, 11.8, -1.3, -3.0, 5495.5), (1.0, 11.8, -1.3, -3.0, -9.5),\n (1.0, 11.8, -1.3, 1.0, -9.5),\n (0.65, 11.8, -1.5, 1.0, 5495.5),\n (0.65, 11.8, -1.5, -3.0, 5495.5),\n (0.65, 11.8, -1.5, -3.0, -9.5),\n (0.65, 11.8, -1.5, 1.0, -9.5),\n (0.65, 11.8, -1.3, -3.0, 5495.5),\n (0.65, 11.8, -1.3, -3.0, -9.5),\n (0.65, 11.8, -1.3, 1.0, -9.5),\n (0.825, 7.875, -1.4, -1.0, 2743.0),\n (1.0, 11.8, -1.1, -3.0, 5495.5), (1.0, 11.8, -1.1, 1.0, -9.5),\n (1.0, 3.95, -1.1, -3.0, 5495.5),\n (1.0, 3.95, -1.1, -3.0, -9.5),\n (1.0, 3.95, -1.1, 1.0, -9.5),\n (0.65, 11.8, -1.1, -3.0, 5495.5),\n (0.65, 11.8, -1.1, -3.0, -9.5),\n (0.65, 11.8, -1.1, 1.0, -9.5),\n (0.65, 3.95, -1.1, -3.0, 5495.5),\n (0.65, 3.95, -1.1, -3.0, -9.5),\n (0.65, 3.95, -1.1, 1.0, -9.5),\n (0.825, 7.875, -1.2000000000000002, -1.0, 2743.0),\n (1.0, 11.8, -1.3, 5.0, -9.5),\n (1.0, 3.95, -1.1, 5.0, -9.5),\n (1.0, 3.95, -1.3, 5.0, -9.5),\n (0.65, 11.8, -1.1, 5.0, -9.5),\n (0.65, 11.8, -1.3, 5.0, -9.5),\n (0.65, 3.95, -1.1, 5.0, -9.5),\n (0.65, 3.95, -1.3, 5.0, -9.5),\n (0.825, 7.875, -1.2000000000000002, 3.0, 2743.0),\n (1.0, 11.8, -1.3, -3.0, 11000.5),\n (1.0, 3.95, -1.1, -3.0, 11000.5),\n (1.0, 3.95, -1.3, -3.0, 11000.5),\n (0.65, 11.8, -1.1, -3.0, 11000.5),\n (0.65, 11.8, -1.3, -3.0, 11000.5),\n (0.65, 3.95, -1.1, -3.0, 11000.5),\n (0.65, 3.95, -1.3, -3.0, 11000.5),\n (0.825, 7.875, -1.2000000000000002, -1.0, 8248.0),\n (1.0, 11.8, -1.5, 5.0, 5495.5),\n (1.0, 3.95, -1.5, 5.0, 5495.5),\n (1.0, 3.95, -1.5, 5.0, -9.5),\n (0.65, 11.8, -1.5, 5.0, 5495.5),\n (0.65, 11.8, -1.5, 5.0, -9.5),\n (0.65, 3.95, -1.5, 5.0, 5495.5),\n (0.65, 3.95, -1.5, 5.0, -9.5),\n (0.825, 7.875, -1.4, 3.0, 2743.0),\n (1.0, 11.8, -1.5, 1.0, 11000.5),\n (1.0, 3.95, -1.5, 5.0, 11000.5),\n (1.0, 3.95, -1.5, 1.0, 11000.5),\n (0.65, 11.8, -1.5, 5.0, 11000.5),\n (0.65, 11.8, -1.5, 1.0, 11000.5),\n (0.65, 3.95, -1.5, 5.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.825, 7.875, -1.4, 3.0, 8248.0),\n (1.0, 3.95, -1.5, -3.0, 11000.5),\n (0.65, 11.8, -1.5, -3.0, 11000.5),\n (0.65, 3.95, -1.5, -3.0, 11000.5),\n (0.825, 7.875, -1.4, -1.0, 8248.0),\n (1.0, -3.9, -1.1, 1.0, 5495.5),\n (1.0, -3.9, -1.1, -3.0, 5495.5), (1.0, -3.9, -1.1, 1.0, -9.5),\n (0.65, -3.9, -1.1, 1.0, 5495.5),\n (0.65, -3.9, -1.1, -3.0, 5495.5),\n (0.65, -3.9, -1.1, -3.0, -9.5),\n (0.65, -3.9, -1.1, 1.0, -9.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, -1.0,\n 2743.0), (1.0, -3.9, -1.1, 5.0, 5495.5),\n (1.0, -3.9, -1.3, 5.0, 5495.5), (1.0, -3.9, -1.3, 5.0, -9.5),\n (0.65, -3.9, -1.1, 5.0, 5495.5),\n (0.65, -3.9, -1.1, 5.0, -9.5),\n (0.65, -3.9, -1.3, 5.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, -9.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, 3.0, 2743.0),\n (1.0, -3.9, -1.1, 1.0, 11000.5),\n (1.0, -3.9, -1.3, 5.0, 11000.5),\n (1.0, -3.9, -1.3, 1.0, 11000.5),\n (0.65, -3.9, -1.1, 5.0, 11000.5),\n (0.65, -3.9, -1.1, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5), (\n 0.825, 0.025000000000000133, 
-1.2000000000000002, 3.0, 8248.0),\n (1.0, -3.9, -1.3, -3.0, 11000.5),\n (0.65, -3.9, -1.1, -3.0, 11000.5),\n (0.65, -3.9, -1.3, -3.0, 11000.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, -1.0,\n 8248.0), (1.0, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.5, 5.0, -9.5),\n (0.825, 0.025000000000000133, -1.4, 3.0, 2743.0),\n (1.0, -3.9, -1.5, 1.0, 11000.5),\n (0.65, -3.9, -1.5, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.825, 0.025000000000000133, -1.4, 3.0, 8248.0),\n (0.65, -3.9, -1.5, -3.0, 11000.5),\n (0.825, 0.025000000000000133, -1.4, -1.0, 8248.0),\n (0.3, 11.8, -1.3, 1.0, 5495.5), (0.3, 11.8, -1.5, 1.0, 5495.5),\n (0.3, 11.8, -1.5, -3.0, 5495.5), (0.3, 11.8, -1.5, 1.0, -9.5),\n (0.3, 11.8, -1.3, -3.0, 5495.5), (0.3, 11.8, -1.3, -3.0, -9.5),\n (0.3, 11.8, -1.3, 1.0, -9.5),\n (0.475, 7.875, -1.4, -1.0, 2743.0),\n (0.3, 11.8, -1.1, 1.0, 5495.5),\n (0.3, 11.8, -1.1, -3.0, 5495.5), (0.3, 11.8, -1.1, 1.0, -9.5),\n (0.3, 3.95, -1.1, 1.0, 5495.5),\n (0.3, 3.95, -1.1, -3.0, 5495.5),\n (0.3, 3.95, -1.1, -3.0, -9.5),\n (0.3, 3.95, -1.1, 1.0, -9.5),\n (0.475, 7.875, -1.2000000000000002, -1.0, 2743.0),\n (0.3, 11.8, -1.1, 5.0, 5495.5), (0.3, 11.8, -1.3, 5.0, 5495.5),\n (0.3, 11.8, -1.3, 5.0, -9.5),\n (0.3, 3.95, -1.1, 5.0, 5495.5),\n (0.3, 3.95, -1.1, 5.0, -9.5),\n (0.3, 3.95, -1.3, 5.0, 5495.5),\n (0.3, 3.95, -1.3, 5.0, -9.5),\n (0.475, 7.875, -1.2000000000000002, 3.0, 2743.0),\n (0.3, 11.8, -1.1, 1.0, 11000.5),\n (0.3, 11.8, -1.3, 5.0, 11000.5),\n (0.3, 11.8, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.1, 5.0, 11000.5),\n (0.3, 3.95, -1.1, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 5.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5),\n (0.475, 7.875, -1.2000000000000002, 3.0, 8248.0),\n (0.3, 11.8, -1.3, -3.0, 11000.5),\n (0.3, 3.95, -1.1, -3.0, 11000.5),\n (0.3, 3.95, -1.3, -3.0, 11000.5),\n (0.475, 7.875, -1.2000000000000002, -1.0, 8248.0),\n (0.3, 11.8, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.5, 5.0, -9.5),\n (0.475, 7.875, -1.4, 3.0, 2743.0),\n (0.3, 11.8, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.475, 7.875, -1.4, 3.0, 8248.0),\n (0.3, 3.95, -1.5, -3.0, 11000.5),\n (0.475, 7.875, -1.4, -1.0, 8248.0),\n (0.3, -3.9, -1.1, 1.0, 5495.5),\n (0.3, -3.9, -1.1, -3.0, 5495.5), (0.3, -3.9, -1.1, 1.0, -9.5),\n (0.475, 0.025000000000000133, -1.2000000000000002, -1.0,\n 2743.0), (0.3, -3.9, -1.1, 5.0, 5495.5),\n (0.3, -3.9, -1.3, 5.0, 5495.5), (0.3, -3.9, -1.3, 5.0, -9.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, 3.0, 2743.0),\n (0.3, -3.9, -1.1, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 5.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, 3.0, 8248.0),\n (0.3, -3.9, -1.3, -3.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, -1.0,\n 8248.0), (0.3, -3.9, -1.5, 5.0, 5495.5),\n (0.475, 0.025000000000000133, -1.4, 3.0, 2743.0),\n (0.3, -3.9, -1.5, 1.0, 11000.5),\n (0.475, 0.025000000000000133, -1.4, 3.0, 8248.0),\n (0.475, 0.025000000000000133, -1.4, -1.0, 8248.0)]\n nn_checks = {\n (0.3, -3.9, -1.5, -3.0, -9.5): [(0.3, -3.9, -1.5, 1.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 1.0, -9.5),\n (0.65, -3.9, -1.5, 1.0, -9.5),\n (0.3, -3.9, -1.3, 1.0, -9.5),\n (0.3, 3.95, -1.5, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.3, -3.0, 
-9.5),\n (0.3, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.5, -3.0, -9.5),\n (0.65, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, 5495.5),\n (0.3, -3.9, -1.3, -3.0, -9.5),\n (0.65, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, 5495.5),\n (0.3, -3.9, -1.3, -3.0, 5495.5),\n (0.3, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, -9.5),\n (0.65, -3.9, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5), (\n 0.475, 0.025000000000000133, -1.4,\n -1.0, 2743.0),\n (0.3, 3.95, -1.3, 1.0, -9.5)],\n (0.3, -3.9, -1.5, 1.0, 11000.5): [(0.3, -3.9, -1.5, -3.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.4,\n 3.0, 8248.0),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5),\n (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5), (\n 0.475, 0.025000000000000133, -1.4,\n -1.0, 8248.0),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5)],\n (0.475, 0.025000000000000133, -1.4, 3.0, 8248.0): [\n (0.65, 3.95, -1.5, 5.0, 11000.5),\n (0.65, 3.95, -1.3, 5.0, 11000.5),\n (0.3, -3.9, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5), (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5), (0.3, 3.95, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.3, 5.0, 5495.5), (0.65, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, 5495.5), (0.3, -3.9, -1.5, 5.0, 5495.5),\n (0.3, -3.9, -1.3, 5.0, 5495.5), (0.65, 3.95, -1.3, 5.0, 5495.5),\n (0.65, 3.95, -1.5, 5.0, 5495.5), (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 5.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 5.0, 11000.5)]}\n\n init_triangulation(5, 1, check, nn_checks,\n bounds=[(0.3, 1), (-3.9, 11.8), (-1.5, -1.1),\n (-3, 5), (-9.5, 11000.5)])", "def flagser_contain(adjacency_matrix):\n N=adjacency_matrix.shape[0]\n row,col=convertCOO(adjacency_matrix,ret_data=False)\n return compute_cell_count(N, np.transpose(np.array( (row,col))))", "def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds", "def test_splitting_on_feature(num = 0):\n left = [0,0]\n right = [0,0]\n for i, side in enumerate(train_matrix_df[num]):\n if int(side) == 0: \n if train_labels_df.ix[i]['candy_id'] == 0:\n left[0] += 1\n else:\n left[1] += 1\n else:\n if train_labels_df.ix[i]['candy_id'] == 0:\n right[0] += 1\n else:\n right[1] += 1\n\n #print \" %s goes to the %i side\" %(train_labels_df.ix[i]['candy'], side)\n\n print str(left) + \"<--left -- + -- right-->\" + str(right)", "def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, 
_ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n 
temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")", "def group_brakedown(group_list):\r\n group_types = {}\r\n for group1 in group_list:\r\n group_types[(group1.size,group1.valence)] = 1 + group_types.get((group1.size,group1.valence), 0)\r\n return group_types", "def nb_data_bands(dataset):\n alphaband = dataset.GetRasterBand(1).GetMaskBand()\n if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or\n dataset.RasterCount == 4 or\n dataset.RasterCount == 2):\n return dataset.RasterCount - 1\n else:\n return 
dataset.RasterCount", "def all_valid(self, tower) -> int:\r\n count = 0\r\n for layer in range(1, len(tower.tower)):\r\n for index in range(1, 4):\r\n if self.is_valid(layer, index, tower):\r\n count += 1\r\n \r\n return count", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= set(transaction):\n support_count += 1\n return support_count", "def compatibility_g_a(gen, anot):\n print(\"Checking compatibility of genome with annotation file\")\n r_code = 0\n for seq in gen:\n if seq not in anot:\n print(\"WARN\\t{} sequence not found in annotaion file\".format(seq))\n r_code = 1\n for seq in anot:\n if seq not in gen:\n print(\"FAIL\\t{} sequence in annotation \"\n \"but not in genome.\".format(seq))\n r_code = 2\n elif anot[seq] > gen[seq]:\n print(\"FAIL\\tannotation interval on {} sequence is out of \"\n \"reference range.\".format(seq))\n r_code = 2\n print()\n return r_code", "def eafindicators(npsA, npsB):\n # calcular os indicadores com o eaf conjunto\n lt, ind = libaft.eaf2d(npsA + npsB, ind=True)\n # espalmar lista, ou seja,\n # (m listas) * (n pontos) * (b bits) -> lista de (m * n pontos) * (b bits)\n flat_ind = [point for level in ind for point in level]\n return flat_ind", "def k_ary_support_count(itemset, tagnamesdict):\n X = itemset[0]\n x_list = tagnamesdict[X]\n inter = set(x_list)\n\n for i in range(1, len(itemset)):\n Y = itemset[i]\n y_list = tagnamesdict[Y]\n inter = inter.intersection(y_list)\n\n support_count = len(inter)\n return support_count", "def load_gred_dat(self, wannier_txt_file, index_word = \"WANNIER FUNCTIONS - LIST OF ACTIVE BANDS\", permutation = None):\n f = open(wannier_txt_file, \"r\")\n F = f.read()\n f.close()\n F = os.linesep.join([s for s in F.splitlines() if s]) #remove empty lines\n F = F.split(index_word)[1].split(\"WANNIER\")[0].split(\"G = \")\n \n bands = np.array([literal_eval(i) for i in F[0].split()])-1 # indexing begins at 0\n\n for i in np.arange(1,len(F[1:])+1):\n # Reading block index vector\n \n \n G = -1*np.array([literal_eval(j) for j in F[i].split(\")\")[0].split(\"(\")[1].split()])\n\n gmap = self.mapping[self._c2i(G)]\n \n # parse block\n \n B = F[i].split(\")\")[1]\n \n # read elements in block\n\n for line in B.split(\"\\n\")[1:]:\n # note : Crystal is column-major (fortran)\n row_list = [literal_eval(j) for j in line.split()]\n if len(row_list)!=0:\n if len(row_list)==1:\n # row_list contains index\n columns = np.array(row_list) -1\n else:\n if type(row_list[1]) is int:\n # line contains indices\n columns = np.array(row_list) -1\n \n else:\n # line contains elements\n row = row_list[0] - 1\n elements = np.array(row_list[1:]) \n \n self.blocks[ gmap ][row, columns + bands[0]] = elements #row and column \n return bands", "def numIslands3(self, grid: List[List[str]]) -> int:\n m = len(grid)\n if m > 0:\n n = len(grid[0])\n else:\n return 0\n\n def dfs(grid, i, j):\n if grid[i][j] != '0':\n grid[i][j] = '0'\n\n for direction in self.directions(grid, i, j):\n dfs(grid, direction[0], direction[1])\n\n island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n island += 1 # count the number of CCs\n dfs(grid, i, j)\n return island", "def build_index_groups(train):\n nz_row, nz_col = train.nonzero()\n nz_train = list(zip(nz_row, nz_col))\n\n grouped_nz_train_byrow = group_by(nz_train, index=0)\n nz_row_colindices = [(g, np.array([v[1] for v in value]))\n for g, value in grouped_nz_train_byrow]\n\n grouped_nz_train_bycol = group_by(nz_train, index=1)\n 
nz_col_rowindices = [(g, np.array([v[0] for v in value]))\n for g, value in grouped_nz_train_bycol]\n return nz_train, nz_row_colindices, nz_col_rowindices", "def test_faceDetect_Group1(self):\n group1_antall, group1_image = detectFace(group1)\n group1_expectedNumber = 7\n self.assertEqual(group1_antall, group1_expectedNumber)", "def test(indices_to_visit = None):\n ##0 Chicago\n ##1 New York City\n ##2 Los Angeles\n ##3 Minneapolis\n ##4 Denver\n ##5 Dallas\n ##6 Seattle\n ##7 Boston\n ##8 San Francisco\n ##9 St. Louis\n ##10 Houston\n ##11 Phoenix\n ##12 Salt Lake City\n ##13 Miami\n ##14 Atlanta\n ##15 Kansas City\n home_index = 15 # Kansas city\n # 15x15 matrix with main diagonal consisting of 0s and to which data is mirrored along\n # (values are derived from external resource and multiplied by 1000 for higher accuracy)\n matrix = np.array([[0.0, 1148413.3550047704, 2813453.6297408855, 572861.4368351421, 1483440.7452179305, 1296355.2188721865, 2801269.1215845253, 1370943.3069385102, 2996683.256068982, 422589.4697157836, 1515737.0196676727, 2343639.7107855356, 2031500.319603397, 1913900.3015914203, 946854.1020487415, 665894.0336505901],\n [1148413.3550047704, 0.0, 3949451.153672887, 1642119.4792808082, 2628946.6435325537, 2212019.1209020815, 3882177.952930788, 306997.0343229422, 4144977.810718553, 1408454.3261387087, 2286054.8575902223, 3455343.3108375454, 3179102.5335818897, 1754834.3710577146, 1202616.154562711, 1766599.1336905772],\n [2813453.6297408855, 3949451.153672887, 0.0, 2455296.3791196346, 1339227.410707824, 1998182.1420783552, 1545364.434045008, 4184394.186016967, 559978.4273194656, 2560790.9591738936, 2212581.51715849, 575975.8749662543, 933602.6426595236, 3767490.41517038, 3120118.850020503, 2186473.1552241463],\n [572861.4368351421, 1642119.4792808082, 2455296.3791196346, 0.0, 1127312.7583590776, 1390159.7734006236, 2249169.1308160927, 1811513.5290266906, 2554165.8167895717, 750916.7305340832, 1701189.1538312144, 2062079.2399570548, 1590460.9488364782, 2434801.332310659, 1462408.5353501518, 662752.1291133759],\n [1483440.7452179305, 2628946.6435325537, 1339227.410707824, 1127312.7583590776, 0.0, 1067257.7993323756, 1646308.7967673023, 2852307.4164419994, 1530510.2790658756, 1283707.511393525, 1414308.8805983758, 943721.1931707633, 598728.757362067, 2779561.192116527, 1952618.0544916363, 899656.1020173575],\n [1296355.2188721865, 2212019.1209020815, 1998182.1420783552, 1390159.7734006236, 1067257.7993323756, 0.0, 2709804.112590561, 2500314.4507069485, 2390841.4329337194, 882457.80942383, 361482.7025425731, 1427995.4150203674, 1610768.421819668, 1788903.6065106322, 1161480.3557326929, 730446.8613086065],\n [2801269.1215845253, 3882177.952930788, 1545364.434045008, 2249169.1308160927, 1646308.7967673023, 2709804.112590561, 0.0, 4018059.834330202, 1093104.7332788548, 2778905.575804111, 3046648.362755992, 1794989.6453295103, 1129464.5539648102, 4404737.747850686, 3516794.375197078, 2427457.036285458],\n [1370943.3069385102, 306997.0343229422, 4184394.186016967, 1811513.5290266906, 2852307.4164419994, 2500314.4507069485, 4018059.834330202, 0.0, 4350710.853063807, 1673216.4080939887, 2586942.3262796295, 3706392.097841614, 3382851.415271485, 2022974.6418062754, 1509585.60107986, 2015770.1390589625],\n [2996683.256068982, 4144977.810718553, 559978.4273194656, 2554165.8167895717, 1530510.2790658756, 2390841.4329337194, 1093104.7332788548, 4350710.853063807, 0.0, 2812916.3098878833, 2650547.941880299, 1053620.7288649315, 967859.8344376946, 4179636.203479384, 3448359.745690545, 
2428862.4239271535],\n [422589.4697157836, 1408454.3261387087, 2560790.9591738936, 750916.7305340832, 1283707.511393525, 882457.80942383, 2778905.575804111, 1673216.4080939887, 2812916.3098878833, 0.0, 1093601.4408876144, 2050115.5214378452, 1872971.1741522516, 1708236.6189296674, 752855.8488125347, 384122.2000072272],\n [1515737.0196676727, 2286054.8575902223, 2212581.51715849, 1701189.1538312144, 1414308.8805983758, 361482.7025425731, 3046648.362755992, 2586942.3262796295, 2650547.941880299, 1093601.4408876144, 0.0, 1636770.4499809493, 1932616.2801687205, 1559260.024532222, 1130480.278513877, 1039856.4844335921],\n [2343639.7107855356, 3455343.3108375454, 575975.8749662543, 2062079.2399570548, 943721.1931707633, 1427995.4150203674, 1794989.6453295103, 3706392.097841614, 1053620.7288649315, 2050115.5214378452, 1636770.4499809493, 0.0, 812548.5062332726, 3191662.5092484164, 2564665.4531581327, 1690942.142157212],\n [2031500.319603397, 3179102.5335818897, 933602.6426595236, 1590460.9488364782, 598728.757362067, 1610768.421819668, 1129464.5539648102, 3382851.415271485, 967859.8344376946, 1872971.1741522516, 1932616.2801687205, 812548.5062332726, 0.0, 3364908.7076308434, 2551338.215149899, 1490589.7393085626],\n [1913900.3015914203, 1754834.3710577146, 3767490.41517038, 2434801.332310659, 2779561.192116527, 1788903.6065106322, 4404737.747850686, 2022974.6418062754, 4179636.203479384, 1708236.6189296674, 1559260.024532222, 3191662.5092484164, 3364908.7076308434, 0.0, 973244.7750437199, 2000112.4162614697],\n [946854.1020487415, 1202616.154562711, 3120118.850020503, 1462408.5353501518, 1952618.0544916363, 1161480.3557326929, 3516794.375197078, 1509585.60107986, 3448359.745690545, 752855.8488125347, 1130480.278513877, 2564665.4531581327, 2551338.215149899, 973244.7750437199, 0.0, 1089830.6426635552],\n [665894.0336505901, 1766599.1336905772, 2186473.1552241463, 662752.1291133759, 899656.1020173575, 730446.8613086065, 2427457.036285458, 2015770.1390589625, 2428862.4239271535, 384122.2000072272, 1039856.4844335921, 1690942.142157212, 1490589.7393085626, 2000112.4162614697, 1089830.6426635552, 0.0]])\n\n solver = FacilityOrderSolver(matrix, home_index)\n \n return solver.solve(indices_to_visit)", "def gain_range_indexes(self, grinds, smask):\n if grinds is not None: return grinds\n if smask.ndim<4: return None\n ngranges = smask.shape[0]\n if ngranges>5: ngranges=5 # protection for epix10ka which have 7 gain ranges, but two of them - evaluated\n return tuple(range(ngranges))" ]
[ "0.5661988", "0.5445959", "0.5352282", "0.52611715", "0.52502793", "0.5246728", "0.52346057", "0.52059287", "0.5193185", "0.5147294", "0.5127264", "0.5097809", "0.5094135", "0.5081257", "0.50290185", "0.49999043", "0.49905908", "0.49850026", "0.49826434", "0.49777365", "0.49676508", "0.49670532", "0.49543184", "0.4945511", "0.49411052", "0.49334717", "0.4931088", "0.4924623", "0.49231812", "0.492045", "0.49191618", "0.4917945", "0.49053434", "0.49044743", "0.4899347", "0.48979753", "0.4896724", "0.4895164", "0.48901284", "0.48889154", "0.48770997", "0.4856392", "0.48423886", "0.48404756", "0.48399782", "0.4838614", "0.48368487", "0.48342425", "0.4831339", "0.48254535", "0.48135948", "0.48029932", "0.4799826", "0.47967863", "0.47909856", "0.47906128", "0.47900185", "0.47741297", "0.47716245", "0.47676316", "0.47581157", "0.47571316", "0.475598", "0.47547647", "0.47529653", "0.4750091", "0.47461906", "0.47399017", "0.47380167", "0.47375947", "0.47375947", "0.47297075", "0.47278354", "0.472664", "0.47226793", "0.4717438", "0.47168332", "0.47149634", "0.47117656", "0.47089857", "0.4708081", "0.47063848", "0.4697397", "0.46925515", "0.46918234", "0.4690312", "0.4689709", "0.46888882", "0.46868595", "0.46862823", "0.46861285", "0.46851376", "0.46835828", "0.46829644", "0.46822444", "0.46795988", "0.4678766", "0.4673875", "0.467155", "0.46681315" ]
0.49147612
32
Returns a nested element label list.
def element_labels(hosts, *loci): num_loci = int(len(loci) / 2) # number of loci elements = [] for i in hosts: for j in range(num_loci): locus_abbrev, allele_range = loci[j*2:(j+1)*2] elements.append( ["%s%s%d" % (i,locus_abbrev,k) for k in allele_range] ) return elements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_labels(self, depth=None):\n labels = libCopy.deepcopy(self.labels)\n if depth is None or depth > 0:\n for element in self.elements:\n if isinstance(element, CellReference):\n labels.extend(\n element.get_labels(None if depth is None else depth -\n 1))\n elif isinstance(element, CellArray):\n labels.extend(\n element.get_labels(None if depth is None else depth -\n 1))\n return labels", "def get_labels(self):\n return []", "def labels_list(issue):\n return [x['name'] for x in issue['labels']]", "def list_of_labels(self):\n L = np.unique(self.box_label)\n return np.union1d(L, self.geom.list_of_elements_labels())", "def labels(self):\n return self._labels", "def get_labels(self) -> List[str]:\n raise NotImplementedError()", "def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])", "def to_list(self):\n root = self.label\n if len(self.children) > 0:\n children = [c.to_list() for c in self.children]\n else:\n children = []\n return [root, [children]]", "def provide_label(self):\n return [(k, v.shape) for k, v in self.label]", "def get_labels(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0)\n st = numpy.array([-st, st])\n if self.x_reflection:\n xrefl = numpy.array([1, -1], dtype='int')\n if self.magnification is not None:\n mag = numpy.array([self.magnification, self.magnification])\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n labels = self.ref_cell.get_labels(depth=depth)\n for lbl in labels:\n if self.x_reflection:\n lbl.position = lbl.position * xrefl\n if self.magnification is not None:\n lbl.position = lbl.position * mag\n if self.rotation is not None:\n lbl.position = lbl.position * ct + lbl.position[::-1] * st\n if self.origin is not None:\n lbl.position = lbl.position + orgn\n return labels", "def get_labels(self) -> List[str]:\n return self.labels", "def get_labels(self):\n return [token.label for token in self.tokens]", "def labels(self) -> list:\n return self._labels", "def get_labels(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0)\n st = numpy.array([-st, st])\n if self.magnification is not None:\n mag = numpy.array([self.magnification, self.magnification])\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n if self.x_reflection:\n xrefl = numpy.array([1, -1], dtype='int')\n cell_labels = self.ref_cell.get_labels(depth=depth)\n labels = []\n for ii in range(self.columns):\n for jj in range(self.rows):\n spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])\n for clbl in cell_labels:\n lbl = libCopy.deepcopy(clbl)\n if self.magnification:\n lbl.position = lbl.position * mag + spc\n else:\n lbl.position = lbl.position + spc\n if self.x_reflection:\n lbl.position = lbl.position * xrefl\n if self.rotation is not None:\n lbl.position = lbl.position * ct + lbl.position[::-1] * st\n if self.origin is not None:\n lbl.position = lbl.position + orgn\n labels.append(lbl)\n return labels", "def get_labels_decomposed(self) -> List[List[str]]:\n return [list(label) for label in self.labels]", "def _get_labels(self, ind):\n pass", "def getLabels(self):\n return 
self.numToLabel", "def provide_label(self):\r\n return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.label]", "def get_labels(self):\n\t\traise NotImplementedError()", "def label_list(self, labnames=None):\n vallabs = self._vallabs\n if labnames is None:\n labnames = vallabs.keys()\n else:\n if isinstance(labnames, str):\n labnames = (labnames,)\n elif (not isinstance(labnames, collections.Iterable)\n or not all(isinstance(value, str) for value in labnames)):\n raise TypeError(\"labnames should be str or iterable of str\") \n labnames = set(name for value in labnames\n for name in value.split())\n if not labnames.issubset(vallabs.keys()):\n bad_names = \", \".join(str(lbl) for lbl in \n labnames.difference(vallabs.keys()))\n raise KeyError(bad_names + \" are not defined labels\")\n for name in labnames:\n print(name + \":\")\n lbldict = vallabs[name]\n for value in lbldict:\n print(\"{:>12} {}\".format(value, lbldict[value]))", "def get_labels(self):\r\n raise NotImplementedError()", "def labels_all(self):\n return self._labels_all", "def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def labels_set(self):\n if len(self.children) == 0:\n return {self.label}\n else:\n children_labels = set()\n for c in self.children:\n children_labels = children_labels | c.labels_set()\n return set([self.label]) | children_labels", "def items(cls) -> t.List[t.Tuple[t.Any, t.Union[str, NameTitle]]]:\n return list(cls.__labels__.items())", "def format_labels(self, data):\n ret = []\n for sentence, labels, attr in data:\n sentence_length = len(sentence)\n labels_copy = copy.deepcopy(labels)\n labels_copy = [label[0] for label in labels_copy if type(label) is list ]\n ret.append((sentence, labels_copy, attr))\n return ret", "def SAMT_labels(self):\n \t\t#find basic labels\n \t\tlabels_basic = self.dependency_labels()\n \t\tlabels = Labels(labels_basic)\n \t\treturn labels.SAMT_labels()", "def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels", "def leaves_labels(tree, label=None):\n leaves = []\n for child in tree:\n if isinstance(child, Tree):\n leaves.extend(leaves_labels(child, child.label()))\n else:\n leaves.append(label)\n return leaves", "def get_labels(self):\n raise NotImplementedError", "def get_children(self, parent):\n\n child_names = []\n for child in parent.children:\n child_names.append(child.label)\n return child_names", "def get_labels(self) -> Set[str]:", "def label_children(node: etree.Element) -> t.Mapping[str, etree.Element]:\n return {child.tag: child for child in node}", "def labels(self) -> List[str]:\n\n return list(self.t0.keys())", "def return_only_top_level_labels(label_list):\n to_return = []\n for label_name in label_list:\n if label_name.count('/') == 1:\n to_return.append(label_name)\n return to_return", "def _generateLabel(self, obj, **args):\n result = []\n label = self._script.utilities.displayedLabel(obj)\n if label:\n result.append(label)\n return result", "def get(self, name):\n labels = self.list()\n return [\n label\n for label\n in labels\n if name == label.get('name')\n ]", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n 
raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n return self.labels", "def get_label(self, hierarchy: List[str]) -> Any:", "def get_labels(self):\r\n return None", "def get_labels(self):\n return self.labels[1:]", "def get_labels() -> list[Label]:\n\n labels_file = deepcopy(get_data(\"labels.yml\"))\n standard_labels = []\n for group_info in labels_file[\"groups\"]:\n labels = group_info.pop(\"labels\", [])\n group = LabelGroup(**group_info)\n for label_info in labels:\n label = Label(**label_info, group=group)\n standard_labels.append(label)\n for label_info in labels_file[\"standalone\"]:\n label = Label(**label_info)\n standard_labels.append(label)\n return standard_labels", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_extra_label(self, label_name: str, hierarchy: List[str]) -> Any:", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]", "def label(tree):\n return tree[0]", "def get_labels():\n return if_found(dao.get_labels())", "def __level_entries_list__(self):\n # | - __level_entries_list__\n level_entries_dict = self.level_entries\n level_labels = self.tree_level_labels\n\n level_entries_list = []\n for param_i in level_labels:\n # for name, params_list in level_entries_dict.iteritems():\n for name, params_list in level_entries_dict.items():\n if param_i == name:\n level_entries_list.append(params_list)\n\n return(level_entries_list)\n # __|", "def labels(self):\n return self._get_labels(self.label_vector)", "def label(l):\r\n def action(string, loc, tokens):\r\n newlist = [l]\r\n newlist.extend(tokens)\r\n return newlist\r\n return action", "def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]", "def _generateLabelOrName(self, obj, **args):\n result = []\n result.extend(self._generateLabel(obj, **args))\n if not result:\n if obj.name and (len(obj.name)):\n result.append(obj.name)\n return result", "def get_labels(orthographic: str):\n labels = []\n tmp = ''\n tag = False\n\n # Get all labels from orthographic form\n for char in orthographic:\n if char == '[':\n tag = True\n elif char == ']':\n 
labels.append(tmp)\n tag = False\n tmp = ''\n elif tag:\n tmp += char\n return labels", "def get_labels(self):\n return get_labels(self.api_key)", "def get_labels(self, depth=None, set_transform=False):\n labels = libcopy.deepcopy(self.labels)\n if depth is None or depth > 0:\n for reference in self.references:\n if depth is None:\n next_depth = None\n else:\n next_depth = depth - 1\n labels.extend(reference.get_labels(next_depth, set_transform))\n return labels", "def labels(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"labels\")", "def labels_(self) -> DNDarray:\n return self._labels", "def _extract_labels(srcs):\n # Tuples are already labels.\n if type(srcs) == type(()):\n return list(srcs)\n return []", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)", "def label_all(self):\n\t\tlabels_basic = self.dependency_labels()\n\t\tlabels = Labels(labels_basic)\n\t\treturn labels.label_most()", "def get_predefined_labels(self):\n raise NotImplementedError", "def get_doc_labels(self, sort=False):\n labels = self.keys()\n\n if sort:\n return sorted(labels)\n else:\n return list(labels)", "def leaves_with_labels(tree, label=None):\n leaves = []\n for child in tree:\n if isinstance(child, Tree):\n leaves.extend(leaves_with_labels(child, child.label()))\n else:\n leaves.append((child, label))\n return leaves", "def get_labels(self):\n return set(category.label for category in\n self.get_categories(LABELS_SCHEME))", "def label_names(self) -> Strings:\n\n try:\n if self._le:\n return self._le.classes_.tolist()\n except AttributeError:\n self.logger.warning('AttributeError: LabelEncoder was not found.')\n self.logger.warning('No LabelEncoder. Please call label_encoder first.')\n return None", "def labels(self) -> Dict[str, str]:\n return self.attrs.get(\"Labels\", {})", "def get_labels(info):\n return info.features[\"labels\"].names" ]
[ "0.69724953", "0.66440785", "0.6584364", "0.65755326", "0.6563327", "0.64899945", "0.64018184", "0.63992316", "0.6393283", "0.63765824", "0.6373589", "0.6325616", "0.63025737", "0.6302564", "0.62481004", "0.6218792", "0.62084216", "0.61802614", "0.6172223", "0.6171989", "0.61489105", "0.6148765", "0.61369526", "0.61328036", "0.61328036", "0.61328036", "0.6131018", "0.61191404", "0.61159486", "0.6115819", "0.6114433", "0.61042345", "0.61039233", "0.6062076", "0.60585606", "0.6030367", "0.6029593", "0.602205", "0.6016773", "0.6009567", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.60052675", "0.6002603", "0.59894556", "0.5989208", "0.59884", "0.5977485", "0.5975292", "0.5975292", "0.5975292", "0.5975292", "0.5975292", "0.5975292", "0.5975292", "0.5975292", "0.59624714", "0.5956286", "0.5956286", "0.5956286", "0.5956286", "0.5956286", "0.5956286", "0.5955314", "0.59364814", "0.5910177", "0.5872539", "0.58677375", "0.58565116", "0.58142763", "0.58056843", "0.5800654", "0.57986915", "0.57898307", "0.576282", "0.57568765", "0.5752137", "0.57257986", "0.5721173", "0.5720104", "0.5717088", "0.5715858", "0.5701068", "0.57009304", "0.56983346", "0.5696088", "0.5695322" ]
0.6261045
14
This method is called after assignment to a population.
def _init_after_assignment(self): self.labels = { 'axes': [self.locus], \ 'elements': [self.pop._get_axis_elements(self.locus)] } self._init_labels(self.labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_population(self):\n pass", "def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()", "def on_assign(self):", "def init_pop(self):\n genes = np.random.randn( self.population_size * self.individual.gene_count )\n self.population = genes.reshape((self.population_size, -1))\n #print(self.population)", "def _populate(self):\n raise NotImplemented", "def __call__(self, population, context):\n pass", "def on_unassign(self):", "def PLP(self, *_):\n self.reg.P = self.pop()", "def populate(self):\n pass", "def reset_next_population(self):\n self.next_population = []", "def populate(self, populator):\n pass", "def populate(self):\n raise NotImplementedError", "def reinit(self):\n self.logger.info(\"Reinit called. Clear the population.\")\n self.set_init_population([], perf_name=None)\n self._gt_rollouts = []\n self._gt_scores = []", "def setPop(self, pop: Population):\n self.population = pop\n self.population_size = pop.size()", "def pick_up(self):", "def test_pop_objects(self):\r\n tape = copy.deepcopy(self.tape)\r\n tape.populate_database_objects()\r\n eq_(tape.notes[1].state, 'PA')", "def create_new_population(self):\n self.check_for_generation_cap()\n pop_container = list()\n for chromosome in self.population:\n partner = bm.select_partner(\n self.fitness_scores, self.population)\n child = bm.mutate(bm.crossover(chromosome, partner))\n pop_container.append(child)\n if self.population == pop_container:\n print(\"newly created populous is the same as the old populous\")\n self.population = pop_container\n print(\"generations: \", self.generations)\n self.generations += 1", "def evolve_population(self, **options):\n new_population = self.population.evolve(**options)\n self.population = new_population\n self.generation = new_population.generation_number", "def population(self) -> SimplePopulation:\n raise NotImplementedError(\"Subclass must implement\")", "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def _evolve_population(self):\n # save the old population\n self.old_population = self.population.copy()\n self.old_population_errors = self.population_errors.copy()\n\n # index pointers\n rind = numpy.random.permutation(4)+1\n\n # shuffle the locations of the individuals\n ind1 = numpy.random.permutation(self.population_size)\n pop1 = self.old_population[ind1,:]\n \n # rotate for remaining indices\n rot = numpy.remainder(self.rot_ind + rind[0], self.population_size)\n ind2 = ind1[rot,:]\n pop2 = self.old_population[ind2,:]\n\n rot = numpy.remainder(self.rot_ind + rind[1], self.population_size)\n ind3 = ind2[rot,:]\n pop3 = self.old_population[ind3,:]\n\n rot = numpy.remainder(self.rot_ind + rind[2], self.population_size)\n ind4 = ind3[rot,:]\n pop4 = self.old_population[ind4,:]\n\n rot = numpy.remainder(self.rot_ind + rind[3], self.population_size)\n ind5 = ind4[rot,:]\n pop5 = self.old_population[ind5,:]\n \n # population filled with best individual\n best_population = self.best_individual[numpy.newaxis,:].repeat(self.population_size,axis=0)\n\n # figure out the crossover ind\n xold_ind = numpy.random.rand(self.population_size,self.num_params) >= \\\n self.crossover_prob\n\n # get new population based on desired strategy\n # DE/rand/1\n if self.method == DE_RAND_1:\n population = pop3 + self.scale*(pop1 - pop2)\n population_orig = pop3\n # DE/BEST/1\n if self.method == 
DE_BEST_1:\n population = best_population + self.scale*(pop1 - pop2)\n population_orig = best_population\n # DE/best/2\n elif self.method == DE_BEST_2:\n population = best_population + self.scale * \\\n (pop1 + pop2 - pop3 - pop4)\n population_orig = best_population\n # DE/BEST/1/JITTER\n elif self.method == DE_BEST_1_JITTER:\n population = best_population + (pop1 - pop2) * \\\n ((1.0-0.9999) * \\\n numpy.random.rand(self.population_size,self.num_params) + \\\n self.scale)\n population_orig = best_population\n # DE/LOCAL_TO_BEST/1\n elif self.method == DE_LOCAL_TO_BEST_1:\n population = self.old_population + \\\n self.scale*(best_population - self.old_population) + \\\n self.scale*(pop1 - pop2)\n population_orig = self.old_population\n \n # crossover\n population[xold_ind] = self.old_population[xold_ind]\n\n # apply the boundary constraints\n for p in xrange(self.num_params):\n # get min and max\n min_val = self.param_ranges[p][0]\n max_val = self.param_ranges[p][1]\n\n # find where exceeded max\n ind = population[:,p] > max_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = max_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-max_val)\n\n # find where below min\n ind = population[:,p] < min_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = min_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-min_val)\n\n # set the class members\n self.population = population\n self.population_orig = population", "def populate(self, records):\n if self._population is not None:\n raise AttributeError(\"The herd is already populated.\")\n if not isinstance(records, (tuple, list)):\n raise ValueError(\"Expected a tuple or list.\")\n if isinstance(records, list):\n records = tuple(records)\n self._population = records", "def _add_population(self, population):\n self._populations.append(population)", "def population(self):\n return self._population", "def setUp(self):\n N = 10\n pA = 0.5\n pB = 0.5\n qA = 1 - pA\n qB = 1 - pB\n locus_A = (['A'] * int(N * pA)) + (['a'] * int(round(N * qA)))\n locus_B = (['B'] * int(N * pB)) + (['b'] * int(round(N * qB)))\n self.pop = population.Population(N, locus_A, locus_B)", "def reset(self, pop_stats):\n self.assigned = False\n self.move_count = 0\n self.dstrct = None\n\n self.x = pop_stats[0] #target pop for districts\n self.greatest_pop = pop_stats[1]\n self.least_pop = pop_stats[2]", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.p = pdict.pop('p')", "def save_population(self, t):\n # TODO ejw: convert individual level data from self.P into dataframe\n # Info of interest is self.P.I: class of each individual in the population, including:\n # ID\n # adam {bool}: from initial population\n # age {age}:\n # age_days {int}: unknown\n # birth_order {int}: guessing st born, 2nd born, etc.\n # children {list}: IDs of children.\n # deps {list}: IDs of dependents still in household, or orphans?.\n # divorced {bool}:\n # father {str}: guessing farther `ID`.\n # groups {dict}: {`household`: int} -> Household ID linked to self.P.groups['household']:\n # mother {str}: guessing mother's `ID`.\n # next_birth_age {int}: age at which next birth can occur\n # partner {int}: partner's ID, if married.\n # sex {int}: 0: male, 1: female\n # with_parents {int}: still staying with parents (mother or father should be in same household)\n # self.P.groups['households']: same as above, but group per household member's, so much easier to extract\n # from here.\n # self.P.households: history of each household, but 
unknown how it's linked to self.P.groups.\n\n #n_people = len(self.P.I)\n pop = self.P.I\n ids = pop.keys()\n\n # attributes to be extracted\n col_seq = ['time', 'person_id', 'household_id', 'sex', 'age', 'time_birth', 'time_die',\n 'birth_order', 'adam_eve', 'divorced', 'stay_with_parents', 'partner_id',\n 'father_id', 'mother_id', 'children_id', 'dependents_id']\n col_attr = {\n 'person_id': 'ID',\n 'age': 'age',\n 'sex': 'sex',\n 'birth_order': 'birth_order',\n 'adam_eve': 'adam',\n 'divorced': 'divorced',\n 'stay_with_parents': 'with_parents',\n 'time_birth': 'time_birth',\n 'time_die': 'time_die'\n }\n pd_dict = {}\n #other_col = [ 'time', 'household_id', 'partner_id', 'father_id', 'mother_id', 'children_id', 'dependents_id' ]\n for id_key in ids:\n person = pop[id_key]\n person_records = { k:None for k in col_seq }\n person_records.update({ k:getattr(person, v) for k,v in col_attr.items() })\n #person_records.update({ k:None for k in other_col })\n\n person_records['time'] = t\n person_records['household_id'] = person.groups['household']\n\n if not(person.partner is None):\n person_records['partner_id'] = person.partner.ID\n\n father = None\n mother = None\n if len(person.parents) == 2:\n father = person.parents[0].ID\n mother = person.parents[1].ID\n elif len(person.parents) == 1:\n father = None\n mother = person.parents[0].ID\n person_records['father_id'] = father\n person_records['mother_id'] = mother\n\n if len(person.children):\n children_ids = ';'.join([str(ind.ID) for ind in person.children])\n person_records['children_id'] = children_ids\n if len(person.deps):\n dep_ids = ';'.join([str(ind.ID) for ind in person.deps])\n person_records['dependents_id'] = dep_ids\n\n pd_dict[id_key] = person_records\n pop_pd = pd.DataFrame.from_dict(pd_dict, orient='index')\n\n #pop_pd = pop_pd[col_seq] # to ensure the column sequence\n self.pop_pd.append(pop_pd)", "def prepare(self, popSize, evaluate):\n for i in range(popSize):\n member = Member(self.memberLength, self.lowerDomain, self.upperDomain, self.memberDataType)\n member.loss = evaluate(member.rep)\n self.population.append(member)\n if (self.eliteLoss is None) or (self.eliteLoss > member.loss):\n self.eliteLoss = member.loss\n self.eliteIndex = i\n elif (self.diversityLoss is None) or (self.diversityLoss < member.loss):\n self.diversityLoss = member.loss\n self.diversityIndex = i", "def next_generation(self, population):\n pass", "def __call__(self, time: float):\n # Double loop over the whole population, clearing places\n # of the variable population and refilling them.\n\n # Can call a this line if being called in from file etc.\n params = Parameters.instance().place_params\n for cell in self._population.cells:\n for place in cell.places:\n param_ind = place.place_type.value - 1\n if param_ind < len(params[\"mean_size\"]):\n # Checks whether values are present, otherwise uses\n # defaults\n mean_cap = params[\"mean_size\"][param_ind]\n if place.place_type.name == \"Workplace\":\n # Variable population is people not in the fixed pop.\n # Held in the last group of the place.\n # Changed at each timestep\n group_ind = list(place.person_groups.keys())[-1]\n place.empty_place(groups_to_empty=[group_ind])\n person_list = [person for person in place.cell.persons\n if person not in place.persons]\n self.update_place_group(place, group_index=group_ind,\n mean_capacity=mean_cap,\n person_list=person_list.copy())\n\n elif place.place_type.name == \"OutdoorSpace\":\n place.empty_place()\n self.update_place_group(place)", "def 
_set_propreties(self):\n pass", "def default_replacement(random, population, parents, offspring, args):\r\n return population", "def getPopulation(self):\n\n return self.p", "def pop(self):\n assert self.local_variables.parent is not None\n self.local_variables = self.local_variables.parent\n assert self.local_types.parent is not None\n self.local_types = self.local_types.parent", "def set_population_dependent_flags(self, *args):\n # check if self.food <= self.population\n # change self.hungry\n # change if population can be added (6)\n if self.population.get() == Species.MAX_POP:\n # turn off add population button\n pass", "def testExceedingSetter(self):\n _1 = [ (self.kl[0], 3),\n (self.kl[1], 3),\n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 2, \"got {}\".format(_2))\n _expect = set([(self.kl[1], 4), (self.kl[0], 6), ])\n self.assertEqual(_2, _expect, \"something odd\")", "def testPartialAndIncorrectSetter(self):\n _1 = [ (self.kl[0], 1), \n (self.kl[1], 1), \n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 3, \"got {}\".format(_2))\n _expect = set([(self.kl[0], 2),\n (self.kl[1], 2),\n (getattr(tp, \"RandConso\"), 6)])\n self.assertEqual(_2, _expect, \"something odd\")", "def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num", "def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num", "def finalise(self):", "def tearDown(self):\n del self.pop", "def add_population(self):\n # First, check if less than max size\n if self.population.get() >= Species.MAX_POP:\n raise PopulationOverflowException(\"Cannot add more than 6 population\") \n self.population.set(self.population.get()+1)\n self.player.request_payment(\"discard\")", "def select(self, m, population):\n pass", "def pop(self):", "def pop(self):", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus1, self.locus2, self.locus3], \\\n 'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2), self.pop._get_axis_elements(self.locus3)] }\n self._init_labels(self.labels)", "def save_population(self, generation):\r\n pop_checkpoint = {\r\n 'populations' : {name : np.stack(pop.population) for name, pop in self.populations.items()},\r\n 'generation' : generation,\r\n 'mutation_prob' : {name : pop.mutation_prob for name, pop in self.populations.items()},\r\n 'evolution_hist' : self.evolution_history,\r\n 'mu' : {name : pop.strategy_m for name, pop in self.populations.items()},\r\n 'C' :{name : pop.strategy_C for name, pop in self.populations.items()},\r\n 'cc' :{name : pop.cc for name, pop in self.populations.items()},\r\n 'cs' :{name : pop.cs for name, pop in self.populations.items()},\r\n 'c_cov' :{name : pop.c_cov for name, pop in self.populations.items()},\r\n 'mu_cov':{name : pop.mu_cov for name, pop in 
self.populations.items()},\r\n 'ds':{name : pop.ds for name, pop in self.populations.items()},\r\n 'evo_path':{name : pop.evo_path for name, pop in self.populations.items()},\r\n 'ps':{name : pop.ps for name, pop in self.populations.items()},\r\n 'B':{name : pop.B for name, pop in self.populations.items()},\r\n 'Bt' :{name : pop.Bt for name, pop in self.populations.items()},\r\n 'D' : {name : pop.D for name, pop in self.populations.items()},\r\n 'sigma' : {name : pop.sigma for name, pop in self.populations.items()},\r\n 'num_evals' :{name : pop.num_evals for name, pop in self.populations.items()},\r\n }\r\n file_name = 'spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name\r\n save_pickle(pop_checkpoint, file_name)\r\n logging.info('Successfully saved evolution checkpoint.')", "def pop_write(self):\n ...", "def populate(cls):\n raise NotImplementedError", "def pop(self):\n pass", "def pop(self):\n pass", "def _add_population(self, info, index, population):\n info[index] = [self._representation.decode(item) for\n item in population]", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def _generate_population(self) -> None:\n self._population = list()\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n initial_fitval = self._fit_test(blank_img_ext)\n\n for i in range(self._pop_size):\n # Each chromosome is an empty black image\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((blank_img_ext, initial_fitval))", "def _read_pops(self, sheet) -> None:\n\n # TODO - can modify _read_pops() and _write_pops() if there are more population attributes\n tables = read_tables(sheet)[0]\n assert len(tables) == 1, \"Population Definitions page should only contain one table\"\n\n self.pops = sc.odict()\n assert cell_get_string(tables[0][0][0]).lower() == \"abbreviation\"\n assert cell_get_string(tables[0][0][1]).lower() == \"full name\"\n\n # If pop typ column exists, check the heading is correct\n if len(tables[0][0]) > 2:\n assert cell_get_string(tables[0][0][2]).lower() == \"population type\"\n\n for row in tables[0][1:]:\n\n pop_name = cell_get_string(row[0])\n assert len(pop_name) > 1, 'Population code name (abbreviation) \"%s\" is not valid - it must be at least two characters long' % (pop_name)\n\n label = cell_get_string(row[1])\n assert len(label) > 1, 'Population full name \"%s\" is not valid - it must be at least two characters long' % (label)\n\n if pop_name.lower() in FS.RESERVED_KEYWORDS:\n raise Exception('Population name \"%s\" is a reserved keyword' % (pop_name.lower()))\n\n poptype = None\n if len(row) > 2 and row[2].value is not None:\n poptype = cell_get_string(row[2])\n\n self.pops[pop_name] = {\"label\": label, \"type\": poptype}", "def new_population(self, population_p, population_r):\n population_choice_method = 2\n\n population = []\n for i in range(self.population_length):\n population.append(population_p[i])\n population.append(population_r[i])\n\n population = self.sort_population(population)\n\n if population_choice_method == 1: # mi najlepszych osobnikow\n population = [population[x] for x in range(0, self.population_length)]\n\n else: # metoda ruletki\n tmp_population = []\n population_size = len(population)\n for i in range(self.elite): # Zachowanie elitarnych 
osobnikow\n tmp_population.append(population[i])\n del population[i]\n population_size = population_size - 1\n\n tmp = [(1.0 / self.fitness(x), x) for x in population]\n tmp.reverse()\n error_sum = 0\n for i in range(len(tmp)):\n error_sum = error_sum + tmp[i][0]\n tmp = [(x[0] / error_sum, x[1]) for x in tmp]\n\n sum = 0\n roulette = []\n for i in range(len(tmp)):\n sum = sum + tmp[i][0]\n roulette.append([sum, tmp[i][1]])\n\n roulette = [(x[0] * 10000, x[1]) for x in roulette]\n\n for i in range(self.population_length - self.elite):\n rand = -1\n item = self.random_chromosome()\n while rand != -1 and item in tmp_population:\n rand = random.randint(0, 9999)\n item = next(x[1] for x in roulette if rand <= x[0])\n tmp_population.append(item)\n\n population = tmp_population\n\n return population", "def step(self):\n y = np.random.rand(self.p.lambda_, self.p.d).T\n x = self.p.m.reshape(-1, 1) * y\n f = np.array(list(map(sum, x)))\n self.p.used_budget += self.p.lambda_\n self.p.population = Population(x, y, f)\n self.p.m_old = self.p.m.copy()\n self.p.m *= np.linalg.norm(y, axis=1).reshape(-1, 1)\n self.p.adapt()\n self.p.old_population = self.p.population.copy()", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus1, self.locus2], \\\n 'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2)] }\n self._init_labels(self.labels)", "def updateMatingPool(self):\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )", "def default_replacement(random, population, parents, offspring, args):\n return population", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def _localSetState(self,pdict):\n self.n = pdict.pop('n')\n self.p = pdict.pop('p')", "def breed(self): \n while (len(self.population) <= self.pop_size):\n orga = random.choice(self.population)\n orgb = random.choice(self.population) # Asexualism works too :-p\n self.population.append(orga.breed(orgb)) # Add a new organism", "def add_population(self, population):\n for species in population:\n y, x = [n - 1 for n in species['loc']]\n for ani in species['pop']:\n if ani['species'] == 'Herbivore':\n self.island.island[y][x].herbivores.append(Herbivore(\n weight=ani['weight'], age=ani['age'],\n coordinates=(y, x)))\n elif ani['species'] == 'Carnivore':\n self.island.island[y][x].carnivores.append(Carnivore(\n weight=ani['weight'], age=ani['age'],\n coordinates=(y, x)))", "def init_population(self):\n for idx in xrange(0, self.population_size):\n individual = self.individual_factory.create()\n self.population.append(individual)\n\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n\n # In order to roulette wheel selection work with negative values, \n # we sum all fitness values to the absolute value of the most negative plus one\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n #print self.population_fitness.min()\n #print self.population_fitness\n #print self.normalized_fitness", "def sortPopulation(self):\n self.population = 
sorted(self.population, key=attrgetter('fitness'), reverse=True)", "def pop_gauges(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def load_population(self):\r\n checkpoint = load_pickle('spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name)\r\n logging.info('Resuming CMA-ES evolution using checkpoint ' + self.checkpoint_name)\r\n key = tuple(self.populations.keys())[0]\r\n for key, pop in checkpoint['populations'].items():\r\n self.populations[key].strategy_m = checkpoint['mu'][key]\r\n self.populations[key].strategy_C = checkpoint['C'][key]\r\n self.populations[key].cc = checkpoint['cc'][key]\r\n self.populations[key].cs = checkpoint['cs'][key]\r\n self.populations[key].mu_cov = checkpoint['mu_cov'][key]\r\n self.populations[key].c_cov = checkpoint['c_cov'][key]\r\n self.populations[key].ds = checkpoint['ds'][key]\r\n self.populations[key].evo_path = checkpoint['evo_path'][key]\r\n self.populations[key].ps = checkpoint['ps'][key]\r\n self.populations[key].B = checkpoint['B'][key]\r\n self.populations[key].Bt = checkpoint['Bt'][key]\r\n self.populations[key].D = checkpoint['D'][key]\r\n self.populations[key].sigma = checkpoint['sigma'][key]\r\n self.populations[key].num_evals = checkpoint['num_evals'][key]\r\n self.populations[key].population = self.populations[key].sample()\r\n self.init_generation = checkpoint['generation']\r\n self.evolution_history = checkpoint['evolution_hist']", "def _reload_values(self):\r\n raise NotImplementedError", "def popitem(self):\n pass", "def final(self):\n pass", "def apply(self, population_current, population_offspring):\n population_current[population_current.worst_index] = population_offspring[0]\n return population_current", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. 
generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')", "def initialize_population(self):\n for i in range(GAConfig[\"initial_population_size\"]):\n new_chromosome = Chromosome(GAConfig[\"num_categories\"])\n for gene in self.phones:\n random_category = randint(0, GAConfig[\"num_categories\"] - 1)\n new_chromosome.insert_into_category(random_category, gene)\n #need to make sure that the chromosome has all categories fixed here.\n\n #adds the restrictions to the categories\n if(GAConfig[\"category_restriction\"] == \"True\"):\n new_chromosome = self.space_chrom(new_chromosome)\n\n self.population.append(new_chromosome)\n\n self.population = self.computeFitness.compute(self.population)\n self._sort()", "def post_init(self):\n\t\tpass", "def __iadd__(self, population):\n self.chromosome_list += (to_chromosome(chromosome) for chromosome in population)", "def truncate(self):\n\n self.population = self.population[:self.max_number_trees]", "def mutatePopulation(self, population):\n\t\tfor i in range(int(math.ceil(self.selectionRate * len(population)))):\n\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population[:self.populationSize]", "def SetPop(self, fname, var):\n\n\t\tself._pop_fname = fname\n\t\tself._pop_var = var", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "def pop(self) -> Any:\n # TODO: Implement this method\n ...", "def _localSetState(self,pdict):\n self.apex = pdict.pop('apex')\n self.min = pdict.pop('min' )\n self.max = pdict.pop('max' )", "def postLoad(self):\n pass", "def create_population(self):\n stagnation = DefaultStagnation(self.config.population, 
self.reporters)\n self.reporters = ReporterSet()\n self.reproduction = DefaultReproduction(self.reporters, stagnation)\n \n # Create a population from scratch, then partition into species\n self.population = self.reproduction.create_new(config=self.config,\n num_genomes=self.config.population.pop_size)\n self.species = DefaultSpecies(reporters=self.reporters)\n self.species.speciate(config=self.config,\n population=self.population,\n generation=self.generation,\n logger=self.log)\n \n # Add to each of the species its elites\n self.update_species_fitness_hist()\n \n # Use 'print' to output information about the run\n reporter = StdOutReporter()\n self.add_reporter(reporter)\n \n # Save newly made population\n self.save()\n \n # Write population configuration to file\n with open(f\"population{'_backup' if self.use_backup else ''}/\"\n f\"storage/\"\n f\"{self.folder_name}/\"\n f\"{self}/\"\n f\"config.txt\", 'w') as f:\n f.write(self.config.read())", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')", "def set_parameters(self, **kwargs):\n kwargs.pop('population_size', None)\n super().set_parameters(population_size=1, **kwargs)\n self.candidates = None", "def _recompute_fitness(self):\n for cur in self.population:\n if cur['fitness'] is None:\n cur['fitness'] = self.op.fitness(cur['individual'])", "def set_test(self):\n self.genes_test = self.__genes.copy()\n self.__fitness_test = self.__fitness", "def populate(self):\n insert_many_on_conflict_ignore(self.settings, Player, self.rows)", "def _perturbInPlaceHard(self):\n die", "def __init__(self, _populationSize, _chromosomeClass):\n # a generation is a collection of chromosomes stored in a priority queue\n # which is ordered by fitness\n self.generation = PriorityQueue()\n # store how many chromosomes are in each generation\n self.populationSize = _populationSize\n # store a template for generating chromosomes\n self.chromosomeClass = _chromosomeClass\n # choose a random starting population\n self.randomPopulation()", "def update_original_data(self):\n pass", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def pop(self):\n pass", "def generational_step(self, population):\n offspring = self.variation(population, self._number_offspring)\n self.evaluation(population)\n self.evaluation(offspring)\n if self._target_populations_size is None:\n new_pop_size = len(population)\n else:\n new_pop_size = self._target_populations_size\n self.update_diagnostics(population, offspring)\n return self.selection(population + offspring, new_pop_size)", "def init_population(self, size):\n print(\"Initializing population.\")\n self.population = []\n for _ in range(size):\n self.population.append(Gene(self.tactics))", "def pop():" ]
[ "0.6783058", "0.6499103", "0.64520025", "0.63784206", "0.6292192", "0.6238812", "0.6130736", "0.6072983", "0.60598457", "0.6027957", "0.5980297", "0.5886131", "0.5880756", "0.58642906", "0.5846734", "0.5805156", "0.57963616", "0.57949483", "0.5778682", "0.57561123", "0.5726718", "0.5719026", "0.56881166", "0.56396073", "0.5639513", "0.5635086", "0.5619893", "0.5615631", "0.55761486", "0.5560683", "0.5519276", "0.54922974", "0.5476284", "0.5471784", "0.5470862", "0.5449019", "0.54347", "0.54337376", "0.5414289", "0.5414289", "0.54122984", "0.53878194", "0.536703", "0.53486955", "0.53441983", "0.53441983", "0.5329456", "0.5328839", "0.53251714", "0.5320616", "0.5311731", "0.5311731", "0.52953917", "0.5285297", "0.5285297", "0.52773553", "0.5277309", "0.52749556", "0.52748775", "0.52742124", "0.5269925", "0.52459806", "0.52369666", "0.523523", "0.5216775", "0.5215811", "0.5201592", "0.5201034", "0.51997024", "0.5194283", "0.51896065", "0.51882815", "0.5185656", "0.5184979", "0.5183791", "0.5180535", "0.5176039", "0.51703787", "0.5163871", "0.51561874", "0.5151419", "0.5150049", "0.51494396", "0.51494116", "0.51447207", "0.51365453", "0.51353234", "0.51229906", "0.5119958", "0.5115068", "0.5108601", "0.5104367", "0.5103776", "0.5095493", "0.5090149", "0.50883615", "0.5088332", "0.5085413", "0.50833905", "0.5074022" ]
0.52223545
64
This method is called after assignment to a population.
def _init_after_assignment(self):
    self.labels = { 'axes': [self.locus1, self.locus2], \
                    'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2)] }
    self._init_labels(self.labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_population(self):\n pass", "def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()", "def on_assign(self):", "def init_pop(self):\n genes = np.random.randn( self.population_size * self.individual.gene_count )\n self.population = genes.reshape((self.population_size, -1))\n #print(self.population)", "def _populate(self):\n raise NotImplemented", "def __call__(self, population, context):\n pass", "def on_unassign(self):", "def PLP(self, *_):\n self.reg.P = self.pop()", "def populate(self):\n pass", "def reset_next_population(self):\n self.next_population = []", "def populate(self, populator):\n pass", "def populate(self):\n raise NotImplementedError", "def reinit(self):\n self.logger.info(\"Reinit called. Clear the population.\")\n self.set_init_population([], perf_name=None)\n self._gt_rollouts = []\n self._gt_scores = []", "def setPop(self, pop: Population):\n self.population = pop\n self.population_size = pop.size()", "def pick_up(self):", "def test_pop_objects(self):\r\n tape = copy.deepcopy(self.tape)\r\n tape.populate_database_objects()\r\n eq_(tape.notes[1].state, 'PA')", "def evolve_population(self, **options):\n new_population = self.population.evolve(**options)\n self.population = new_population\n self.generation = new_population.generation_number", "def create_new_population(self):\n self.check_for_generation_cap()\n pop_container = list()\n for chromosome in self.population:\n partner = bm.select_partner(\n self.fitness_scores, self.population)\n child = bm.mutate(bm.crossover(chromosome, partner))\n pop_container.append(child)\n if self.population == pop_container:\n print(\"newly created populous is the same as the old populous\")\n self.population = pop_container\n print(\"generations: \", self.generations)\n self.generations += 1", "def population(self) -> SimplePopulation:\n raise NotImplementedError(\"Subclass must implement\")", "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def _evolve_population(self):\n # save the old population\n self.old_population = self.population.copy()\n self.old_population_errors = self.population_errors.copy()\n\n # index pointers\n rind = numpy.random.permutation(4)+1\n\n # shuffle the locations of the individuals\n ind1 = numpy.random.permutation(self.population_size)\n pop1 = self.old_population[ind1,:]\n \n # rotate for remaining indices\n rot = numpy.remainder(self.rot_ind + rind[0], self.population_size)\n ind2 = ind1[rot,:]\n pop2 = self.old_population[ind2,:]\n\n rot = numpy.remainder(self.rot_ind + rind[1], self.population_size)\n ind3 = ind2[rot,:]\n pop3 = self.old_population[ind3,:]\n\n rot = numpy.remainder(self.rot_ind + rind[2], self.population_size)\n ind4 = ind3[rot,:]\n pop4 = self.old_population[ind4,:]\n\n rot = numpy.remainder(self.rot_ind + rind[3], self.population_size)\n ind5 = ind4[rot,:]\n pop5 = self.old_population[ind5,:]\n \n # population filled with best individual\n best_population = self.best_individual[numpy.newaxis,:].repeat(self.population_size,axis=0)\n\n # figure out the crossover ind\n xold_ind = numpy.random.rand(self.population_size,self.num_params) >= \\\n self.crossover_prob\n\n # get new population based on desired strategy\n # DE/rand/1\n if self.method == DE_RAND_1:\n population = pop3 + self.scale*(pop1 - pop2)\n population_orig = pop3\n # DE/BEST/1\n if self.method == 
DE_BEST_1:\n population = best_population + self.scale*(pop1 - pop2)\n population_orig = best_population\n # DE/best/2\n elif self.method == DE_BEST_2:\n population = best_population + self.scale * \\\n (pop1 + pop2 - pop3 - pop4)\n population_orig = best_population\n # DE/BEST/1/JITTER\n elif self.method == DE_BEST_1_JITTER:\n population = best_population + (pop1 - pop2) * \\\n ((1.0-0.9999) * \\\n numpy.random.rand(self.population_size,self.num_params) + \\\n self.scale)\n population_orig = best_population\n # DE/LOCAL_TO_BEST/1\n elif self.method == DE_LOCAL_TO_BEST_1:\n population = self.old_population + \\\n self.scale*(best_population - self.old_population) + \\\n self.scale*(pop1 - pop2)\n population_orig = self.old_population\n \n # crossover\n population[xold_ind] = self.old_population[xold_ind]\n\n # apply the boundary constraints\n for p in xrange(self.num_params):\n # get min and max\n min_val = self.param_ranges[p][0]\n max_val = self.param_ranges[p][1]\n\n # find where exceeded max\n ind = population[:,p] > max_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = max_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-max_val)\n\n # find where below min\n ind = population[:,p] < min_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = min_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-min_val)\n\n # set the class members\n self.population = population\n self.population_orig = population", "def populate(self, records):\n if self._population is not None:\n raise AttributeError(\"The herd is already populated.\")\n if not isinstance(records, (tuple, list)):\n raise ValueError(\"Expected a tuple or list.\")\n if isinstance(records, list):\n records = tuple(records)\n self._population = records", "def _add_population(self, population):\n self._populations.append(population)", "def population(self):\n return self._population", "def setUp(self):\n N = 10\n pA = 0.5\n pB = 0.5\n qA = 1 - pA\n qB = 1 - pB\n locus_A = (['A'] * int(N * pA)) + (['a'] * int(round(N * qA)))\n locus_B = (['B'] * int(N * pB)) + (['b'] * int(round(N * qB)))\n self.pop = population.Population(N, locus_A, locus_B)", "def reset(self, pop_stats):\n self.assigned = False\n self.move_count = 0\n self.dstrct = None\n\n self.x = pop_stats[0] #target pop for districts\n self.greatest_pop = pop_stats[1]\n self.least_pop = pop_stats[2]", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.p = pdict.pop('p')", "def save_population(self, t):\n # TODO ejw: convert individual level data from self.P into dataframe\n # Info of interest is self.P.I: class of each individual in the population, including:\n # ID\n # adam {bool}: from initial population\n # age {age}:\n # age_days {int}: unknown\n # birth_order {int}: guessing st born, 2nd born, etc.\n # children {list}: IDs of children.\n # deps {list}: IDs of dependents still in household, or orphans?.\n # divorced {bool}:\n # father {str}: guessing farther `ID`.\n # groups {dict}: {`household`: int} -> Household ID linked to self.P.groups['household']:\n # mother {str}: guessing mother's `ID`.\n # next_birth_age {int}: age at which next birth can occur\n # partner {int}: partner's ID, if married.\n # sex {int}: 0: male, 1: female\n # with_parents {int}: still staying with parents (mother or father should be in same household)\n # self.P.groups['households']: same as above, but group per household member's, so much easier to extract\n # from here.\n # self.P.households: history of each household, but 
unknown how it's linked to self.P.groups.\n\n #n_people = len(self.P.I)\n pop = self.P.I\n ids = pop.keys()\n\n # attributes to be extracted\n col_seq = ['time', 'person_id', 'household_id', 'sex', 'age', 'time_birth', 'time_die',\n 'birth_order', 'adam_eve', 'divorced', 'stay_with_parents', 'partner_id',\n 'father_id', 'mother_id', 'children_id', 'dependents_id']\n col_attr = {\n 'person_id': 'ID',\n 'age': 'age',\n 'sex': 'sex',\n 'birth_order': 'birth_order',\n 'adam_eve': 'adam',\n 'divorced': 'divorced',\n 'stay_with_parents': 'with_parents',\n 'time_birth': 'time_birth',\n 'time_die': 'time_die'\n }\n pd_dict = {}\n #other_col = [ 'time', 'household_id', 'partner_id', 'father_id', 'mother_id', 'children_id', 'dependents_id' ]\n for id_key in ids:\n person = pop[id_key]\n person_records = { k:None for k in col_seq }\n person_records.update({ k:getattr(person, v) for k,v in col_attr.items() })\n #person_records.update({ k:None for k in other_col })\n\n person_records['time'] = t\n person_records['household_id'] = person.groups['household']\n\n if not(person.partner is None):\n person_records['partner_id'] = person.partner.ID\n\n father = None\n mother = None\n if len(person.parents) == 2:\n father = person.parents[0].ID\n mother = person.parents[1].ID\n elif len(person.parents) == 1:\n father = None\n mother = person.parents[0].ID\n person_records['father_id'] = father\n person_records['mother_id'] = mother\n\n if len(person.children):\n children_ids = ';'.join([str(ind.ID) for ind in person.children])\n person_records['children_id'] = children_ids\n if len(person.deps):\n dep_ids = ';'.join([str(ind.ID) for ind in person.deps])\n person_records['dependents_id'] = dep_ids\n\n pd_dict[id_key] = person_records\n pop_pd = pd.DataFrame.from_dict(pd_dict, orient='index')\n\n #pop_pd = pop_pd[col_seq] # to ensure the column sequence\n self.pop_pd.append(pop_pd)", "def prepare(self, popSize, evaluate):\n for i in range(popSize):\n member = Member(self.memberLength, self.lowerDomain, self.upperDomain, self.memberDataType)\n member.loss = evaluate(member.rep)\n self.population.append(member)\n if (self.eliteLoss is None) or (self.eliteLoss > member.loss):\n self.eliteLoss = member.loss\n self.eliteIndex = i\n elif (self.diversityLoss is None) or (self.diversityLoss < member.loss):\n self.diversityLoss = member.loss\n self.diversityIndex = i", "def next_generation(self, population):\n pass", "def __call__(self, time: float):\n # Double loop over the whole population, clearing places\n # of the variable population and refilling them.\n\n # Can call a this line if being called in from file etc.\n params = Parameters.instance().place_params\n for cell in self._population.cells:\n for place in cell.places:\n param_ind = place.place_type.value - 1\n if param_ind < len(params[\"mean_size\"]):\n # Checks whether values are present, otherwise uses\n # defaults\n mean_cap = params[\"mean_size\"][param_ind]\n if place.place_type.name == \"Workplace\":\n # Variable population is people not in the fixed pop.\n # Held in the last group of the place.\n # Changed at each timestep\n group_ind = list(place.person_groups.keys())[-1]\n place.empty_place(groups_to_empty=[group_ind])\n person_list = [person for person in place.cell.persons\n if person not in place.persons]\n self.update_place_group(place, group_index=group_ind,\n mean_capacity=mean_cap,\n person_list=person_list.copy())\n\n elif place.place_type.name == \"OutdoorSpace\":\n place.empty_place()\n self.update_place_group(place)", "def 
_set_propreties(self):\n pass", "def default_replacement(random, population, parents, offspring, args):\r\n return population", "def pop(self):\n assert self.local_variables.parent is not None\n self.local_variables = self.local_variables.parent\n assert self.local_types.parent is not None\n self.local_types = self.local_types.parent", "def getPopulation(self):\n\n return self.p", "def set_population_dependent_flags(self, *args):\n # check if self.food <= self.population\n # change self.hungry\n # change if population can be added (6)\n if self.population.get() == Species.MAX_POP:\n # turn off add population button\n pass", "def testExceedingSetter(self):\n _1 = [ (self.kl[0], 3),\n (self.kl[1], 3),\n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 2, \"got {}\".format(_2))\n _expect = set([(self.kl[1], 4), (self.kl[0], 6), ])\n self.assertEqual(_2, _expect, \"something odd\")", "def testPartialAndIncorrectSetter(self):\n _1 = [ (self.kl[0], 1), \n (self.kl[1], 1), \n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 3, \"got {}\".format(_2))\n _expect = set([(self.kl[0], 2),\n (self.kl[1], 2),\n (getattr(tp, \"RandConso\"), 6)])\n self.assertEqual(_2, _expect, \"something odd\")", "def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num", "def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num", "def finalise(self):", "def tearDown(self):\n del self.pop", "def add_population(self):\n # First, check if less than max size\n if self.population.get() >= Species.MAX_POP:\n raise PopulationOverflowException(\"Cannot add more than 6 population\") \n self.population.set(self.population.get()+1)\n self.player.request_payment(\"discard\")", "def select(self, m, population):\n pass", "def pop(self):", "def pop(self):", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus1, self.locus2, self.locus3], \\\n 'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2), self.pop._get_axis_elements(self.locus3)] }\n self._init_labels(self.labels)", "def save_population(self, generation):\r\n pop_checkpoint = {\r\n 'populations' : {name : np.stack(pop.population) for name, pop in self.populations.items()},\r\n 'generation' : generation,\r\n 'mutation_prob' : {name : pop.mutation_prob for name, pop in self.populations.items()},\r\n 'evolution_hist' : self.evolution_history,\r\n 'mu' : {name : pop.strategy_m for name, pop in self.populations.items()},\r\n 'C' :{name : pop.strategy_C for name, pop in self.populations.items()},\r\n 'cc' :{name : pop.cc for name, pop in self.populations.items()},\r\n 'cs' :{name : pop.cs for name, pop in self.populations.items()},\r\n 'c_cov' :{name : pop.c_cov for name, pop in self.populations.items()},\r\n 'mu_cov':{name : pop.mu_cov for name, pop in 
self.populations.items()},\r\n 'ds':{name : pop.ds for name, pop in self.populations.items()},\r\n 'evo_path':{name : pop.evo_path for name, pop in self.populations.items()},\r\n 'ps':{name : pop.ps for name, pop in self.populations.items()},\r\n 'B':{name : pop.B for name, pop in self.populations.items()},\r\n 'Bt' :{name : pop.Bt for name, pop in self.populations.items()},\r\n 'D' : {name : pop.D for name, pop in self.populations.items()},\r\n 'sigma' : {name : pop.sigma for name, pop in self.populations.items()},\r\n 'num_evals' :{name : pop.num_evals for name, pop in self.populations.items()},\r\n }\r\n file_name = 'spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name\r\n save_pickle(pop_checkpoint, file_name)\r\n logging.info('Successfully saved evolution checkpoint.')", "def pop_write(self):\n ...", "def populate(cls):\n raise NotImplementedError", "def pop(self):\n pass", "def pop(self):\n pass", "def _add_population(self, info, index, population):\n info[index] = [self._representation.decode(item) for\n item in population]", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def _generate_population(self) -> None:\n self._population = list()\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n initial_fitval = self._fit_test(blank_img_ext)\n\n for i in range(self._pop_size):\n # Each chromosome is an empty black image\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((blank_img_ext, initial_fitval))", "def step(self):\n y = np.random.rand(self.p.lambda_, self.p.d).T\n x = self.p.m.reshape(-1, 1) * y\n f = np.array(list(map(sum, x)))\n self.p.used_budget += self.p.lambda_\n self.p.population = Population(x, y, f)\n self.p.m_old = self.p.m.copy()\n self.p.m *= np.linalg.norm(y, axis=1).reshape(-1, 1)\n self.p.adapt()\n self.p.old_population = self.p.population.copy()", "def _read_pops(self, sheet) -> None:\n\n # TODO - can modify _read_pops() and _write_pops() if there are more population attributes\n tables = read_tables(sheet)[0]\n assert len(tables) == 1, \"Population Definitions page should only contain one table\"\n\n self.pops = sc.odict()\n assert cell_get_string(tables[0][0][0]).lower() == \"abbreviation\"\n assert cell_get_string(tables[0][0][1]).lower() == \"full name\"\n\n # If pop typ column exists, check the heading is correct\n if len(tables[0][0]) > 2:\n assert cell_get_string(tables[0][0][2]).lower() == \"population type\"\n\n for row in tables[0][1:]:\n\n pop_name = cell_get_string(row[0])\n assert len(pop_name) > 1, 'Population code name (abbreviation) \"%s\" is not valid - it must be at least two characters long' % (pop_name)\n\n label = cell_get_string(row[1])\n assert len(label) > 1, 'Population full name \"%s\" is not valid - it must be at least two characters long' % (label)\n\n if pop_name.lower() in FS.RESERVED_KEYWORDS:\n raise Exception('Population name \"%s\" is a reserved keyword' % (pop_name.lower()))\n\n poptype = None\n if len(row) > 2 and row[2].value is not None:\n poptype = cell_get_string(row[2])\n\n self.pops[pop_name] = {\"label\": label, \"type\": poptype}", "def new_population(self, population_p, population_r):\n population_choice_method = 2\n\n population = []\n for i in range(self.population_length):\n 
population.append(population_p[i])\n population.append(population_r[i])\n\n population = self.sort_population(population)\n\n if population_choice_method == 1: # mi najlepszych osobnikow\n population = [population[x] for x in range(0, self.population_length)]\n\n else: # metoda ruletki\n tmp_population = []\n population_size = len(population)\n for i in range(self.elite): # Zachowanie elitarnych osobnikow\n tmp_population.append(population[i])\n del population[i]\n population_size = population_size - 1\n\n tmp = [(1.0 / self.fitness(x), x) for x in population]\n tmp.reverse()\n error_sum = 0\n for i in range(len(tmp)):\n error_sum = error_sum + tmp[i][0]\n tmp = [(x[0] / error_sum, x[1]) for x in tmp]\n\n sum = 0\n roulette = []\n for i in range(len(tmp)):\n sum = sum + tmp[i][0]\n roulette.append([sum, tmp[i][1]])\n\n roulette = [(x[0] * 10000, x[1]) for x in roulette]\n\n for i in range(self.population_length - self.elite):\n rand = -1\n item = self.random_chromosome()\n while rand != -1 and item in tmp_population:\n rand = random.randint(0, 9999)\n item = next(x[1] for x in roulette if rand <= x[0])\n tmp_population.append(item)\n\n population = tmp_population\n\n return population", "def updateMatingPool(self):\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )", "def default_replacement(random, population, parents, offspring, args):\n return population", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def _localSetState(self,pdict):\n self.n = pdict.pop('n')\n self.p = pdict.pop('p')", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus], \\\n 'elements': [self.pop._get_axis_elements(self.locus)] }\n self._init_labels(self.labels)", "def breed(self): \n while (len(self.population) <= self.pop_size):\n orga = random.choice(self.population)\n orgb = random.choice(self.population) # Asexualism works too :-p\n self.population.append(orga.breed(orgb)) # Add a new organism", "def add_population(self, population):\n for species in population:\n y, x = [n - 1 for n in species['loc']]\n for ani in species['pop']:\n if ani['species'] == 'Herbivore':\n self.island.island[y][x].herbivores.append(Herbivore(\n weight=ani['weight'], age=ani['age'],\n coordinates=(y, x)))\n elif ani['species'] == 'Carnivore':\n self.island.island[y][x].carnivores.append(Carnivore(\n weight=ani['weight'], age=ani['age'],\n coordinates=(y, x)))", "def init_population(self):\n for idx in xrange(0, self.population_size):\n individual = self.individual_factory.create()\n self.population.append(individual)\n\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n\n # In order to roulette wheel selection work with negative values, \n # we sum all fitness values to the absolute value of the most negative plus one\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n #print self.population_fitness.min()\n #print self.population_fitness\n #print self.normalized_fitness", "def sortPopulation(self):\n self.population = sorted(self.population, 
key=attrgetter('fitness'), reverse=True)", "def pop_gauges(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def load_population(self):\r\n checkpoint = load_pickle('spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name)\r\n logging.info('Resuming CMA-ES evolution using checkpoint ' + self.checkpoint_name)\r\n key = tuple(self.populations.keys())[0]\r\n for key, pop in checkpoint['populations'].items():\r\n self.populations[key].strategy_m = checkpoint['mu'][key]\r\n self.populations[key].strategy_C = checkpoint['C'][key]\r\n self.populations[key].cc = checkpoint['cc'][key]\r\n self.populations[key].cs = checkpoint['cs'][key]\r\n self.populations[key].mu_cov = checkpoint['mu_cov'][key]\r\n self.populations[key].c_cov = checkpoint['c_cov'][key]\r\n self.populations[key].ds = checkpoint['ds'][key]\r\n self.populations[key].evo_path = checkpoint['evo_path'][key]\r\n self.populations[key].ps = checkpoint['ps'][key]\r\n self.populations[key].B = checkpoint['B'][key]\r\n self.populations[key].Bt = checkpoint['Bt'][key]\r\n self.populations[key].D = checkpoint['D'][key]\r\n self.populations[key].sigma = checkpoint['sigma'][key]\r\n self.populations[key].num_evals = checkpoint['num_evals'][key]\r\n self.populations[key].population = self.populations[key].sample()\r\n self.init_generation = checkpoint['generation']\r\n self.evolution_history = checkpoint['evolution_hist']", "def _reload_values(self):\r\n raise NotImplementedError", "def popitem(self):\n pass", "def apply(self, population_current, population_offspring):\n population_current[population_current.worst_index] = population_offspring[0]\n return population_current", "def final(self):\n pass", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. 
generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')", "def initialize_population(self):\n for i in range(GAConfig[\"initial_population_size\"]):\n new_chromosome = Chromosome(GAConfig[\"num_categories\"])\n for gene in self.phones:\n random_category = randint(0, GAConfig[\"num_categories\"] - 1)\n new_chromosome.insert_into_category(random_category, gene)\n #need to make sure that the chromosome has all categories fixed here.\n\n #adds the restrictions to the categories\n if(GAConfig[\"category_restriction\"] == \"True\"):\n new_chromosome = self.space_chrom(new_chromosome)\n\n self.population.append(new_chromosome)\n\n self.population = self.computeFitness.compute(self.population)\n self._sort()", "def post_init(self):\n\t\tpass", "def __iadd__(self, population):\n self.chromosome_list += (to_chromosome(chromosome) for chromosome in population)", "def truncate(self):\n\n self.population = self.population[:self.max_number_trees]", "def mutatePopulation(self, population):\n\t\tfor i in range(int(math.ceil(self.selectionRate * len(population)))):\n\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population[:self.populationSize]", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "def SetPop(self, fname, var):\n\n\t\tself._pop_fname = fname\n\t\tself._pop_var = var", "def pop(self) -> Any:\n # TODO: Implement this method\n ...", "def _localSetState(self,pdict):\n self.apex = pdict.pop('apex')\n self.min = pdict.pop('min' )\n self.max = pdict.pop('max' )", "def postLoad(self):\n pass", "def create_population(self):\n stagnation = DefaultStagnation(self.config.population, 
self.reporters)\n self.reporters = ReporterSet()\n self.reproduction = DefaultReproduction(self.reporters, stagnation)\n \n # Create a population from scratch, then partition into species\n self.population = self.reproduction.create_new(config=self.config,\n num_genomes=self.config.population.pop_size)\n self.species = DefaultSpecies(reporters=self.reporters)\n self.species.speciate(config=self.config,\n population=self.population,\n generation=self.generation,\n logger=self.log)\n \n # Add to each of the species its elites\n self.update_species_fitness_hist()\n \n # Use 'print' to output information about the run\n reporter = StdOutReporter()\n self.add_reporter(reporter)\n \n # Save newly made population\n self.save()\n \n # Write population configuration to file\n with open(f\"population{'_backup' if self.use_backup else ''}/\"\n f\"storage/\"\n f\"{self.folder_name}/\"\n f\"{self}/\"\n f\"config.txt\", 'w') as f:\n f.write(self.config.read())", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')", "def set_parameters(self, **kwargs):\n kwargs.pop('population_size', None)\n super().set_parameters(population_size=1, **kwargs)\n self.candidates = None", "def _recompute_fitness(self):\n for cur in self.population:\n if cur['fitness'] is None:\n cur['fitness'] = self.op.fitness(cur['individual'])", "def set_test(self):\n self.genes_test = self.__genes.copy()\n self.__fitness_test = self.__fitness", "def populate(self):\n insert_many_on_conflict_ignore(self.settings, Player, self.rows)", "def _perturbInPlaceHard(self):\n die", "def __init__(self, _populationSize, _chromosomeClass):\n # a generation is a collection of chromosomes stored in a priority queue\n # which is ordered by fitness\n self.generation = PriorityQueue()\n # store how many chromosomes are in each generation\n self.populationSize = _populationSize\n # store a template for generating chromosomes\n self.chromosomeClass = _chromosomeClass\n # choose a random starting population\n self.randomPopulation()", "def update_original_data(self):\n pass", "def pop(self):\n pass", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def generational_step(self, population):\n offspring = self.variation(population, self._number_offspring)\n self.evaluation(population)\n self.evaluation(offspring)\n if self._target_populations_size is None:\n new_pop_size = len(population)\n else:\n new_pop_size = self._target_populations_size\n self.update_diagnostics(population, offspring)\n return self.selection(population + offspring, new_pop_size)", "def init_population(self, size):\n print(\"Initializing population.\")\n self.population = []\n for _ in range(size):\n self.population.append(Gene(self.tactics))", "def pop():" ]
[ "0.6781482", "0.64989024", "0.6452162", "0.6377374", "0.6291111", "0.6236171", "0.61310154", "0.6073461", "0.6057978", "0.6027081", "0.59793514", "0.5884385", "0.5880151", "0.58638716", "0.58442855", "0.58037657", "0.5794639", "0.57940483", "0.5777354", "0.5754382", "0.5726092", "0.5718168", "0.56859607", "0.5638595", "0.56381685", "0.5635518", "0.56198317", "0.5614223", "0.5576082", "0.55591345", "0.5518841", "0.54909104", "0.54757875", "0.5471002", "0.54709345", "0.5446345", "0.54338956", "0.5433834", "0.54150933", "0.54150933", "0.54097587", "0.5386042", "0.53656566", "0.53472066", "0.53429747", "0.53429747", "0.5329117", "0.53273565", "0.532441", "0.5319009", "0.5310631", "0.5310631", "0.5292913", "0.5285327", "0.5285327", "0.52757293", "0.5275688", "0.52750075", "0.5273652", "0.5269429", "0.5245572", "0.523629", "0.5235646", "0.5221882", "0.5215133", "0.5212254", "0.520131", "0.52010864", "0.5198781", "0.51927394", "0.5189573", "0.5186894", "0.5184774", "0.51838756", "0.5183747", "0.5179157", "0.5174597", "0.5168589", "0.51622975", "0.5156873", "0.5150574", "0.5148962", "0.5148742", "0.5148636", "0.5144786", "0.51345456", "0.5133305", "0.5122913", "0.51185006", "0.51155543", "0.5108693", "0.51040643", "0.5103341", "0.5094687", "0.5089858", "0.50874865", "0.5086769", "0.508399", "0.5081995", "0.50727147" ]
0.52738607
58
This method is called after assignment to a population.
def _init_after_assignment(self):
    self.labels = { 'axes': [self.locus1, self.locus2, self.locus3], \
                    'elements': [self.pop._get_axis_elements(self.locus1),
                                 self.pop._get_axis_elements(self.locus2),
                                 self.pop._get_axis_elements(self.locus3)] }
    self._init_labels(self.labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_population(self):\n pass", "def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()", "def on_assign(self):", "def init_pop(self):\n genes = np.random.randn( self.population_size * self.individual.gene_count )\n self.population = genes.reshape((self.population_size, -1))\n #print(self.population)", "def _populate(self):\n raise NotImplemented", "def __call__(self, population, context):\n pass", "def on_unassign(self):", "def PLP(self, *_):\n self.reg.P = self.pop()", "def populate(self):\n pass", "def reset_next_population(self):\n self.next_population = []", "def populate(self, populator):\n pass", "def populate(self):\n raise NotImplementedError", "def reinit(self):\n self.logger.info(\"Reinit called. Clear the population.\")\n self.set_init_population([], perf_name=None)\n self._gt_rollouts = []\n self._gt_scores = []", "def setPop(self, pop: Population):\n self.population = pop\n self.population_size = pop.size()", "def pick_up(self):", "def test_pop_objects(self):\r\n tape = copy.deepcopy(self.tape)\r\n tape.populate_database_objects()\r\n eq_(tape.notes[1].state, 'PA')", "def create_new_population(self):\n self.check_for_generation_cap()\n pop_container = list()\n for chromosome in self.population:\n partner = bm.select_partner(\n self.fitness_scores, self.population)\n child = bm.mutate(bm.crossover(chromosome, partner))\n pop_container.append(child)\n if self.population == pop_container:\n print(\"newly created populous is the same as the old populous\")\n self.population = pop_container\n print(\"generations: \", self.generations)\n self.generations += 1", "def evolve_population(self, **options):\n new_population = self.population.evolve(**options)\n self.population = new_population\n self.generation = new_population.generation_number", "def population(self) -> SimplePopulation:\n raise NotImplementedError(\"Subclass must implement\")", "def _populate(self):\n self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def _evolve_population(self):\n # save the old population\n self.old_population = self.population.copy()\n self.old_population_errors = self.population_errors.copy()\n\n # index pointers\n rind = numpy.random.permutation(4)+1\n\n # shuffle the locations of the individuals\n ind1 = numpy.random.permutation(self.population_size)\n pop1 = self.old_population[ind1,:]\n \n # rotate for remaining indices\n rot = numpy.remainder(self.rot_ind + rind[0], self.population_size)\n ind2 = ind1[rot,:]\n pop2 = self.old_population[ind2,:]\n\n rot = numpy.remainder(self.rot_ind + rind[1], self.population_size)\n ind3 = ind2[rot,:]\n pop3 = self.old_population[ind3,:]\n\n rot = numpy.remainder(self.rot_ind + rind[2], self.population_size)\n ind4 = ind3[rot,:]\n pop4 = self.old_population[ind4,:]\n\n rot = numpy.remainder(self.rot_ind + rind[3], self.population_size)\n ind5 = ind4[rot,:]\n pop5 = self.old_population[ind5,:]\n \n # population filled with best individual\n best_population = self.best_individual[numpy.newaxis,:].repeat(self.population_size,axis=0)\n\n # figure out the crossover ind\n xold_ind = numpy.random.rand(self.population_size,self.num_params) >= \\\n self.crossover_prob\n\n # get new population based on desired strategy\n # DE/rand/1\n if self.method == DE_RAND_1:\n population = pop3 + self.scale*(pop1 - pop2)\n population_orig = pop3\n # DE/BEST/1\n if self.method == 
DE_BEST_1:\n population = best_population + self.scale*(pop1 - pop2)\n population_orig = best_population\n # DE/best/2\n elif self.method == DE_BEST_2:\n population = best_population + self.scale * \\\n (pop1 + pop2 - pop3 - pop4)\n population_orig = best_population\n # DE/BEST/1/JITTER\n elif self.method == DE_BEST_1_JITTER:\n population = best_population + (pop1 - pop2) * \\\n ((1.0-0.9999) * \\\n numpy.random.rand(self.population_size,self.num_params) + \\\n self.scale)\n population_orig = best_population\n # DE/LOCAL_TO_BEST/1\n elif self.method == DE_LOCAL_TO_BEST_1:\n population = self.old_population + \\\n self.scale*(best_population - self.old_population) + \\\n self.scale*(pop1 - pop2)\n population_orig = self.old_population\n \n # crossover\n population[xold_ind] = self.old_population[xold_ind]\n\n # apply the boundary constraints\n for p in xrange(self.num_params):\n # get min and max\n min_val = self.param_ranges[p][0]\n max_val = self.param_ranges[p][1]\n\n # find where exceeded max\n ind = population[:,p] > max_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = max_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-max_val)\n\n # find where below min\n ind = population[:,p] < min_val\n if ind.sum() > 0:\n # bounce back\n population[ind,p] = min_val + \\\n numpy.random.rand(ind.sum())*\\\n (population_orig[ind,p]-min_val)\n\n # set the class members\n self.population = population\n self.population_orig = population", "def populate(self, records):\n if self._population is not None:\n raise AttributeError(\"The herd is already populated.\")\n if not isinstance(records, (tuple, list)):\n raise ValueError(\"Expected a tuple or list.\")\n if isinstance(records, list):\n records = tuple(records)\n self._population = records", "def _add_population(self, population):\n self._populations.append(population)", "def setUp(self):\n N = 10\n pA = 0.5\n pB = 0.5\n qA = 1 - pA\n qB = 1 - pB\n locus_A = (['A'] * int(N * pA)) + (['a'] * int(round(N * qA)))\n locus_B = (['B'] * int(N * pB)) + (['b'] * int(round(N * qB)))\n self.pop = population.Population(N, locus_A, locus_B)", "def population(self):\n return self._population", "def reset(self, pop_stats):\n self.assigned = False\n self.move_count = 0\n self.dstrct = None\n\n self.x = pop_stats[0] #target pop for districts\n self.greatest_pop = pop_stats[1]\n self.least_pop = pop_stats[2]", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.p = pdict.pop('p')", "def save_population(self, t):\n # TODO ejw: convert individual level data from self.P into dataframe\n # Info of interest is self.P.I: class of each individual in the population, including:\n # ID\n # adam {bool}: from initial population\n # age {age}:\n # age_days {int}: unknown\n # birth_order {int}: guessing st born, 2nd born, etc.\n # children {list}: IDs of children.\n # deps {list}: IDs of dependents still in household, or orphans?.\n # divorced {bool}:\n # father {str}: guessing farther `ID`.\n # groups {dict}: {`household`: int} -> Household ID linked to self.P.groups['household']:\n # mother {str}: guessing mother's `ID`.\n # next_birth_age {int}: age at which next birth can occur\n # partner {int}: partner's ID, if married.\n # sex {int}: 0: male, 1: female\n # with_parents {int}: still staying with parents (mother or father should be in same household)\n # self.P.groups['households']: same as above, but group per household member's, so much easier to extract\n # from here.\n # self.P.households: history of each household, but 
unknown how it's linked to self.P.groups.\n\n #n_people = len(self.P.I)\n pop = self.P.I\n ids = pop.keys()\n\n # attributes to be extracted\n col_seq = ['time', 'person_id', 'household_id', 'sex', 'age', 'time_birth', 'time_die',\n 'birth_order', 'adam_eve', 'divorced', 'stay_with_parents', 'partner_id',\n 'father_id', 'mother_id', 'children_id', 'dependents_id']\n col_attr = {\n 'person_id': 'ID',\n 'age': 'age',\n 'sex': 'sex',\n 'birth_order': 'birth_order',\n 'adam_eve': 'adam',\n 'divorced': 'divorced',\n 'stay_with_parents': 'with_parents',\n 'time_birth': 'time_birth',\n 'time_die': 'time_die'\n }\n pd_dict = {}\n #other_col = [ 'time', 'household_id', 'partner_id', 'father_id', 'mother_id', 'children_id', 'dependents_id' ]\n for id_key in ids:\n person = pop[id_key]\n person_records = { k:None for k in col_seq }\n person_records.update({ k:getattr(person, v) for k,v in col_attr.items() })\n #person_records.update({ k:None for k in other_col })\n\n person_records['time'] = t\n person_records['household_id'] = person.groups['household']\n\n if not(person.partner is None):\n person_records['partner_id'] = person.partner.ID\n\n father = None\n mother = None\n if len(person.parents) == 2:\n father = person.parents[0].ID\n mother = person.parents[1].ID\n elif len(person.parents) == 1:\n father = None\n mother = person.parents[0].ID\n person_records['father_id'] = father\n person_records['mother_id'] = mother\n\n if len(person.children):\n children_ids = ';'.join([str(ind.ID) for ind in person.children])\n person_records['children_id'] = children_ids\n if len(person.deps):\n dep_ids = ';'.join([str(ind.ID) for ind in person.deps])\n person_records['dependents_id'] = dep_ids\n\n pd_dict[id_key] = person_records\n pop_pd = pd.DataFrame.from_dict(pd_dict, orient='index')\n\n #pop_pd = pop_pd[col_seq] # to ensure the column sequence\n self.pop_pd.append(pop_pd)", "def prepare(self, popSize, evaluate):\n for i in range(popSize):\n member = Member(self.memberLength, self.lowerDomain, self.upperDomain, self.memberDataType)\n member.loss = evaluate(member.rep)\n self.population.append(member)\n if (self.eliteLoss is None) or (self.eliteLoss > member.loss):\n self.eliteLoss = member.loss\n self.eliteIndex = i\n elif (self.diversityLoss is None) or (self.diversityLoss < member.loss):\n self.diversityLoss = member.loss\n self.diversityIndex = i", "def next_generation(self, population):\n pass", "def __call__(self, time: float):\n # Double loop over the whole population, clearing places\n # of the variable population and refilling them.\n\n # Can call a this line if being called in from file etc.\n params = Parameters.instance().place_params\n for cell in self._population.cells:\n for place in cell.places:\n param_ind = place.place_type.value - 1\n if param_ind < len(params[\"mean_size\"]):\n # Checks whether values are present, otherwise uses\n # defaults\n mean_cap = params[\"mean_size\"][param_ind]\n if place.place_type.name == \"Workplace\":\n # Variable population is people not in the fixed pop.\n # Held in the last group of the place.\n # Changed at each timestep\n group_ind = list(place.person_groups.keys())[-1]\n place.empty_place(groups_to_empty=[group_ind])\n person_list = [person for person in place.cell.persons\n if person not in place.persons]\n self.update_place_group(place, group_index=group_ind,\n mean_capacity=mean_cap,\n person_list=person_list.copy())\n\n elif place.place_type.name == \"OutdoorSpace\":\n place.empty_place()\n self.update_place_group(place)", "def 
_set_propreties(self):\n pass", "def default_replacement(random, population, parents, offspring, args):\r\n return population", "def pop(self):\n assert self.local_variables.parent is not None\n self.local_variables = self.local_variables.parent\n assert self.local_types.parent is not None\n self.local_types = self.local_types.parent", "def getPopulation(self):\n\n return self.p", "def set_population_dependent_flags(self, *args):\n # check if self.food <= self.population\n # change self.hungry\n # change if population can be added (6)\n if self.population.get() == Species.MAX_POP:\n # turn off add population button\n pass", "def testExceedingSetter(self):\n _1 = [ (self.kl[0], 3),\n (self.kl[1], 3),\n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 2, \"got {}\".format(_2))\n _expect = set([(self.kl[1], 4), (self.kl[0], 6), ])\n self.assertEqual(_2, _expect, \"something odd\")", "def testPartialAndIncorrectSetter(self):\n _1 = [ (self.kl[0], 1), \n (self.kl[1], 1), \n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 3, \"got {}\".format(_2))\n _expect = set([(self.kl[0], 2),\n (self.kl[1], 2),\n (getattr(tp, \"RandConso\"), 6)])\n self.assertEqual(_2, _expect, \"something odd\")", "def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num", "def rePop(self):\n nScan = 0\n pScan = self.num-1\n while not self.isFull():\n while self.genepool[0][pScan] == None:\n pScan = (pScan-1)%self.num\n while self.genepool[0][nScan] != None:\n nScan = (nScan+1)%self.num\n self.genepool[0][nScan] = self.genepool[0][pScan].mutate()\n self.genepool[1][nScan] = self.genepool[1][pScan].mutate()\n nScan = (nScan+1)%self.num\n pScan = (pScan-1)%self.num", "def finalise(self):", "def tearDown(self):\n del self.pop", "def add_population(self):\n # First, check if less than max size\n if self.population.get() >= Species.MAX_POP:\n raise PopulationOverflowException(\"Cannot add more than 6 population\") \n self.population.set(self.population.get()+1)\n self.player.request_payment(\"discard\")", "def select(self, m, population):\n pass", "def pop(self):", "def pop(self):", "def save_population(self, generation):\r\n pop_checkpoint = {\r\n 'populations' : {name : np.stack(pop.population) for name, pop in self.populations.items()},\r\n 'generation' : generation,\r\n 'mutation_prob' : {name : pop.mutation_prob for name, pop in self.populations.items()},\r\n 'evolution_hist' : self.evolution_history,\r\n 'mu' : {name : pop.strategy_m for name, pop in self.populations.items()},\r\n 'C' :{name : pop.strategy_C for name, pop in self.populations.items()},\r\n 'cc' :{name : pop.cc for name, pop in self.populations.items()},\r\n 'cs' :{name : pop.cs for name, pop in self.populations.items()},\r\n 'c_cov' :{name : pop.c_cov for name, pop in self.populations.items()},\r\n 'mu_cov':{name : pop.mu_cov for name, pop in self.populations.items()},\r\n 'ds':{name : pop.ds for name, pop in self.populations.items()},\r\n 'evo_path':{name : pop.evo_path for name, pop in self.populations.items()},\r\n 'ps':{name : pop.ps for name, pop in self.populations.items()},\r\n 'B':{name : pop.B for name, pop in 
self.populations.items()},\r\n 'Bt' :{name : pop.Bt for name, pop in self.populations.items()},\r\n 'D' : {name : pop.D for name, pop in self.populations.items()},\r\n 'sigma' : {name : pop.sigma for name, pop in self.populations.items()},\r\n 'num_evals' :{name : pop.num_evals for name, pop in self.populations.items()},\r\n }\r\n file_name = 'spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name\r\n save_pickle(pop_checkpoint, file_name)\r\n logging.info('Successfully saved evolution checkpoint.')", "def pop_write(self):\n ...", "def populate(cls):\n raise NotImplementedError", "def pop(self):\n pass", "def pop(self):\n pass", "def _add_population(self, info, index, population):\n info[index] = [self._representation.decode(item) for\n item in population]", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def _localSetState(self,pdict):\n self.p = pdict.pop('p')", "def _generate_population(self) -> None:\n self._population = list()\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n initial_fitval = self._fit_test(blank_img_ext)\n\n for i in range(self._pop_size):\n # Each chromosome is an empty black image\n blank_img_ext = ExtendedImage.create_empty_image(width=self._orig_img.get_width(),\n height=self._orig_img.get_height())\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((blank_img_ext, initial_fitval))", "def step(self):\n y = np.random.rand(self.p.lambda_, self.p.d).T\n x = self.p.m.reshape(-1, 1) * y\n f = np.array(list(map(sum, x)))\n self.p.used_budget += self.p.lambda_\n self.p.population = Population(x, y, f)\n self.p.m_old = self.p.m.copy()\n self.p.m *= np.linalg.norm(y, axis=1).reshape(-1, 1)\n self.p.adapt()\n self.p.old_population = self.p.population.copy()", "def _read_pops(self, sheet) -> None:\n\n # TODO - can modify _read_pops() and _write_pops() if there are more population attributes\n tables = read_tables(sheet)[0]\n assert len(tables) == 1, \"Population Definitions page should only contain one table\"\n\n self.pops = sc.odict()\n assert cell_get_string(tables[0][0][0]).lower() == \"abbreviation\"\n assert cell_get_string(tables[0][0][1]).lower() == \"full name\"\n\n # If pop typ column exists, check the heading is correct\n if len(tables[0][0]) > 2:\n assert cell_get_string(tables[0][0][2]).lower() == \"population type\"\n\n for row in tables[0][1:]:\n\n pop_name = cell_get_string(row[0])\n assert len(pop_name) > 1, 'Population code name (abbreviation) \"%s\" is not valid - it must be at least two characters long' % (pop_name)\n\n label = cell_get_string(row[1])\n assert len(label) > 1, 'Population full name \"%s\" is not valid - it must be at least two characters long' % (label)\n\n if pop_name.lower() in FS.RESERVED_KEYWORDS:\n raise Exception('Population name \"%s\" is a reserved keyword' % (pop_name.lower()))\n\n poptype = None\n if len(row) > 2 and row[2].value is not None:\n poptype = cell_get_string(row[2])\n\n self.pops[pop_name] = {\"label\": label, \"type\": poptype}", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus1, self.locus2], \\\n 'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2)] }\n self._init_labels(self.labels)", "def new_population(self, population_p, population_r):\n population_choice_method = 2\n\n population = []\n for i in range(self.population_length):\n population.append(population_p[i])\n population.append(population_r[i])\n\n 
population = self.sort_population(population)\n\n if population_choice_method == 1: # mi najlepszych osobnikow\n population = [population[x] for x in range(0, self.population_length)]\n\n else: # metoda ruletki\n tmp_population = []\n population_size = len(population)\n for i in range(self.elite): # Zachowanie elitarnych osobnikow\n tmp_population.append(population[i])\n del population[i]\n population_size = population_size - 1\n\n tmp = [(1.0 / self.fitness(x), x) for x in population]\n tmp.reverse()\n error_sum = 0\n for i in range(len(tmp)):\n error_sum = error_sum + tmp[i][0]\n tmp = [(x[0] / error_sum, x[1]) for x in tmp]\n\n sum = 0\n roulette = []\n for i in range(len(tmp)):\n sum = sum + tmp[i][0]\n roulette.append([sum, tmp[i][1]])\n\n roulette = [(x[0] * 10000, x[1]) for x in roulette]\n\n for i in range(self.population_length - self.elite):\n rand = -1\n item = self.random_chromosome()\n while rand != -1 and item in tmp_population:\n rand = random.randint(0, 9999)\n item = next(x[1] for x in roulette if rand <= x[0])\n tmp_population.append(item)\n\n population = tmp_population\n\n return population", "def updateMatingPool(self):\n self.matingPool = []\n for ind_i in self.population:\n self.matingPool.append( ind_i.copy() )", "def default_replacement(random, population, parents, offspring, args):\n return population", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])", "def _localSetState(self,pdict):\n self.n = pdict.pop('n')\n self.p = pdict.pop('p')", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus], \\\n 'elements': [self.pop._get_axis_elements(self.locus)] }\n self._init_labels(self.labels)", "def breed(self): \n while (len(self.population) <= self.pop_size):\n orga = random.choice(self.population)\n orgb = random.choice(self.population) # Asexualism works too :-p\n self.population.append(orga.breed(orgb)) # Add a new organism", "def add_population(self, population):\n for species in population:\n y, x = [n - 1 for n in species['loc']]\n for ani in species['pop']:\n if ani['species'] == 'Herbivore':\n self.island.island[y][x].herbivores.append(Herbivore(\n weight=ani['weight'], age=ani['age'],\n coordinates=(y, x)))\n elif ani['species'] == 'Carnivore':\n self.island.island[y][x].carnivores.append(Carnivore(\n weight=ani['weight'], age=ani['age'],\n coordinates=(y, x)))", "def init_population(self):\n for idx in xrange(0, self.population_size):\n individual = self.individual_factory.create()\n self.population.append(individual)\n\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n\n # In order to roulette wheel selection work with negative values, \n # we sum all fitness values to the absolute value of the most negative plus one\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n #print self.population_fitness.min()\n #print self.population_fitness\n #print self.normalized_fitness", "def sortPopulation(self):\n self.population = sorted(self.population, key=attrgetter('fitness'), reverse=True)", "def pop_gauges(self, *_, **__): # pylint: 
disable=arguments-differ\n pass", "def load_population(self):\r\n checkpoint = load_pickle('spike_swarm_sim/checkpoints/populations/' + self.checkpoint_name)\r\n logging.info('Resuming CMA-ES evolution using checkpoint ' + self.checkpoint_name)\r\n key = tuple(self.populations.keys())[0]\r\n for key, pop in checkpoint['populations'].items():\r\n self.populations[key].strategy_m = checkpoint['mu'][key]\r\n self.populations[key].strategy_C = checkpoint['C'][key]\r\n self.populations[key].cc = checkpoint['cc'][key]\r\n self.populations[key].cs = checkpoint['cs'][key]\r\n self.populations[key].mu_cov = checkpoint['mu_cov'][key]\r\n self.populations[key].c_cov = checkpoint['c_cov'][key]\r\n self.populations[key].ds = checkpoint['ds'][key]\r\n self.populations[key].evo_path = checkpoint['evo_path'][key]\r\n self.populations[key].ps = checkpoint['ps'][key]\r\n self.populations[key].B = checkpoint['B'][key]\r\n self.populations[key].Bt = checkpoint['Bt'][key]\r\n self.populations[key].D = checkpoint['D'][key]\r\n self.populations[key].sigma = checkpoint['sigma'][key]\r\n self.populations[key].num_evals = checkpoint['num_evals'][key]\r\n self.populations[key].population = self.populations[key].sample()\r\n self.init_generation = checkpoint['generation']\r\n self.evolution_history = checkpoint['evolution_hist']", "def _reload_values(self):\r\n raise NotImplementedError", "def popitem(self):\n pass", "def final(self):\n pass", "def apply(self, population_current, population_offspring):\n population_current[population_current.worst_index] = population_offspring[0]\n return population_current", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. 
population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')", "def initialize_population(self):\n for i in range(GAConfig[\"initial_population_size\"]):\n new_chromosome = Chromosome(GAConfig[\"num_categories\"])\n for gene in self.phones:\n random_category = randint(0, GAConfig[\"num_categories\"] - 1)\n new_chromosome.insert_into_category(random_category, gene)\n #need to make sure that the chromosome has all categories fixed here.\n\n #adds the restrictions to the categories\n if(GAConfig[\"category_restriction\"] == \"True\"):\n new_chromosome = self.space_chrom(new_chromosome)\n\n self.population.append(new_chromosome)\n\n self.population = self.computeFitness.compute(self.population)\n self._sort()", "def post_init(self):\n\t\tpass", "def __iadd__(self, population):\n self.chromosome_list += (to_chromosome(chromosome) for chromosome in population)", "def truncate(self):\n\n self.population = self.population[:self.max_number_trees]", "def mutatePopulation(self, population):\n\t\tfor i in range(int(math.ceil(self.selectionRate * len(population)))):\n\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(population[i])\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population[:self.populationSize]", "def pop(self) -> Any:\n # TODO: Implement this method\n ...", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "def SetPop(self, fname, var):\n\n\t\tself._pop_fname = fname\n\t\tself._pop_var = var", "def _localSetState(self,pdict):\n self.apex = pdict.pop('apex')\n self.min = pdict.pop('min' )\n self.max = pdict.pop('max' )", "def postLoad(self):\n pass", "def create_population(self):\n stagnation = DefaultStagnation(self.config.population, self.reporters)\n self.reporters = ReporterSet()\n self.reproduction = DefaultReproduction(self.reporters, stagnation)\n \n # Create a population from scratch, then partition into species\n self.population = self.reproduction.create_new(config=self.config,\n num_genomes=self.config.population.pop_size)\n self.species = DefaultSpecies(reporters=self.reporters)\n self.species.speciate(config=self.config,\n population=self.population,\n generation=self.generation,\n logger=self.log)\n \n 
# Add to each of the species its elites\n self.update_species_fitness_hist()\n \n # Use 'print' to output information about the run\n reporter = StdOutReporter()\n self.add_reporter(reporter)\n \n # Save newly made population\n self.save()\n \n # Write population configuration to file\n with open(f\"population{'_backup' if self.use_backup else ''}/\"\n f\"storage/\"\n f\"{self.folder_name}/\"\n f\"{self}/\"\n f\"config.txt\", 'w') as f:\n f.write(self.config.read())", "def _localSetState(self,pdict):\n self.mapping = pdict.pop('mapping')\n self.values = pdict.pop('values')", "def set_parameters(self, **kwargs):\n kwargs.pop('population_size', None)\n super().set_parameters(population_size=1, **kwargs)\n self.candidates = None", "def _recompute_fitness(self):\n for cur in self.population:\n if cur['fitness'] is None:\n cur['fitness'] = self.op.fitness(cur['individual'])", "def set_test(self):\n self.genes_test = self.__genes.copy()\n self.__fitness_test = self.__fitness", "def _perturbInPlaceHard(self):\n die", "def populate(self):\n insert_many_on_conflict_ignore(self.settings, Player, self.rows)", "def __init__(self, _populationSize, _chromosomeClass):\n # a generation is a collection of chromosomes stored in a priority queue\n # which is ordered by fitness\n self.generation = PriorityQueue()\n # store how many chromosomes are in each generation\n self.populationSize = _populationSize\n # store a template for generating chromosomes\n self.chromosomeClass = _chromosomeClass\n # choose a random starting population\n self.randomPopulation()", "def update_original_data(self):\n pass", "def pop(self):\n pass", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def generational_step(self, population):\n offspring = self.variation(population, self._number_offspring)\n self.evaluation(population)\n self.evaluation(offspring)\n if self._target_populations_size is None:\n new_pop_size = len(population)\n else:\n new_pop_size = self._target_populations_size\n self.update_diagnostics(population, offspring)\n return self.selection(population + offspring, new_pop_size)", "def init_population(self, size):\n print(\"Initializing population.\")\n self.population = []\n for _ in range(size):\n self.population.append(Gene(self.tactics))", "def pop():" ]
[ "0.6781537", "0.649858", "0.6452947", "0.6377448", "0.6291218", "0.62366885", "0.6131744", "0.60734284", "0.6058311", "0.6027393", "0.59784764", "0.5884509", "0.58802176", "0.58639985", "0.5845675", "0.58053315", "0.57949525", "0.57939976", "0.57769996", "0.5754858", "0.5725988", "0.5718096", "0.56870854", "0.5638378", "0.5637603", "0.56348294", "0.56207806", "0.56143945", "0.5575862", "0.55592036", "0.55186325", "0.549242", "0.5475194", "0.54707193", "0.54698217", "0.5446932", "0.5433762", "0.54330844", "0.541435", "0.541435", "0.54123527", "0.5387804", "0.536671", "0.534708", "0.5344157", "0.5344157", "0.53276885", "0.532526", "0.53190655", "0.5311635", "0.5311635", "0.52934116", "0.52860606", "0.52860606", "0.5275158", "0.5275103", "0.52746385", "0.5274005", "0.5273656", "0.5269609", "0.5244894", "0.52363443", "0.5235917", "0.52221525", "0.5215507", "0.5213675", "0.5201157", "0.5200943", "0.5199032", "0.5193488", "0.5190004", "0.51879984", "0.5186011", "0.5184361", "0.51829267", "0.5178624", "0.51747537", "0.5171135", "0.51623166", "0.5156337", "0.5150137", "0.5149625", "0.51490796", "0.5148715", "0.5145078", "0.51367104", "0.5133559", "0.51234114", "0.5118745", "0.51149446", "0.5108639", "0.5104001", "0.5103844", "0.5094514", "0.50905645", "0.50885385", "0.5087535", "0.5083311", "0.5082411", "0.5073945" ]
0.5329292
46
Build weight for the inheritance of nuclear background (a.k.a species).

                        male
              |  A        B        H
            --+---------------------------------
            A |  A        H        A/2,H/2
   female   B |  H        B        B/2,H/2
            H |  A/2,H/2  B/2,H/2  A/4,B/4,H/2
def _build(self):
    # indices: 0 = A, 1 = B, 2 = H; ary[parent, parent, offspring] holds the inheritance weight
    ary = np.zeros( (3,3,3), float )
    ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.    # A x A -> A, B x B -> B, A x B -> H
    ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5   # A x H -> A/2, H/2
    ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5   # B x H -> B/2, H/2
    ary[2,2,0] = ary[2,2,1] = 0.25                            # H x H -> A/4, B/4, H/2
    ary[2,2,2] = 0.5                                          # e.g. ary[2,2,:] == [0.25, 0.25, 0.5]
    return ary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weight(self):", "def _init_inherit_physical_attributes(self):\n config = self.person.cosmos.config\n mother, father = self.person.biological_mother, self.person.biological_father\n parents = (mother.body, father.body)\n # Handedness\n if random.random() < config.heritability_of_handedness:\n takes_after = random.choice(parents)\n self.left_handed = Feature(value=takes_after.left_handed, inherited_from=takes_after)\n self.right_handed = Feature(value=takes_after.right_handed, inherited_from=takes_after)\n # Hustle\n if random.random() < config.heritability_of_hustle:\n takes_after = random.choice(parents)\n inherited_hustle = takes_after.hustle\n mutated_hustle = normal(inherited_hustle, config.hustle_mutation_sd)\n self.hustle = Feature(value=mutated_hustle, inherited_from=takes_after)\n else:\n pass # TODO SET UP GENERATING FROM NOTHING", "def animal_weights(self):\n herb_weights = []\n carn_weights = []\n for cell in self.land_cells.values():\n for herb in cell.herbivores:\n herb_weights.append(herb.weight)\n for carn in cell.carnivores:\n carn_weights.append(carn.weight)\n\n if not herb_weights:\n return [carn_weights]\n elif not carn_weights:\n return [herb_weights]\n else:\n return [herb_weights, carn_weights]", "def weighted_bright():\n return np.random.choice(\n [17, 18, 19, 20, 21] * 1 +\n [22, 23, 24, 25, 26] * 2 +\n [27, 28, 29, 30, 31] * 3\n )", "def __deriveElementalWeightsByNaturalNuclideAbundances():\n for element in elements.byName.values():\n numer = 0.0\n denom = 0.0\n for nb in element.getNaturalIsotopics():\n numer += nb.weight * nb.abundance\n denom += nb.abundance\n\n if denom:\n element.standardWeight = numer / denom", "def weight(self) -> Mass:\n return Mass(0.0)", "def update_animal_weight_age(self):\n for species in self.fauna_list:\n for animal in self.fauna_list[species]:\n animal.animal_grows()", "def calc_carbon_herb(height, diameter = 1, age = 1):\n \"\"\"This includes habits: perennial, annual, bulb, climber, biennial\\\n annual/biennial, perennial climber, annual/perennial, corm, annual climber\"\"\"\n \n #convert to imperial\n height /= 3.281 #feet\n diameter /= 2.54 #inches\n \n #print(height, diameter)\n \n #calculate green weight of herb: (above-ground weight) * 1.2\n green_weight = ( diameter**2 * height) * 1.2\n \n #dry weight: average tree is 72.5 dry matter \n dry_weight = 0.725 * green_weight\n \n #weight of carbon: 50% of tree dry weight\n c_weight = 0.5 * dry_weight\n \n #weight of CO2 sequestered\n co2_weight = 3.67 * c_weight\n \n return co2_weight/2.205/1 #convert from lbs to kg, divide by age", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def create_model():\n # Get list of all syllables: [\"<s>\", \"AH\", \"</s>\", \"<s>\", \"T\", ...]\n syllabifier = Syllabifier()\n all_syllables = syllabifier.all_syllables()\n\n # Count conditional probabilties of phoneme tuples\n tcf = TrigramCollocationFinder.from_words(all_syllables)\n bcf = BigramCollocationFinder.from_words(all_syllables)\n tri_dict = dict(sorted(tcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n bi_dict = dict(sorted(bcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n\n # Create dictionary to count cond prob all phoneme tuples\n accepted_phonemes = [i[0] for i in cmudict.phones()]\n accepted_phonemes.append('<s>')\n accepted_phonemes.append('</s>')\n phoneme_tups = [p for p in itertools.product(accepted_phonemes, repeat=3)]\n cond_probs_dict = dict([(char, 0) for char in phoneme_tups])\n\n for t in 
tri_dict:\n p1, p2, p3 = t[0], t[1], t[2]\n tri_count = tri_dict[t]\n bi_count = bi_dict[(p1, p2)]\n if bi_count > 1:\n cond_prob = tri_count * 1.0 / bi_count\n else:\n cond_prob = 0.0\n cond_probs_dict[(p1, p2, p3)] = cond_prob\n\n pickle.dump(cond_probs_dict, open(COND_PROBS_PATH, \"wb\"))\n return", "def grains(self):\n grain_weight = self.mass * kilograms_to_grains\n return grain_weight", "def weight(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"weighting_value\"):\n self.load_weighting_data()\n self.weighting_calculation()", "def _init_generate_physical_attributes(self):\n # Prepare these now, for speedier access\n config = self.person.cosmos.config\n year = self.person.cosmos.year\n male = self.person.male\n # Determine age of physical peak, i.e., baseball prime\n self.age_of_physical_peak = config.determine_age_of_physical_peak()\n # Determine handedness\n self.lefty = True if random.random() < config.chance_of_being_left_handed else False\n self.righty = not self.lefty\n self.left_handed = 1.0 if self.lefty else 0.0\n self.right_handed = 1.0 if self.righty else 0.0\n # Determine hustle\n self.hustle = config.determine_hustle()\n # Determine adult height this person will attain, in inches\n if male:\n self.adult_height = normal(\n config.adult_male_height_mean(year=year), config.adult_male_height_sd(year=year)\n )\n else:\n self.adult_height = normal(\n config.adult_female_height_mean(year=year), config.adult_female_height_sd(year=year)\n )\n # Determine this person's BMI TODO BMI INCREASES AS ADULTHOOD PROGRESSES\n if male:\n self.bmi = normal(\n config.young_adult_male_bmi_mean(year=year), config.young_adult_male_bmi_sd(year=year)\n )\n else:\n self.bmi = normal(\n config.young_adult_female_bmi_mean(year=year), config.young_adult_female_bmi_sd(year=year)\n )\n # Determine propensities for coordination, reflexes, agility, jumping...\n self.coordination_propensity = config.determine_coordination_propensity()\n self.reflexes_propensity = config.determine_reflexes_propensity(\n coordination_propensity=self.coordination_propensity\n )\n self.agility_propensity = config.determine_agility_propensity()\n self.jumping_propensity = config.determine_jumping_propensity() # Number of inches added/subtracted to base\n # ...and finally footspeed propensity, which is a bit more convoluted to compute\n primitive_coordination = config.determine_primitive_coordination(bmi=self.bmi) if self.bmi > 24 else 1.0\n adult_coordination = primitive_coordination * self.coordination_propensity\n primitive_footspeed = config.determine_primitive_footspeed(\n coordination=adult_coordination, height=self.adult_height\n )\n self.footspeed_propensity = config.determine_footspeed_propensity(primitive_footspeed=primitive_footspeed)\n # Finally, fit these potentials to the person's current age\n self.develop()", "def hebb_rule(dados):\n # Passo 0: Inicializar todos os pesos\n n = len(dados[0][0]) - 1\n weight = zeros(n + 1)\n print(weight)\n\n # Passo 1: Para cada vetor de treinamento na entrada e par de objetivos na saída (e : s)\n for _, dado in enumerate(dados):\n # Passo 2: Ajuste as ativações para as unidades de entrada\n x = dado[0]\n # Passo 3: Ajuste a ativação para a unidade de saída\n y = dado[1]\n # Passo 4: Ajuste os pesos e o bias\n for j in range(n):\n weight[j] += x[j] * y\n weight[n] += + y # Bias é weight[n]\n print(weight)", "def bmi(weight, height):\n return weight / height ** 2", "def __init__(\n self,\n height=20,\n width=20,\n 
initial_sheep=100,\n initial_wolves=50,\n sheep_reproduce=0.04,\n wolf_reproduce=0.05,\n wolf_gain_from_food=20,\n grass=False,\n grass_regrowth_time=30,\n sheep_gain_from_food=4,\n trees_carrots_ratio=0.5,\n YEAR=20,\n nb_of_hunters=0,\n ):\n super().__init__()\n # Set parameters\n self.height = height\n self.width = width\n self.initial_sheep = initial_sheep\n self.initial_wolves = initial_wolves\n self.sheep_reproduce = sheep_reproduce\n self.wolf_reproduce = wolf_reproduce\n self.wolf_gain_from_food = wolf_gain_from_food\n self.grass = grass\n self.grass_regrowth_time = grass_regrowth_time\n self.sheep_gain_from_food = sheep_gain_from_food\n self.trees_carrots_ratio = trees_carrots_ratio\n self.YEAR = YEAR\n self.nb_of_hunters = nb_of_hunters\n\n self.schedule = RandomActivationByBreed(self) # classe contenant un dictionnaire des types d'agents et agents existants par type, avec une ordre d'activation possible\n self.grid = MultiGrid(self.height, self.width, torus=True)\n self.datacollector = DataCollector(\n {\n \"Fox\": lambda m: m.schedule.get_breed_count(Predator),\n \"Rabbit\": lambda m: m.schedule.get_breed_count(Prey),\n }\n )\n\n # Create sheep:\n for i in range(self.initial_sheep):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(3*self.YEAR)\n energy = self.random.randrange( int(self.sheep_gain_from_food/2), 2 * self.sheep_gain_from_food)\n sheep = Prey(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(sheep, (x, y))\n self.schedule.add(sheep)\n\n # Create wolves\n for i in range(self.initial_wolves):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(4*self.YEAR)\n energy = self.random.randrange(int(self.wolf_gain_from_food/2), 2 * self.wolf_gain_from_food)\n wolf = Predator(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(wolf, (x, y))\n self.schedule.add(wolf)\n\n # Create grass patches\n if self.grass:\n for agent, x, y in self.grid.coord_iter():\n if self.trees_carrots_ratio < self.random.random(): # aléatoire du nombre d'arbres et de carottes\n fully_grown = self.random.choice([True, False])\n if fully_grown: # carottes ou pousses de carotes\n countdown = self.grass_regrowth_time\n else:\n countdown = self.random.randrange(self.grass_regrowth_time)\n plant = Plant(self.next_id(), (x, y), self, fully_grown, countdown)\n else:\n plant = Tree(self.next_id(), (x, y), self)\n self.grid.place_agent(plant, (x, y))\n self.schedule.add(plant)\n\n # create hunters\n for i in range(self.nb_of_hunters):\n x = self.random.randrange(self.width-13, self.width-7) # HUNTERMODIF\n y = self.random.randrange(self.height-13, self.height-7) # HUNTERMODIF\n hunter = Hunter(self.next_id(), (x, y), self)\n self.grid.place_agent(hunter, (x, y))\n self.schedule.add(hunter)\n\n self.running = True\n self.datacollector.collect(self)", "def get_weights(self):", "def weightedBoldness(self):\n\n\t\treturn sum([blend.varietal.boldness * blend.portion / 100.0 for blend in self.blends])", "def brain_weight_oz(self):\r\n return Heart.heart_weight_oz(self) # Used method from Heart Class\r", "def calculate_prep_weight(weight, size):\n r = find_recovery_on_size(size)\n return weight / r", "def get_weight_class(weight):\n\n if(weight >= 3500):\n return 5\n elif(weight >= 3000 and weight < 3500):\n return 4\n elif(weight >= 2500 and weight < 3000):\n return 3\n elif(weight >= 2000 and weight < 2500):\n return 2\n else:\n return 1", "def 
guess_molecular_weight(self, i_seq):\n map_values = self._map_values.get(\"mFo\", None)\n if (map_values is None) : return None\n height = map_values[i_seq]\n mean_carbon = flex.mean(self.carbon_fo_values)\n assert (mean_carbon > 0)\n return 6 * height / mean_carbon", "def _build_multinomial_weights(self) -> None:\n weights_obs = ramp_up_weights(\n len(self.obs), self.tpe.full_weight_num, self.tpe.equal_weight\n )\n counts_obs = numpy.bincount(\n self.obs, minlength=len(self.choices), weights=weights_obs\n )\n counts_obs = counts_obs + self.tpe.prior_weight\n self.weights = counts_obs / counts_obs.sum()", "def metis(W, levels, rid=None):\n # Function written by M. Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents", "def generateSuit(self):\n\n dna = self.style\n self.headParts = []\n \n # most heads do not need different poly color or texture\n self.headColor = None\n self.headTexture = None\n\n # For suit death animation\n self.loseActor = None\n\n # Have we become a skelecog?\n self.isSkeleton = 0\n \n # Suit heights have been determined empirically; see\n # RoguesGallery.py or the magic word ~rogues.\n\n # corporate dept\n if (dna.name == 'f'):\n # flunky\n self.scale = 4.0/cSize\n self.handColor = SuitDNA.corpPolyColor\n self.generateBody()\n # this suit has two head parts\n self.generateHead(\"flunky\")\n self.generateHead(\"glasses\") \n self.setHeight(4.88)\n elif (dna.name == 'p'):\n # pencil pusher\n self.scale = 3.35/bSize\n self.handColor = SuitDNA.corpPolyColor\n self.generateBody()\n self.generateHead(\"pencilpusher\")\n self.setHeight(5.00)\n elif (dna.name == 'ym'):\n # yes man\n self.scale = 4.125/aSize\n self.handColor = 
SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"yesman\")\n self.setHeight(5.28)\n elif (dna.name == 'mm'):\n # micromanager\n self.scale = 2.5/cSize\n self.handColor = SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"micromanager\")\n self.setHeight(3.25)\n elif (dna.name == 'ds'):\n # downsizer - DEFAULT\n self.scale = 4.5/bSize\n self.handColor = SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"beancounter\")\n self.setHeight(6.08)\n elif (dna.name == 'hh'):\n # head hunter\n self.scale = 6.5/aSize\n self.handColor = SuitDNA.corpPolyColor \n self.generateBody()\n self.generateHead(\"headhunter\")\n self.setHeight(7.45)\n elif (dna.name == 'cr'):\n # corporate raider\n self.scale = 6.75/cSize\n self.handColor = VBase4(0.85, 0.55, 0.55, 1.0) \n self.generateBody()\n self.headTexture = \"corporate-raider.jpg\"\n self.generateHead(\"flunky\")\n self.setHeight(8.23)\n elif (dna.name == 'tbc'):\n # the big cheese\n self.scale = 7.0/aSize\n self.handColor = VBase4(0.75, 0.95, 0.75, 1.0)\n self.generateBody()\n self.generateHead(\"bigcheese\")\n self.setHeight(9.34)\n \n # legal dept\n elif (dna.name == 'bf'):\n # bottom feeder\n self.scale = 4.0/cSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.headTexture = \"bottom-feeder.jpg\"\n self.generateHead(\"tightwad\")\n self.setHeight(4.81)\n elif (dna.name == 'b'):\n # blood sucker\n self.scale = 4.375/bSize\n self.handColor = VBase4(0.95, 0.95, 1.0, 1.0)\n self.generateBody()\n self.headTexture = \"blood-sucker.jpg\" \n self.generateHead(\"movershaker\")\n self.setHeight(6.17)\n elif (dna.name == 'dt'):\n # double talker\n self.scale = 4.25/aSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.headTexture = \"double-talker.jpg\" \n self.generateHead(\"twoface\")\n self.setHeight(5.63)\n elif (dna.name == 'ac'):\n # ambulance chaser\n self.scale = 4.35/bSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.generateHead(\"ambulancechaser\")\n self.setHeight(6.39)\n elif (dna.name == 'bs'):\n # back stabber\n self.scale = 4.5/aSize\n self.handColor = SuitDNA.legalPolyColor \n self.generateBody()\n self.generateHead(\"backstabber\")\n self.setHeight(6.71)\n elif (dna.name == 'sd'):\n # spin doctor\n self.scale = 5.65/bSize\n self.handColor = VBase4(0.5, 0.8, 0.75, 1.0) \n self.generateBody()\n self.headTexture = \"spin-doctor.jpg\" \n self.generateHead(\"telemarketer\")\n self.setHeight(7.90)\n elif (dna.name == 'le'):\n # legal eagle\n self.scale = 7.125/aSize\n self.handColor = VBase4(0.25, 0.25, 0.5, 1.0) \n self.generateBody()\n self.generateHead(\"legaleagle\")\n self.setHeight(8.27)\n elif (dna.name == 'bw'):\n # bigwig\n self.scale = 7.0/aSize\n self.handColor = SuitDNA.legalPolyColor\n self.generateBody()\n self.generateHead(\"bigwig\")\n self.setHeight(8.69)\n \n # money dept\n elif (dna.name == 'sc'):\n # short changer\n self.scale = 3.6/cSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"coldcaller\")\n self.setHeight(4.77)\n elif (dna.name == 'pp'):\n # penny pincher\n self.scale = 3.55/aSize\n self.handColor = VBase4( 1.0, 0.5, 0.6, 1.0) \n self.generateBody()\n self.generateHead(\"pennypincher\")\n self.setHeight(5.26)\n elif (dna.name == 'tw'):\n # tightwad\n self.scale = 4.5/cSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"tightwad\")\n self.setHeight(5.41)\n elif (dna.name == 'bc'):\n # bean counter\n self.scale = 
4.4/bSize\n self.handColor = SuitDNA.moneyPolyColor\n self.generateBody()\n self.generateHead(\"beancounter\")\n self.setHeight(5.95)\n elif (dna.name == 'nc'):\n # number cruncher\n self.scale = 5.25/aSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"numbercruncher\")\n self.setHeight(7.22)\n elif (dna.name == 'mb'):\n # money bags\n self.scale = 5.3/cSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.generateHead(\"moneybags\")\n self.setHeight(6.97)\n elif (dna.name == 'ls'):\n # load shark\n self.scale = 6.5/bSize\n self.handColor = VBase4(0.5, 0.85, 0.75, 1.0) \n self.generateBody()\n self.generateHead(\"loanshark\")\n self.setHeight(8.58)\n elif (dna.name == 'rb'):\n # robber baron\n self.scale = 7.0/aSize\n self.handColor = SuitDNA.moneyPolyColor \n self.generateBody()\n self.headTexture = \"robber-baron.jpg\"\n self.generateHead(\"yesman\")\n self.setHeight(8.95)\n\n # sales dept\n elif (dna.name == 'cc'):\n # cold caller\n self.scale = 3.5/cSize\n self.handColor = VBase4(0.55, 0.65, 1.0, 1.0)\n self.headColor = VBase4(0.25, 0.35, 1.0, 1.0)\n self.generateBody() \n self.generateHead(\"coldcaller\")\n self.setHeight(4.63)\n elif (dna.name == 'tm'):\n # telemarketer\n self.scale = 3.75/bSize\n self.handColor = SuitDNA.salesPolyColor\n self.generateBody()\n self.generateHead(\"telemarketer\")\n self.setHeight(5.24)\n elif (dna.name == 'nd'):\n # name dropper\n self.scale = 4.35/aSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.headTexture = \"name-dropper.jpg\"\n self.generateHead(\"numbercruncher\")\n self.setHeight(5.98)\n elif (dna.name == 'gh'):\n # glad hander\n self.scale = 4.75/cSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.generateHead(\"gladhander\")\n self.setHeight(6.40)\n elif (dna.name == 'ms'):\n # mover & shaker\n self.scale = 4.75/bSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.generateHead(\"movershaker\")\n self.setHeight(6.70)\n elif (dna.name == 'tf'):\n # two-face\n self.scale = 5.25/aSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.generateHead(\"twoface\")\n self.setHeight(6.95)\n elif (dna.name == 'm'):\n # the mingler\n self.scale = 5.75/aSize\n self.handColor = SuitDNA.salesPolyColor \n self.generateBody()\n self.headTexture = \"mingler.jpg\" \n self.generateHead(\"twoface\")\n self.setHeight(7.61)\n elif (dna.name == 'mh'):\n # Mr. Hollywood\n self.scale = 7.0/aSize\n self.handColor = SuitDNA.salesPolyColor\n self.generateBody()\n self.generateHead(\"yesman\")\n self.setHeight(8.95)\n \n self.setName(SuitBattleGlobals.SuitAttributes[dna.name]['name'])\n self.getGeomNode().setScale(self.scale)\n self.generateHealthBar()\n self.generateCorporateMedallion()", "def make_weights_for_balanced_classes(self):\n\n count = [0] * self.get_num_classes()\n\n # label = self.class_map_dict[self.meta_data.loc[image_id]['dx']]\n # labels = [self.class_map_dict[l] for l in self.get_labels()]\n\n labels = self.get_labels()\n\n # Count how many instances there are for each class\n for l in labels:\n count[l] += 1\n\n weight_per_class = [0.] 
* self.get_num_classes()\n\n N = float(sum(count))\n\n # Assign a weight which is inversely proportional to class frequency\n for i in range(self.get_num_classes()):\n weight_per_class[i] = N/float(count[i])\n\n # Save results for debugging purposes\n self._weight_per_class = weight_per_class\n\n # Now assign a weight to each data point\n weight = [0] * len(labels)\n\n for idx, val in enumerate(labels):\n weight[idx] = weight_per_class[val]\n\n return weight", "def constructCell():\n\t\tself.weightGenerate()", "def coating_weight(self, thickness, weight=2400):\n return self.__length * self.__width * thickness * weight / 100", "def weight(self) -> int:\n weight = 0\n if self.models:\n weight += 401 - (1 if callable(self.models) else len(self.models))\n\n if self.manufacturers:\n weight += 301 - (\n 1 if callable(self.manufacturers) else len(self.manufacturers)\n )\n\n weight += 10 * len(self.channel_names)\n weight += 5 * len(self.generic_ids)\n if isinstance(self.aux_channels, frozenset):\n weight += 1 * len(self.aux_channels)\n return weight", "def nb_leafy_rameau(x):\r\n return sum([nb_leafy_rameau_cat(x, cat) for cat in ['small', 'medium', 'large']])", "def getWeight(self) -> float:\n ...", "def analyze_belief_strength_with_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n if apbs*og_bs > 0:\r\n if apbs > 0:\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100)\r\n else:\r\n nbs = max(og_bs + (0.1*prob*unc*apbs), -100)\r\n nbs = int(nbs)\r\n else:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def get_weight(self):\n pass", "def get_weight(self):\n pass", "def calc_weight(base):\n return weights[base] + sum([calc_weight(i) for i in leafs[base]])", "def weights(self):\r\n\t\treturn None", "def calc_weight(sequence):\r\n return len(sequence) * AVG_WEIGHT", "def __init__(self, weight: float = 1.0):\n\n super().__init__()\n self.weight = weight", "def test_breeding_parent_weight(self):\n Herbivore.set_parameters({\"gamma\": 1})\n start_weight = 50\n self.herb.weight = start_weight\n self.herb.breeding(10)\n nt.assert_less(self.herb.weight, start_weight)", "def 
weighted_setup(self):\r\n\r\n grading_policy = {\r\n \"GRADER\": [{\r\n \"type\": \"Homework\",\r\n \"min_count\": 1,\r\n \"drop_count\": 0,\r\n \"short_label\": \"HW\",\r\n \"weight\": 0.25\r\n }, {\r\n \"type\": \"Final\",\r\n \"name\": \"Final Section\",\r\n \"short_label\": \"Final\",\r\n \"weight\": 0.75\r\n }]\r\n }\r\n self.add_grading_policy(grading_policy)\r\n\r\n # set up a structure of 1 homework and 1 final\r\n self.homework = self.add_graded_section_to_course('homework')\r\n self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')\r\n self.final = self.add_graded_section_to_course('Final Section', 'Final')\r\n self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')", "def at_object_creation(self):\n\n # in GRAMS mofo, this isn't the 17th century you know!\n self.db.weight = 0\n # In calories, main currency will be calories and bullets\n self.db.cost = 0\n # Not completely decided on implementing this - potenially a L W H or a \"tiny, small \" etc\n # or perhaps volume. Was thinking to stop people putting a chair in a fannypack\n # 10 tiny in one small - Tiny 1\n # 10 Small in one medium - Small - 10\n # 10 Medium in one large - Medium - 100\n # 10 Large - VeryLarge - Large - 1000\n # 10 VeryLarge - Room - 10000 VeryLarge - 10000\n # Room - 100000 Room - 100000\n # Fanny Pack - one Medium, 100 bullets,\n # Regular Back Page - 5 Medium\n # Hiking Pack - 1=2 large\n # Car - vary large, truck 3-4 Very Large to 2-3 rooms\n #\n # To carry something you have be able to carry the WEIGHT and fit in a container\n #\n # we'll assume you can carry 300 by default - Worn items removed from this\n # We'll also assume users can carry ONE item to their maximum weight without a container.. i.e. body\n # Or perhaps we add a \"Carry\" command, anything can be carried in your hands, just one of them\n # Other commands \"Drag\" - might be a Weight * 3 amount\n\n self.db.size = 0\n # some items will have a minimum size object they can fit in\n # self.db.min_size_fit = 0 # Removed - we were going to enforce this - to hard/complicated/saddens game\n\n # many items will have a noise modifier which will effect sneaking\n # We may also have it for weapon attacks.. i.e. firing bow vs gun\n # will it attract more zombies to you than another person\n # Increase chance of random encounters\n self.db.noise_modifier = 0\n\n # Can this be broken to create other parts\n # IDEA: Move this to a \"material_source\" object.. 
an object that we can create and when broken\n # generates further objects for collection\n\n self.db.breakable = 0\n # Hidden indicates this object is hidden in the room\n self.db.hidden_in_room = 0\n\n self.locks.add(\"equip:false()\")", "def work(char, base, scale_stat, factor):\n added = int(math.floor(char.__dict__[scale_stat] / factor))\n earned = base + added\n return [(\"gold\", earned)]", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def _random_weight(self):\n return random.uniform(MIN_WEIGHT, MAX_WEIGHT)", "def dp_make_weight(egg_weights, target_weight, memo={}):\n\n \"\"\"\n 根据提示: 每个pound类型的蛋是无限的。\n 问题是提供一种蛋的组合,最好pound数等于或是接近总的weight 并且要满足数量要越少越好。\n 这是两个限制条件。但是提示也给了总是有egg为value1的,那么难度小了很多。\n 现在是怎样让蛋的数量越少越好。\n \n 1.最优子结构\n egg_weights 现在假设是(1, 5, 10, 25)\n dp_make_weight((1, 5, 10, 25),x,memo) , 当x - n >= 0 时(n代表 1,5,10,25),\n 然后在 dp_make_weight((1,5,10,25,x-n,memo) +1 中 挑选最小值。+1的原因是包含本次\n 2.重叠子问题\n 详见ps1b的图片。\n 那么memo记录的key 为 avail(即剩余的容量) ,value 为avail下最小的蛋的数量n。\n \n 那么base_case是什么?\n target == 0时,返回0\n 现在按照深度优先的思路思考\n \"\"\"\n\n if target_weight == 0:\n return 0\n\n if target_weight in memo:\n return memo[target_weight]\n\n result = None # 占位符,没有多大用\n\n for elt in egg_weights:\n if target_weight - elt >= 0: # 这样才有继续探索的必要\n tmp_result = dp_make_weight(egg_weights, target_weight - elt, memo) + 1\n if result is None or tmp_result < result:\n result = tmp_result\n memo[target_weight] = result\n return result", "def calculate_bmi(height, weight):\n m_weight = 703 * weight\n m_height = height**2\n bmi = m_weight / m_height\n return bmi", "def test_attributes_weigthed(self):\n fields = Mineral.attributes_weighted()\n self.assertListEqual(fields[:-2], [\n 'group',\n 'formula',\n 'category',\n 'strunz_classification',\n 'crystal_system',\n 'mohs_scale_hardness',\n 'luster',\n 'color',\n 'specific_gravity',\n 'cleavage',\n 'diaphaneity',\n 'crystal_habit',\n 'streak',\n 'optical_properties',\n 'refractive_index', ])\n self.assertSetEqual(set(fields[-2:]), {\n 'unit_cell',\n 'crystal_symmetry',\n })", "def get_weight(self):\n # FIXME: BELUM ADA KEPUTUSAN\n return 0", "def gen_class_weights(df):\n class_counts_df = df.select(\"tumor_score\").groupBy(\"tumor_score\").count()\n class_counts = {row[\"tumor_score\"]:row[\"count\"] for row in class_counts_df.collect()}\n max_count = max(class_counts.values())\n class_weights = {k-1:max_count/v for k,v in class_counts.items()}\n return class_weights", "def findSimpleBackgroundModel(sequences):\n\tbackground = {\"A\": 0, \"C\": 0, \"G\": 0, \"T\":0}\n\tfor s in sequences:\n\t\tfor nt in background:\n\t\t\tbackground[nt] = background[nt] + s.sequence.count(nt)\n\n\t# normalize\n\ttotCounts = float(sum(background.values()))\n\tfor nt in background:\n\t\tbackground[nt] = background[nt]/totCounts\n\treturn background", "def calculate_basal_metabolic_rate(self, formula=1):\n factor = 5 if self.gender == self.GENDER_MALE else -161\n weight = self.weight if self.use_metric else AbstractWeight(self.weight, 'lb').kg\n\n try:\n rate = ((10 * weight) # in kg\n + (decimal.Decimal(6.25) * self.height) # in cm\n - (5 * self.age) # in years\n + factor)\n # Any of the entries is missing\n except TypeError:\n rate = 0\n\n return decimal.Decimal(str(rate)).quantize(TWOPLACES)", "def init_weight(w):\n shape = w.shape\n if len(shape) == 4:\n i, o, u, v = shape\n k = 
np.sqrt(6 / (i * u * v + o * u * v))\n w.data.uniform_(-k, k)\n elif len(shape) == 2:\n k = np.sqrt(6 / sum(shape))\n w.data.uniform_(-k, k)\n elif len(shape) == 1:\n w.data.zero_()", "def weight(self):\n return self.stabilizer_h.weight + self.stabilizer_vright.weight + self.stabilizer_vleft.weight", "def weight(self):\n return self._base.weight", "def _calculate_tag_weight(weight, max_weight, distribution):\r\n if distribution == LINEAR or max_weight == 1:\r\n return weight\r\n elif distribution == LOGARITHMIC:\r\n return math.log(weight) * max_weight / math.log(max_weight)\r\n raise ValueError(_('Invalid distribution algorithm specified: %s.') % distribution)", "def calc_carbon_tree(height, diameter=25, age = 10):\n \"\"\"Height in meter, diameter in cm, age in years\"\"\"\n \"\"\"This includes habits: Tree, Bamboo\"\"\"\n \n #convert to imperial\n height = height/3.281 #feet\n diameter = diameter/2.54 #inches\n \n #calculate green weight of tree: (above-ground weight) * 1.2\n if diameter < 11:\n green_weight = (0.25 * diameter**2 * height) * 1.2\n else:\n green_weight = (0.15 * diameter**2 * height) * 1.2\n \n #dry weight: average tree is 72.5 dry matter \n dry_weight = 0.725 * green_weight\n \n #weight of carbon: 50% of tree dry weight\n c_weight = 0.5 * dry_weight\n \n #weight of CO2 sequestered\n co2_weight = 3.67 * c_weight\n \n return co2_weight/2.205/age #convert from lbs to kg and divide by age", "def get_importance(self, key, value, depth):\n multiplier = 0.8 ** depth if depth > 1 else 1.0\n base = 0.0\n if key in ['condition', 'symptom', 'disease', 'treatment']:\n base += 5\n elif key in ['gender', 'age'] or 'location' in key:\n base += 4\n elif 'condition' in key or 'symptom' in key or 'disease' in key or 'treatment' in key:\n base += 3\n else:\n base += 2\n return multiplier * base", "def test_ontology_term_graph_based_information_content_as_weights(ontology):\n\tassert ontology.ic(\"TO:0000001\", as_weight=True) == 0.000\n\tassert ontology.ic(\"TO:0000002\", as_weight=True) == (((0.3690702464285426 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000003\", as_weight=True) == (((0.3690702464285426 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000004\", as_weight=True) == (((1.000 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000005\", as_weight=True) == 1.000\n\tassert ontology.ic(\"TO:0000006\", as_weight=True) == 1.000\n\tassert ontology.ic(\"TO:0000007\", as_weight=True) == (((0.5 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000008\", as_weight=True) == (((1.3690702464285427 - 0.000) * 1.000) / 3.000) + 0.000\n\tassert ontology.ic(\"TO:0000009\", as_weight=True) == 1.000", "def calculateRichness(self):\n inf = logger.info\n noc = self.occurrences.count()\n inf('Number of occurrences in this biome: %s' %noc)\n nsp = self.species.count()\n ngn = self.genera.count()\n inf('Number of species in this biome: %s' %nsp)\n inf('Number of genera in this biome: %s' %ngn) \n nfa = self.families.count()\n inf('Number of families in this biome: %s' %nfa)\n nor = self.orders.count()\n inf('Number of orders in this biome: %s' %nor) \n ncl = self.classes.count()\n inf('Number of classes in this biome: %s' %ncl)\n nph = self.phyla.count()\n inf('Number of orders in this biome: %s' %nph)\n nki = self.kingdoms.count()\n inf('Number of kingdoms in this biome %s' %nki)\n self.richness ={ 'occurrences' : noc,\n 'species' : nsp,\n 'genera' : ngn,\n 'families': nfa,\n 'classes' : ncl,\n 'orders' : nor,\n 'phyla' : nph,\n 'kingdoms' 
: nki\n }\n \n return self.richness", "def get_weight_category(self) -> WeightCategory:\n return WeightCategory.light if self.weight < 100 else WeightCategory.heavy", "def weight_setup(self, weighting):\n if weighting == \"overlap\":\n self.weights = overlap_generator(overlap, self.graph)\n elif weighting == \"unit\":\n self.weights = overlap_generator(unit, self.graph)\n elif weighting == \"min_norm\":\n self.weights = overlap_generator(min_norm, self.graph)\n else:\n self.weights = overlap_generator(normalized_overlap, self.graph)", "def weightThick(var,lats,types):\n \n if types == 'lens':\n sityr = np.empty((var.shape[0],var.shape[1],var.shape[2]))\n for ens in xrange(var.shape[0]):\n for i in xrange(var.shape[1]):\n for j in xrange(var.shape[2]):\n varq = var[ens,i,j,:,:]\n mask = np.isfinite(varq) & np.isfinite(lats)\n varmask = varq[mask]\n areamask = np.cos(np.deg2rad(lats[mask]))\n sityr[ens,i,j] = np.nansum(varmask*areamask)/np.sum(areamask)\n \n print 'Completed: Weighting per ensemble #%s!' % ense[ens]\n \n elif types == 'piomas':\n sityr = np.empty((var.shape[0],var.shape[1]))\n for i in xrange(var.shape[0]):\n for j in xrange(var.shape[1]):\n varq = var[i,j,:,:]\n mask = np.isfinite(varq) & np.isfinite(lats)\n varmask = varq[mask]\n areamask = np.cos(np.deg2rad(lats[mask]))\n sityr[i,j] = np.nansum(varmask*areamask)/np.sum(areamask)\n \n print '\\nCompleted: Yearly weighted SIT average!' \n return sityr", "def generate_weights(sizes):\n weights = {}\n weights[\"w\"] = []\n weights[\"b\"] = []\n for i in range(len(sizes)-2):\n weights[\"w\"].append(np.random.randn(sizes[i], sizes[i+1]))\n weights[\"b\"].append(np.random.randn(sizes[i+1]))\n weights[\"w_final\"] = np.random.randn(sizes[-2], sizes[-1])/np.sqrt(sizes[-1])\n weights[\"b_final\"] = np.random.randn(sizes[-1])\n return weights", "def weights_initializer(self):\n self.weights = [np.random.normal(0, 1 / np.sqrt(x), (x, y)) for x, y in list(zip(self.structure[1:], self.structure[:-1]))]", "def determine_category(weight):\n if weight < 52:\n return Category.FLY\n elif 52 <= weight < 57:\n return Category.FEATHER\n elif 57 <= weight < 63:\n return Category.LIGHT\n elif 63 <= weight < 69:\n return Category.WELTER\n elif 69 <= weight < 75:\n return Category.MEDIUM\n elif 75 <= weight < 81:\n return Category.MEDIUM_HEAVY\n elif 81 <= weight < 91:\n return Category.HEAVY\n elif weight >= 91:\n return Category.SUPER_HEAVY\n else:\n return TypeError", "def backgroundAndSkills(self):\n backgrounds = {}\n def createBackgrounds(fileName):\n \"\"\" (str) -> None\n Opens a file with background information and populates the backgrounds dictionary with\n that information\n \"\"\"\n backgroundFile = open(fileName,'r')\n current_bg = ''\n for line in backgroundFile:\n #If there is no text, go to next line\n if line == \"\\n\":\n pass\n #Else if the line starts with \"~~\", create new key in top level\n #dictionary with the remainder of that line and set its value\n #to an empty dictionary\n elif line[:2] == \"~~\":\n current_bg = line[2:-1]\n backgrounds[line[2:-1]] = {}\n #Go through the next few lines and set them to keys and values\n #in the nestled dictionary\n elif \":\" in line:\n line_heading = line[:line.index(\":\")]\n after_heading = line[line.index(\":\")+2:-1]\n #create a key/value pair for the background regarding its profession\n if line_heading == \"hasProfession\":\n \n #Change the string to a bool\n if after_heading == \"True\":\n backgrounds[current_bg][line_heading] = True\n else:\n 
backgrounds[current_bg][line_heading] = False\n #Create professions list if current BG has professions\n if line_heading == \"professions\" and backgrounds[current_bg]['hasProfession']:\n backgrounds[current_bg]['professions'] = after_heading.split(', ')\n #Create a two item list to store the trait name and its description\n if line_heading == \"trait\":\n backgrounds[current_bg]['trait'] = [line[line.index(\":\")+2: line.index(\"-\")-1],\\\n line[line.index(\"-\")+2:-1]]\n #Create an entry for the story of a character's background\n if line_heading == \"story\":\n backgrounds[current_bg]['story'] = after_heading\n #Create a list for the recommended skills\n if line_heading == \"recommended\":\n backgrounds[current_bg]['recommended'] = after_heading.split(', ')\n backgroundFile.close()\n\n #Creat background now\n createBackgrounds('Backgrounds.txt')\n\n \n \n \n #Make a list of backgrounds\n background_list = []\n for i in backgrounds:\n background_list.append(i)\n background_list.sort()\n #Ask user to choose a background and set that to self.background\n background_choice = raw_input('Enter a background from this list: '+str(background_list)+': ').title()\n print\n while background_choice not in background_list:\n background_choice = raw_input('Enter a background from this list: '+str(background_list)+': ').title()\n print\n self.background = background_choice\n self.backgroundStory = backgrounds[self.background]['story']\n #Add the background's trait to self.traits\n self.traits[backgrounds[self.background]['trait'][0]] = backgrounds[self.background]['trait'][1]\n #If the background has a profession, add that now\n if backgrounds[self.background]['hasProfession'] == True:\n #Create a temp list to account for Commoner's multiple profession listing\n profession_holder = []\n for i in backgrounds[self.background]['professions']:\n if i in profession_holder:\n pass\n else:\n profession_holder.append(i)\n #Ask user which profession they'd like or if they press Enter one is chosen randomly\n temp_choice = raw_input(\"Which profession would you like? \"+str(profession_holder)+\"\\n\"\\\n \"Enter one from the list above or press Enter for random. \").title()\n print\n while temp_choice != '' and temp_choice not in backgrounds[self.background]['professions']:\n temp_choice = raw_input(\"Which profession would you like? \"+str(profession_holder)+\"\\n\"\\\n \"Enter one from the list above or press Enter for random. \").title()\n print\n if temp_choice == '':\n temp_int = r.randint(0,len(backgrounds[self.background]['professions'])-1)\n self.backgroundProfession = backgrounds[self.background]['professions'][temp_int]\n else:\n self.backgroundProfession = temp_choice\n else:\n pass\n\n #Ask about skills.\n skill_choice = []\n print \"You'll now choose 4 skills from this list:\"\n print\n for i in skills:\n print i.title()\n print\n print \"Recommended skills for your \"+self.background+\" are: \"+str(backgrounds[self.background]['recommended'])\n for i in range(4):\n skill_choice.append(raw_input(\"Which Skill would you like for skill \"+str(i+1)+\"? 
\").title())\n print\n self.skills = skill_choice", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def Tinker05(self,dc,nu):\n if len(self.bias_par.keys()) == 0:\n a = 0.707\n b = 0.35\n c = 0.8\n else:\n a = self.bias_par['a']\n b = self.bias_par['b']\n c = self.bias_par['c']\n sa = a**0.5\n return 1.+(sa*(a*nu**2) + sa*b*(a*nu**2)**(1.-c) - (a*nu**2)**c/((a*nu**2)**c + \\\n b*(1.-c)*(1.-c/2.)))/(dc*sa)", "def punch(self):\n if self.weight < 5:\n return \"That tickles.\"\n elif self.weight < 15:\n return \"Hey that hurt!\"\n else:\n return \"OUCH!\"", "def bagging(x, y, max_depth, num_trees):\r\n\r\n hypotheses = {}\r\n # attribute_idx = np.array(range(data.dim))\r\n # generating attributes\r\n attributes = []\r\n cols = data.dim\r\n for i in range(cols):\r\n arr = np.unique(x[:, i])\r\n for value in arr:\r\n attributes.append((i, value))\r\n lena = len(x)\r\n # initializing weights to 1 for boosting\r\n alpha = 1\r\n w = np.ones((lena, 1), dtype=int) \r\n # iterating over j number of trees\r\n for j in range(num_trees):\r\n # generating a random array of indicies with replacement over the length of x\r\n new_array = np.random.choice(lena,size =lena,replace=True)\r\n #calling id3 over the indices of the new array\r\n tree = id3(x[new_array],y[new_array],attributes, max_depth, w)\r\n # appending to a global tree as a weighted pair\r\n hypotheses[j] = (alpha, tree)\r\n\r\n return hypotheses", "def training_pattern_setup(self, **overrides):\n or_dim = 'or' in self.dims\n gaussian = (self.dataset == 'Gaussian')\n pattern_parameters = {'size':(self.input_width if or_dim and gaussian\n else 3 * 0.1 if gaussian else 10.0),\n 'aspect_ratio': self.input_aspect if or_dim else 1.0,\n 'scale': self.contrast / 100.0}\n if self.dependent_gaussians:\n overrides['pattern_per_label'] = int(self.area*1.5)\n overrides['pattern_type'] = DependentGaussians\n pattern_parameters.update(dict(k=self.gaussian_k,\n circular=self.gaussian_circular))\n return super(EarlyVisionSCAL, self).training_pattern_setup(\n pattern_parameters=pattern_parameters,\n position_bound_x=self.area/2.0+self.v1aff_radius,\n position_bound_y=self.area/2.0+self.v1aff_radius, **overrides)", "def atom_weight(self, manager):\n identity = \"HOH\" if self.resname in WATER_RES_NAMES else self.identity()\n # Waters that don't have B-factors at least 1 stddev below the mean are\n # presumed to be correct\n if (identity == \"HOH\" and\n (self.atom.b > manager.b_mean_hoh - manager.b_stddev_hoh)):\n return 0\n if self.is_correctly_identified(identity = identity):\n return 0\n # B-factors/occupancies?\n if self.FOFC_PEAK in self.inaccuracies[identity] or self.atom.b < 1:\n return 1\n if self.FOFC_HOLE in self.inaccuracies[identity]:\n return -1\n return 0", "def bmi(self):\n if self.height == 0:\n raise ValueError(\"Height can't be zero\")\n return self.weight / (self.height * self.height)", "def mon_maker():\r\n random_mon = {'eyeratio':0.2, 'eyeL':30,\r\n 'mouthratio':0.8, 'mouthL':30,\r\n 'headL':40, 'headA':15,\r\n 'cheekL':25, 'cheekA':45,\r\n 'chinL': 30, 'chinA':90\r\n }\r\n return random_mon", "def variations():", "def test_weight_hh(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n gene1, gene2 = get_gru_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n for value in 
gene3.weight_hh:\n for v in value:\n if v == 0:\n p1 = True\n elif v == 1:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(np.linalg.norm(gene3.weight_hh - gene1.weight_hh), 0)\n self.assertNotEqual(np.linalg.norm(gene3.weight_hh - gene2.weight_hh), 0)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertNotEqual(np.linalg.norm(gene3.weight_hh - gene1.weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene3.weight_hh - gene2.weight_hh), 0)", "def test_weight(self):\n # create a coconut of each type\n self.nuts = [Coconut(variety) for variety in ['middle eastern',\n 'south asian',\n 'american']]\n \n # check that weights are as expected\n self.weights = [2.5, 3.0, 3.5]\n for i in range(0,3):\n self.assertEqual(self.nuts[i]._Coconut__weight,\n self.weights[i],\n \"The weight is wrong\")", "def copyWeights(self, shp, stray = 0, colour = (240,100,100)):\n self.weights = []\n self.bias = []\n if(stray == 0): # straight copy\n for i, wt in enumerate(shp.weights):\n self.weights.append(wt.copy())\n for i,bs in enumerate(shp.bias):\n self.bias.append(bs.copy())\n else: # Copy with some random added in\n for i, wt in enumerate(shp.weights):\n self.weights.append(np.add(wt.copy(), np.random.normal(0,stray,(shp.dimensions[i],shp.dimensions[i+1]))))\n for i,bs in enumerate(shp.bias):\n self.bias.append(np.add(bs.copy(), np.random.normal(0,stray,shp.dimensions[i+1])))\n self.normalizeWeights()\n self.colour = colour\n self.parentname = shp.name\n self.parentcolour = shp.colour\n self.setDimension(shp.inputdistance,shp.inputangle,shp.intermediates,shp.orders)", "def test_weighted_planetary_bodies_is_weighted_correctly(self):\n major_count = 0\n for planet in words.MAJOR_BODIES:\n major_count += words.WEIGHTED_PLANETARY_BODIES.count(planet)\n\n minor_count = 0\n for body in words.MINOR_BODIES:\n minor_count += words.WEIGHTED_PLANETARY_BODIES.count(body)\n\n weighted_planets = len(words.WEIGHTED_PLANETARY_BODIES)\n major_ratio = major_count / weighted_planets\n minor_ratio = minor_count / weighted_planets\n\n assert len(words.MAJOR_BODIES) < len(words.MINOR_BODIES)\n assert major_count + minor_count == weighted_planets\n assert 0 < minor_ratio < 0.50\n assert 0.49 < major_ratio < 0.51", "def __init__(self, owner, mixtureNumber):\n self.mixtureNumber = mixtureNumber\n self.owner = owner\n self.weight = 0.0\n self.pdf = None", "def build_discriminator():\n leakyrelu_alpha = 0.2\n momentum = 0.8\n input_shape = (256, 256, 3)\n\n input_layer = Input(shape=input_shape)\n\n # Add the first convolution block\n dis1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(input_layer)\n dis1 = LeakyReLU(alpha=leakyrelu_alpha)(dis1)\n\n # Add the 2nd convolution block\n dis2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same')(dis1)\n dis2 = LeakyReLU(alpha=leakyrelu_alpha)(dis2)\n dis2 = BatchNormalization(momentum=momentum)(dis2)\n\n # Add the third convolution block\n dis3 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same')(dis2)\n dis3 = LeakyReLU(alpha=leakyrelu_alpha)(dis3)\n dis3 = BatchNormalization(momentum=momentum)(dis3)\n\n # Add the fourth convolution block\n dis4 = Conv2D(filters=128, kernel_size=3, strides=2, 
padding='same')(dis3)\n dis4 = LeakyReLU(alpha=leakyrelu_alpha)(dis4)\n dis4 = BatchNormalization(momentum=0.8)(dis4)\n\n # Add the fifth convolution block\n dis5 = Conv2D(256, kernel_size=3, strides=1, padding='same')(dis4)\n dis5 = LeakyReLU(alpha=leakyrelu_alpha)(dis5)\n dis5 = BatchNormalization(momentum=momentum)(dis5)\n\n # Add the sixth convolution block\n dis6 = Conv2D(filters=256, kernel_size=3, strides=2, padding='same')(dis5)\n dis6 = LeakyReLU(alpha=leakyrelu_alpha)(dis6)\n dis6 = BatchNormalization(momentum=momentum)(dis6)\n\n # Add the seventh convolution block\n dis7 = Conv2D(filters=512, kernel_size=3, strides=1, padding='same')(dis6)\n dis7 = LeakyReLU(alpha=leakyrelu_alpha)(dis7)\n dis7 = BatchNormalization(momentum=momentum)(dis7)\n\n # Add the eight convolution block\n dis8 = Conv2D(filters=512, kernel_size=3, strides=2, padding='same')(dis7)\n dis8 = LeakyReLU(alpha=leakyrelu_alpha)(dis8)\n dis8 = BatchNormalization(momentum=momentum)(dis8)\n\n # Add a dense layer\n #avgd = keras.layers.AveragePooling2D(pool_size=(4,4) , strides = (4,4))(dis8)\n\n #flat = keras.layers.Flatten()(dis8)\n dis9 = Dense(units=1024)(dis8)\n dis9 = LeakyReLU(alpha=0.2)(dis9)\n\n # Last dense layer - for classification\n output = Dense(units=1, activation='sigmoid')(dis9)\n\n model = Model(inputs=[input_layer], outputs=[output], name='discriminator')\n return model", "def dict_species_sums(mech):\n if mech == \"racm_esrl_vcp\":\n sum_dict = {}\n # Arrays for different gasses and pm groupings\n sum_dict.update(\n {\n \"noy_gas\": [\n \"hno3\",\n \"no\",\n \"no2\",\n \"no3\",\n \"pan\",\n \"tpan\",\n \"hono\",\n \"hno4\",\n \"onit\",\n \"n2o5\",\n \"ison\",\n \"nald\",\n \"mpan\",\n ]\n }\n )\n sum_dict.update({\"noy_gas_weight\": [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]})\n sum_dict.update(\n {\"noy_aer\": [\"no3ai\", \"no3aj\"]}\n ) # Need to confirm here if there is a size cutoff for noy obs?\n sum_dict.update({\"nox\": [\"no\", \"no2\"]})\n sum_dict.update({\"pm25_cl\": [\"clai\", \"claj\"]})\n sum_dict.update({\"pm25_cl_weight\": [1, 1]})\n sum_dict.update({\"pm25_ec\": [\"eci\", \"ecj\"]})\n sum_dict.update({\"pm25_ec_weight\": [1, 1]})\n sum_dict.update({\"pm25_na\": [\"naai\", \"naaj\"]})\n sum_dict.update({\"pm25_na_weight\": [1, 1]})\n sum_dict.update({\"pm25_nh4\": [\"nh4ai\", \"nh4aj\"]})\n sum_dict.update({\"pm25_nh4_weight\": [1, 1]})\n sum_dict.update({\"pm25_no3\": [\"no3ai\", \"no3aj\"]})\n sum_dict.update({\"pm25_no3_weight\": [1, 1]})\n sum_dict.update({\"pm25_so4\": [\"so4ai\", \"so4aj\"]})\n sum_dict.update({\"pm25_so4_weight\": [1, 1]})\n sum_dict.update(\n {\n \"pm25_om\": [\n \"asoa1i\",\n \"asoa1j\",\n \"asoa2i\",\n \"asoa2j\",\n \"asoa3i\",\n \"asoa3j\",\n \"asoa4i\",\n \"asoa4j\",\n \"bsoa1i\",\n \"bsoa1j\",\n \"bsoa2i\",\n \"bsoa2j\",\n \"bsoa3i\",\n \"bsoa3j\",\n \"bsoa4i\",\n \"bsoa4j\",\n \"orgpai\",\n \"orgpaj\",\n ]\n }\n )\n elif mech == \"redhc\":\n sum_dict = {}\n # Arrays for different gasses and pm groupings\n sum_dict.update({\"noy_gas\": [\"hno3\", \"no\", \"no2\", \"no3\", \"pan\", \"ho2no2\", \"onit\", \"n2o5\"]})\n sum_dict.update({\"noy_gas_weight\": [1, 1, 1, 1, 1, 1, 1, 2]})\n sum_dict.update(\n {\"noy_aer\": [\"no3ai\", \"no3aj\"]}\n ) # Need to confirm here if there is a size cutoff for noy obs?\n sum_dict.update({\"nox\": [\"no\", \"no2\"]})\n sum_dict.update({\"pm25_cl\": [\"clai\", \"claj\"]})\n sum_dict.update({\"pm25_cl_weight\": [1, 1]})\n sum_dict.update({\"pm25_ec\": [\"eci\", \"ecj\"]})\n sum_dict.update({\"pm25_ec_weight\": [1, 1]})\n 
sum_dict.update({\"pm25_na\": [\"naai\", \"naaj\"]})\n sum_dict.update({\"pm25_na_weight\": [1, 1]})\n sum_dict.update({\"pm25_nh4\": [\"nh4ai\", \"nh4aj\"]})\n sum_dict.update({\"pm25_nh4_weight\": [1, 1]})\n sum_dict.update({\"pm25_no3\": [\"no3ai\", \"no3aj\"]})\n sum_dict.update({\"pm25_no3_weight\": [1, 1]})\n sum_dict.update({\"pm25_so4\": [\"so4ai\", \"so4aj\"]})\n sum_dict.update({\"pm25_so4_weight\": [1, 1]})\n sum_dict.update(\n {\n \"pm25_om\": [\n \"asoa0j\",\n \"asoa0i\",\n \"asoa1i\",\n \"asoa1j\",\n \"asoa2i\",\n \"asoa2j\",\n \"asoa3i\",\n \"asoa3j\",\n \"bsoa1i\",\n \"bsoa1j\",\n \"bsoa2i\",\n \"bsoa2j\",\n \"bsoa3i\",\n \"bsoa3j\",\n \"poa0j\",\n \"poa0i\",\n \"poa1j\",\n \"poa1i\",\n \"poa2j\",\n \"poa2i\",\n \"poa3j\",\n \"poa3i\",\n ]\n }\n )\n\n else:\n raise NotImplementedError(\"Mechanism not supported, update _wrfchem_mm.py file in MONETIO\")\n\n return sum_dict", "def getWeightString(self, variables, wordsize, ignoreMSBs = 0):\n command = \"ASSERT((weight = BVPLUS(16,\"\n for var in variables:\n tmp = \"0b00000000@(BVPLUS(8, \"\n for bit in range(wordsize - ignoreMSBs):\n \"\"\"\n Ignore MSBs if they do not contribute to \n probability of the characteristic.\n \"\"\"\n tmp += \"0bin0000000@({0}[{1}:{1}]),\".format(var, bit, bit)\n command += tmp[:-1] + \")),\"\n command = command[:-1]\n command += \")));\"\n \n return command", "def punch(self):\n\n if self.weight < 5:\n return \"That tickles.\"\n elif self.weight >= 5 and self.weight < 15:\n return \"Hey that hurt!\"\n return \"OUCH!\"", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def dp_make_weight(egg_weights, target_weight, memo = {}):\r\n # construct table. outer loop: egg weights. inner loop: 0-target_weight\r\n # table will be stored in memo. key=egg_weight, value=list, indexed from 0-target_weight\r\n for i, w in enumerate(egg_weights):\r\n # initialize key-value pair for a given egg weight. 
Value is empty list to be filled in inner loop.\r\n memo[w] = []\r\n for j in range(target_weight + 1):\r\n # if weight is 0, no eggs\r\n if j == 0:\r\n memo[w].append(0)\r\n # if egg_weight is less than weight, minimize number of eggs\r\n elif w <= j:\r\n # to minimize: take the min of (using prior denomination to get same weight, using current denomation to get weight)\r\n # first item=prior egg value, same weight\r\n # second item=\"sub\" current egg value by subtracting it from weight and adding 1 to egg total\r\n \r\n # if first egg weight, no need to look at \"row\" above to minimize\r\n if i == 0:\r\n min_eggs = memo[w][j-w] + 1\r\n else:\r\n min_eggs = min(memo[egg_weights[i-1]][j], memo[w][j-w] + 1)\r\n memo[w].append(min_eggs)\r\n # else if egg_weight is more than weight, take prior denomination min number of eggs at j\r\n else:\r\n memo[w].append(memo[egg_weights[i-1]][j])\r\n\r\n # access bottom right value to get minimum number of coins (largest egg_weight at target_weight)\r\n # uncomment below to only returns min number of eggs\r\n #return memo[egg_weights[-1]][target_weight]\r\n\r\n # determine makeup of min number of egg: \r\n # cur_weight to keep track as we subtract from total weight\r\n cur_weight = target_weight\r\n \r\n # egg_choices: a dict that holds how many of each egg_weight are in the optimal solution\r\n egg_choices = {}\r\n \r\n #print(memo)\r\n \r\n # outer loop goes backwards from highest to smallest egg weight\r\n for i in range(len(egg_weights)-1, -1, -1):\r\n # check if equal to memo[i-1][j] (row above, same column). if not equal, i is in the set.\r\n while egg_weights[i] <= cur_weight:\r\n # also if smallest egg weight, keep subtracting until we get 0\r\n if i == 0 or (memo[egg_weights[i]][cur_weight] != memo[egg_weights[i-1]][cur_weight]):\r\n # if they are not equal, add to the count of i in the egg_choices dict\r\n if egg_weights[i] in egg_choices.keys():\r\n egg_choices[egg_weights[i]] += 1\r\n else:\r\n egg_choices[egg_weights[i]] = 1\r\n # subtract from current weight the egg weight accounted for\r\n cur_weight -= egg_weights[i]\r\n \r\n # break if all weight accounted for\r\n if cur_weight == 0:\r\n break\r\n \r\n # string together the min number of eggs and the composition\r\n out = str(memo[egg_weights[-1]][target_weight]) + ' ('\r\n \r\n # list of formatted value * key pairs\r\n eggs = []\r\n for key, value in egg_choices.items():\r\n eggs.append(str(value) + ' * ' + str(key))\r\n \r\n # join key/value pairs together\r\n out += ' + '.join(eggs)\r\n \r\n # finish off the string\r\n out += ' = ' + str(target_weight) + ')'\r\n return out", "def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]", "def test_weight(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n cfg.genome.weight_min_value = -2\n cfg.genome.weight_max_value = 2\n gene1, gene2 = get_connection_genes((-1, 0), cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n if gene3.weight == gene1.weight:\n p1 = True\n elif gene3.weight == gene2.weight:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # 
Ratio of 1, so always inherits from first parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(gene3.weight, gene1.weight)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertEqual(gene3.weight, gene2.weight)", "def discriminator_model():\n\n Discriminator = Sequential(name='Discriminator')\n\n # Downsampling : 32x32x3 --> 16x16x64\n Discriminator.add(Conv2D(filters=64, kernel_size=(5, 5), strides=2, padding='same', \n kernel_initializer=RandomNormal(stddev=GAUSS_SD), \n input_shape=DISCRIMINATOR_INPUT))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 16x16x64 --> 8x8x128\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 8x8x128 --> 4x4x256\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 4x4x256 --> 2x2x512\n Discriminator.add(Conv2D(filters=512, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Fully Connected Layer (classifier) , 2x2x512 (2048) --> 1\n Discriminator.add(Flatten())\n Discriminator.add(Dropout(DROPOUT))\n Discriminator.add(Dense(1))\n\n return Discriminator", "def weight_width(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def __init__(self):\n\n # there's always a 1/40 chance of Bob Johnson!\n self.name = weighted_choice(\n [(self.make_name(), 39), (self.bob, 1), (self.beef, 1)])", "def facility_weight(runs, weight, hbv_pars, regions_perturbed):\n \n from numpy import zeros\n \n if runs==1:\n fac_cov_weighted=zeros(len(hbv_pars))\n com_cov_weighted=zeros(len(hbv_pars))\n \n for idx,(bd,facility) in enumerate(zip(hbv_pars[\"birth_dose\"], hbv_pars[\"Facility\"])):\n if bd <= 0.5:\n fac_cov_weighted[idx]=bd+((bd*(weight-1)/(weight+1))*(2*(1-facility)))\n com_cov_weighted[idx]=bd-((bd*(weight-1)/(weight+1))*(2*facility))\n elif bd >0.5:\n fac_cov_weighted[idx]=bd+(((1-bd)*(weight-1)/(weight+1))*(2*(1-facility)))\n com_cov_weighted[idx]=bd-(((1-bd)*(weight-1)/(weight+1))*(2*facility))\n \n elif runs >1:\n fac_cov_weighted=zeros((len(hbv_pars),runs,1))\n com_cov_weighted=zeros((len(hbv_pars),runs,1))\n \n birth_dose=regions_perturbed[:,1,:].reshape((len(hbv_pars),runs,1))\n facility=regions_perturbed[:,4,:].reshape((len(hbv_pars),runs,1))\n \n for idx,i in enumerate(birth_dose[:,:,:]):\n for idy,bd in enumerate(i):\n if bd <= 0.5:\n fac_cov_weighted[idx,idy,0]=bd+((bd*(weight-1)/(weight+1))*(2*(1-facility[idx,idy,0])))\n com_cov_weighted[idx,idy,0]=bd-((bd*(weight-1)/(weight+1))*(2*facility[idx,idy,0]))\n elif bd > 0.5:\n fac_cov_weighted[idx,idy,0]=bd+(((1-bd)*(weight-1)/(weight+1))*(2*(1-facility[idx,idy,0])))\n com_cov_weighted[idx,idy,0]=bd-(((1-bd)*(weight-1)/(weight+1))*(2*facility[idx,idy,0]))\n\n return fac_cov_weighted, com_cov_weighted", "def punch(self):\n if self.weight < 5.0:\n return 'That tickles.'\n elif self.weight >= 5.0 and self.weight < 15.0:\n return 'Hey that hurt!'\n else:\n return 'OUCH!'", "def punch(self):\n # you are not working, futher investagtion needed...\n if self.weight < 5:\n return \"That tickles.\"\n elif self.weight < 15:\n 
return \"Hey that hurt!\"\n else:\n return \"OUCH!\"", "def _weight_boosting_random_state(name: str):\n return hp.randint(name, 5)", "def mixed_prob( means,stds,weights,validt):", "def low_fitness_animals():\n jungle_cell = topo.Jungle()\n herbivore = animals.Herbivores()\n carnivore = animals.Carnivores()\n carnivore.weight, carnivore.age = 1, 1000\n herbivore.weight, herbivore.age = 1, 1000\n herbivore.parameters[\"omega\"] = 1\n carnivore.parameters[\"omega\"] = 1\n jungle_cell.add_animal(herbivore)\n jungle_cell.add_animal(carnivore)\n return jungle_cell", "def get_sample_weights(self):\n target_to_weight = {}\n for target, count in self.class_count.items():\n target_to_weight[target] = self.total / count\n\n sample_weights = []\n for _, target in self.imgs:\n sample_weights.append(target_to_weight[target])\n\n return sample_weights", "def build_discriminator(shape):\n input_img = Input(shape=(shape)) \n x = Conv2D(64, (3, 3), padding='same')(input_img)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Flatten()(x)\n o = Dense(1,activation='sigmoid')(x)\n Discriminator=Model(input_img,o,name='discriminator')\n return input_img,o,Discriminator", "def qnode(n_wires):\n for i in range(n_wires):\n qml.Hadamard(i)\n return qml.classical_shadow(wires=range(n_wires))", "def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)" ]
[ "0.5955473", "0.5910066", "0.57713836", "0.57571644", "0.55510604", "0.5496855", "0.54449487", "0.5443018", "0.54126084", "0.5388004", "0.53517497", "0.532497", "0.53154194", "0.53051525", "0.5290943", "0.52838373", "0.52517015", "0.52406114", "0.5222552", "0.52106667", "0.5208522", "0.52048343", "0.5198334", "0.5196093", "0.51888245", "0.51727444", "0.5171455", "0.5164111", "0.5162347", "0.5141661", "0.51362175", "0.5130071", "0.5120534", "0.5120534", "0.51166046", "0.5113108", "0.5112093", "0.50752896", "0.50628984", "0.5056986", "0.5053927", "0.50517714", "0.50402594", "0.503439", "0.50327736", "0.5031598", "0.5019883", "0.5004036", "0.4993994", "0.49908468", "0.49897367", "0.49790502", "0.49774587", "0.49740213", "0.497345", "0.49703556", "0.49698597", "0.49608454", "0.49585035", "0.49471563", "0.49449444", "0.49394405", "0.49358723", "0.49321", "0.49309304", "0.4929456", "0.49246", "0.4924274", "0.49189076", "0.4914449", "0.4912302", "0.49121866", "0.4912152", "0.4908701", "0.49060807", "0.49017835", "0.48983625", "0.48965743", "0.4896308", "0.48958412", "0.48910597", "0.4885203", "0.4884862", "0.488097", "0.48723572", "0.4871643", "0.4870448", "0.48587233", "0.48540375", "0.48507258", "0.48469013", "0.48433205", "0.4842257", "0.48400623", "0.4839606", "0.4837886", "0.48316422", "0.4829527", "0.48268342", "0.48267436", "0.48244223" ]
0.0
-1
return list images of ebook perpage
def get_files(self): return self.ebook_file.get_files()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_images(self, page_number):", "def _getAllPageImages(context, size=(320, 452)):\n pdf = context.get_review_pdf()\n # import pdb; pdb.set_trace()\n if pdf:\n pdf_data = pdf[\"blob\"].open().read()\n if not pdf or not pdf_data:\n return \"%s has no pdf\" % (context.absolute_url()), None\n else:\n # Split the pdf, one file per page\n try:\n split_pdf_pages = RunSubprocess(\"pdftk\", output_params=\"burst output\")\n except SubprocessException, e:\n return e\n split_pdf_pages.create_tmp_input(suffix=\".pdf\", data=pdf_data)\n split_pdf_pages.create_tmp_output_dir()\n split_pdf_pages.output_path = os.path.join(\n split_pdf_pages.tmp_output_dir, \"%04d.pdf\"\n )\n split_pdf_pages.run()\n\n msg = tuple()\n if split_pdf_pages.errors != \"\":\n msg += (\"Message from split_pdf_pages:\" \"\\n%s\\n\" % split_pdf_pages.errors,)\n\n # Convert the pages to .gifs\n # rewritten to have one converter step per page as we have seen process\n # sizes larger than 2GB for 60 pages in a batch\n for filename in glob.glob(split_pdf_pages.tmp_output_dir + \"/*.pdf\"):\n pdf_to_image = RunSubprocess(\n \"convert\",\n input_params=\"-density 250\",\n input_path=filename,\n output_params=\"-resize %sx%s -background white -flatten\"\n % (size[0], size[1]),\n )\n outputname = \".\".join(filename.split(\"/\")[-1].split(\".\")[:-1]) + \".gif\"\n pdf_to_image.output_path = os.path.join(\n split_pdf_pages.tmp_output_dir, outputname\n )\n pdf_to_image.run()\n if pdf_to_image.errors != \"\":\n msg += (\"Message from pdfs_to_images:\" \"\\n%s\\n\" % pdf_to_image.errors,)\n\n pdf_to_image.clean_up()\n\n imgfiles = [\n gif\n for gif in os.listdir(split_pdf_pages.tmp_output_dir)\n if os.path.splitext(gif)[1] == \".gif\"\n ]\n imgfiles.sort()\n\n pages = []\n for img in imgfiles:\n img = open(os.path.join(split_pdf_pages.tmp_output_dir, img), \"r\")\n img_data = img.read()\n pages.append(img_data)\n img.close()\n\n # Remove temporary files\n split_pdf_pages.clean_up()\n\n if pages:\n imgfields = []\n for img in pages:\n IF = ImageField()\n IF.set(context, img)\n imgfields.append(IF)\n setattr(context, \"pagePictures\", imgfields)\n\n return msg or \"Successfully converted %s pages\" % len(pages)", "def test_get_photos_paging(self):\n pass", "def per_page():\n return 100", "def get_all_images(page):\n expiry_date = datetime.utcnow() - timedelta(days=constants.IMAGE_DURATION_IN_DAYS)\n records = ImageModel.query \\\n .filter(ImageModel.added_on > expiry_date) \\\n .order_by(desc(ImageModel.added_on)) \\\n .paginate(page=page, error_out=False, max_per_page=constants.IMAGE_PAGE_SIZE)\n return records.items, records.next_num, records.pages", "def gallery(request):\n\n gallery_images = GalleryImages.objects.all()\n gallery_images = gallery_images.order_by(\"-updated_at\")\n paginator = Paginator(gallery_images, 9) # Show 9 images per page.\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n context = {\"gallery_images\": gallery_images, \"page_obj\": page_obj}\n return render(request, \"gallery/gallery.html\", context)", "def get_images(eol_id):\n page = 1\n while True:\n details_url = f\"https://eol.org/api/pages/1.0/{eol_id}.json\"\n payload = {\"id\": eol_id, \n \"images_per_page\": 75,\n \"images_page\": page,\n }\n r = requests.get(details_url, params=payload)\n\n response = json.loads(r.text)\n content = response[\"taxonConcept\"]\n if not \"dataObjects\" in content:\n return\n\n for item in content[\"dataObjects\"]:\n yield item[\"mediaURL\"]\n page += 1", "def 
picture_list(request):\n paginator = Paginator(Picture.objects.all(), 9)\n page = request.GET.get('page') or 1\n\n picture_page = paginator.get_page(page)\n\n data = {\n \"page\": picture_page.number,\n \"pageCount\": picture_page.paginator.num_pages,\n \"data\": PictureListSerializer(picture_page, many=True).data\n }\n\n return JsonResponse(data, status=200)", "def imagePages(files, choice):\n options = [\"Byte\", \"Markov\", \"Hilbert\"]\n type = options[int(ui.prompt(\"Choose a visualization type\", options))]\n\n targets = []\n pageNames = []\n pageSize = 100\n pages = range(math.ceil(len(files)/pageSize))\n for page in pb.progressbar(pages):\n # print(\"\\nPage {}/{}\".format(page+1, len(pages)))\n gc.collect() # Garbage collect\n\n images = []\n start = page*pageSize\n if choice == \"Create\":\n images, targets = buildImages(files[start:start+pageSize], targets, type)\n elif choice == \"Load\":\n images, targets = loadImages(files[start:start+pageSize], targets)\n pageNames.append(\"./pages/images_page{}.npy\".format(page))\n np.save(pageNames[-1], images)\n return targets, pageNames", "def get_book_image_files( url ):\n try:\n soup = soupfy( url, encoding = 'shift_jis' )\n hrs = soup.findAll( 'hr' )\n table = hrs[ 1 ].next.next\n images = table.findAll( 'a' )\n files = [ image[ 'href' ] for image in images ]\n return files\n except AttributeError:\n number = 1\n files = []\n while True:\n page = soup.find( lambda tag : ( 'a' == tag.name and\n tag.string and\n '%dpage' % number == tag.string ) )\n if not page:\n break\n page = page[ 'href' ]\n scheme, netloc, path, _, _, _ = urlparse( url )\n dirname = os.path.dirname( path )\n path = '%s/%s' % ( dirname, page )\n url = urlunparse( ( scheme, netloc, path, '', '', '' ) )\n files.extend( get_book_image_files( url ) )\n number += 1\n return files\n except:\n stacktrace = traceback.format_exc()\n ERRORS.append( stacktrace )\n return []", "def get_gallery():\r\n to_segment = os.listdir(TO_SEGMENT)\r\n print(to_segment)\r\n return render_template(\"gallery.html\",\r\n image_names=to_segment,\r\n next_page_text=\"Segment Images! 
- (might take a couple mins)\",\r\n next_page=\"get_segmented_gallery\"\r\n )", "def list_images(self):\n raise NotImplementedError()", "def get_urls(num):\n url = \"https://books.google.at/books?id=77cdBQAAQBAJ&lpg=PP1&dq=%E5%82%85%E4%BD%A9%E6%A6%AE&pg=PA{}&jscmd=click3&vq=%E5%82%85%E4%BD%A9%E6%A6%AE\".format(num)\n res = requests.get(url)\n res_text = json.loads(res.text)\n pages = res_text[\"page\"]\n\n result = {}\n for p in pages:\n if 'src' in p:\n page_num = p['pid']\n page_src = p['src'] \n result[page_num] = page_src\n return result", "def get_ebooks(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n ebooks = []\n for book in json_data['docs']:\n if book['ebook_count_i'] >= 1:\n ebooks.append({'title': book['title'], 'ebook_count': book['ebook_count_i']})\n return ebooks", "def image_api():\n PAGE_SIZE=50\n page = int(request.args.get('page', 0))\n print page\n userid = current_user.id\n out= []\n query = db_session.query(Image, Batch.status).\\\n outerjoin(Batch, Image.batch==Batch.batch_id).\\\n filter(Image.user==userid)\n\n count = query.count()\n for row in query.limit(PAGE_SIZE).offset(page*PAGE_SIZE):\n out.append({\n \"url\": url_for('image_views.raw_image', image_path=row.Image.path),\n \"page\": url_for('image_views.view_image', image_id=row.Image.id),\n \"title\": row.Image.title,\n \"status\": row.status\n })\n\n return jsonify({\"images\": out, \"count\": count})", "def getimgs():", "def get_book_images( title, url ):\n image_files = get_book_image_files( url )\n try:\n os.mkdir( utf8( u'%s' % title ) )\n except OSError:\n pass\n except:\n stacktrace = traceback.format_exc()\n ERRORS.append( stacktrace )\n scheme, netloc, path, _, _, _ = urlparse( url )\n dirname = os.path.dirname( path )\n url = urlunparse( ( scheme, netloc, dirname, '', '', '' ) )\n for image_file in image_files:\n command = WGET % dict(\n title = title,\n referrer = u'%s/%s' % ( url, image_file ),\n directory = url,\n image_file = image_file.replace( u'html', u'jpg' ) )\n os.system( utf8( command ) )\n return image_files", "def image_list(request):\n return render_to_response('wainz/image_list.html', {\"images_and_votes\": ordered_images(0, 30, request.user)}, context_instance = RequestContext(request))", "def get_all_images(access_token):\n url = 'http://interview.agileengine.com/images'\n headers = {\n 'Authorization': 'Bearer ' + access_token\n }\n images = []\n try:\n logging.info(\"Fetching all the images\")\n response = requests.get(\n url,\n headers=headers\n )\n if response.ok: \n total_pages = response.json().get('pageCount')\n images = response.json().get('pictures')\n logging.info(f\"fetched 1 of {total_pages}\")\n for i in range(2,total_pages + 1):\n paginated_url = f'http://interview.agileengine.com/images?page={i}'\n response = requests.get(\n paginated_url,\n headers=headers\n )\n images += response.json().get('pictures')\n logging.info(f\"fetched {i} of {total_pages}\")\n \n detailed_images = []\n for image in images:\n detail_url = f\"http://interview.agileengine.com/images/{image.get('id')}\"\n \n logging.info(f\"Retrieving detail of {image['id']}\")\n response = requests.get(\n detail_url,\n headers=headers\n )\n if response.ok:\n detailed_images.append(response.json())\n return detailed_images\n except requests.exceptions.HTTPError:\n logging.exception('HTTP error')\n except requests.exceptions.ConnectionError:\n logging.exception('Connection error')\n except 
requests.exceptions.Timeout:\n logging.exception('Timeout error')\n except requests.exceptions.RequestException as e:\n logging.exception('Unexpected error')", "def cmd_gallery_items(client, args):\n gallery = client.gallery(args.section, args.sort, args.page, args.window,\n args.show_viral)\n data = [item.__dict__ for item in gallery]\n generate_output({'gallery': data}, args.output_file)", "def get_overall_page_list(num_of_items, objs_per_page):\n\tpage_list = []\n\ttry:\n\t\tremainder = (num_of_items % objs_per_page)\n\texcept ZeroDivisionError:\n\t\treturn page_list\n\tif remainder:\n\t\tnum_of_pages = int(num_of_items / objs_per_page)+1\n\telse:\n\t\tnum_of_pages = int(num_of_items / objs_per_page)\n\tfor page_num in xrange(num_of_pages):\n\t\tpage_list.append(str(page_num+1))\n\treturn page_list", "def list_images(db, n, usernick=None):\n cur = db.cursor()\n if usernick:\n sql = \"\"\"\n select * from (\n select * from images\n order by timestamp DESC\n )\n where usernick=?\n limit ?;\n \"\"\"\n cur.execute(sql, (usernick, n))\n else:\n sql = \"\"\"\n select * from (\n select * from images\n order by timestamp DESC\n )\n limit ?;\n \"\"\"\n cur.execute(sql, (n,))\n img_list = (list(cur))\n dict_list = []\n for i in img_list:\n i_dict = dict()\n i_dict['filename'] = i[0]\n i_dict['timestamp'] = i[1]\n i_dict['user'] = i[2]\n i_dict['likes'] = count_likes(db, i_dict['filename'])\n dict_list.append(i_dict)\n return dict_list", "def get_images(self, ctx, page):\n is_imgur = 'source' in page.meta and page.meta['source'] == 'imgur'\n if 'type' in page.meta and page.meta['type'] == 'album':\n album = page.meta\n images = []\n if is_imgur:\n pp.pprint(page.meta)\n # bind to template via json\n images = self.get_imgur_album_images(page)\n self.albums[album['slug']] = images\n else:\n # get paths of all of the images in the album\n srcs = []\n # get absolute paths of images in album for each file type\n for file_type in FILE_TYPES:\n imgs = glob.glob(\n GALLERY_DIR + album['slug'] + '/*.' + file_type\n )\n\n for img in imgs:\n img_rel_path = (\n REL_GALLERY_DIR +\n album['slug'] + '/' + img.split('/')[-1]\n )\n srcs.append(img_rel_path)\n\n # split full srcs and thumb srcs from srcs into two lists\n images = []\n thumb_srcs = filter(\n lambda src: src.split('/')[-1].startswith(THUMB_PREFIX),\n srcs\n )\n for thumb_src in thumb_srcs:\n src = thumb_src.replace(THUMB_PREFIX, '')\n thumb_width, thumb_height = self.calc_img_hw(thumb_src)\n width, height = self.calc_img_hw(src)\n images.append({\n 'thumb_src': thumb_src,\n 'thumb_width': thumb_width,\n 'thumb_height': thumb_height,\n\n 'src': src,\n 'width': width,\n 'height': height,\n })\n self.albums[album['slug']] = images", "def pic (self, list) : \n result = []\n for pmod in list :\n result.append (pmod.photo_uri)\n return result", "def generatePageImages(self, later=True):\n result = \"\"\n status = 1\n # make this asyncronous\n async = component.getUtility(IAsyncService)\n async_args = (self.context, (800, 1131))\n when = datetime.datetime.now(pytz.UTC) + datetime.timedelta(seconds=600)\n try:\n if later:\n async.queueJobWithDelay(None, when, _getAllPageImages, *async_args)\n else:\n apply(_getAllPageImages, async_args)\n except (component.ComponentLookupError, KeyError):\n logger.error(\"Could not setup async job, running synchronous\")\n apply(_getAllPageImages, async_args)\n # try:\n # result, pageimages = self._getAllPageImages((800,1131))\n # except SubprocessException, e:\n # result = \"Missing converter? 
-> \" + str(e)\n # pageimages = None\n if result:\n logger.warn(\"popen: %s\" % (result))\n if \"Error:\" in result:\n status = 0\n # if pageimages:\n # imgfields = []\n # for img in pageimages:\n # IF = ImageField()\n # IF.set(self.context, img)\n # imgfields.append(IF)\n # setattr(self.context, 'pagePictures', imgfields)\n return status", "def get_images(self,soup,Images):\n \n img=soup.find_all('a',href=re.compile(\"/photo.php?fbid=\"))\n img1=soup.find_all('a',href=re.compile(\"/photo\"))\n m=' '\n if img !=[]:\n img_href='https://www.facebook.com'+img[0]['href']\n m+=img_href+'\\n'\n \n elif img1 !=[]:\n img_href='https://www.facebook.com'+img1[0]['href']\n m+=img_href+'\\n'\n \n else:\n img=soup.find_all('a',href=re.compile(\"pcb\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n' \n \n \n else:\n img=soup.find_all('a',href=re.compile(\"photos\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n'\n \n Images.append(m)\n \n return Images", "def get_images(self):\n \n images = []\n for order in self.order_lst:\n o_items = order.get_items()\n images.append(o_items.get_image())\n \n return images", "def get_overview_pages(self):\n self.load_website()\n maxNumber = 1\n for pageIndex in self.soup.find_all('div', {'class':'paginate bg-muted'}):\n for link in pageIndex.find_all('a'):\n # try to convert string to number; if error it's not a number\n try:\n number = int(link.text)\n if number > maxNumber:\n maxNumber = number \n except ValueError:\n pass\n print('Screening complete: %d pages found - accessing first %s pages' % (maxNumber, self.maxPages))\n self.pages = [np.arange(1, maxNumber, 1)]", "def get_images(self):\r\n if self.images is None:\r\n self.images = {}\r\n for name, img_num in self.images.iteritems():\r\n if isinstance(img_num, int):\r\n yield (name, img_num)", "def list_images():\n return json_response(list_manifests())", "def paging_results(self):\n\n return 30", "def view_images(request):\n user_root = request.session['user_root']\n search_id = request.session['search_id']\n with open(os.path.join(user_root, search_id, 'info.json')) as f:\n info = json.load(f)\n object_id_list = info['object_id_list']\n image_type_list = info['image_type_list']\n search_pattern = info['search_pattern']\n image_dir = scan_images(user_root, search_id, image_type_list,relative_path=True)\n\n # Add flag for conditional representation.\n flag_scan = False\n flag_classifier=info['flag_classifier']\n if search_pattern == \"scan\":\n flag_scan = True\n bounding_box_dict = scan_bb_images(\n user_root, search_id, folder_name=\"scans\")\n else:\n bounding_box_dict = scan_bb_images(user_root, search_id)\n\n return render(request, 'gallery.html',\n {\"object_id_list\": object_id_list,\n \"image_dir\": image_dir,\n \"bounding_box\": bounding_box_dict,\n \"flag_scan\": flag_scan,\n \"flag_classifier\":flag_classifier,\n \"image_type_list\":image_type_list})", "def recipes_list(request):\n recipes = Recipe.objects.all().order_by('published_date')\n\n # prawidlowy sposob zbierania URLa - object.image.url\n # recipe = recipes[0]\n # print(\"path: \" + recipe.image.url)\n\n paginator = Paginator(recipes, INITIAL_PAGE_SIZE)\n page = paginator.page(1)\n\n context = {\n 'page': page,\n 'display_likes': True,\n }\n\n return render(request, 'recipes_index.html', context)", "def get(self):\n return PhotoGalleryService().get_all(), 200", "def get_image_list(self, account):\n images = 
self.driver(account).list_images()\n return [image.name for image in images]", "def get_available_images():\n return AVAILABLE_IMAGES", "def get_photos_url(self, page_number=1):\n return \"{}{}?client_id={}&per_page={}&page={}\".format(\n self.base_url, self.home_url, self.client_id, self.limit, page_number\n )", "def index(self, req):\n params = {\n 'filters': self._get_filters(req),\n 'limit': self._get_limit(req),\n }\n\n if 'marker' in req.str_params:\n params['marker'] = self._get_marker(req)\n\n images = db_api.image_get_all_public(None, **params)\n\n results = []\n for image in images:\n result = {}\n for field in DISPLAY_FIELDS_IN_INDEX:\n result[field] = image[field]\n results.append(result)\n return dict(images=results)", "def _get_apt_urls_per_page(self, pg_num):\n\n # get the URL for the specific page given its page number \n pg_url = self._get_page_url(pg_num)\n response = requests.get(pg_url)\n # scrape the HTML web content from rent.com\n results = response.content \n # a list that contains all the apartment URLs\n if not response.status_code == 404:\n soup = BeautifulSoup(results, 'lxml')\n apts = soup.find_all('a', attrs={'data-tid': 'property-title'})\n apt_urls = [apt['href'] for apt in apts]\n\n return apt_urls", "def page(request, pagenum):\n context = Paginator().filter(Book.objects.all(), pagenum)\n return render(request, 'books/bookListPage.html', context)", "def list_images(ec2): # pragma: no coverage\n response = ec2.describe_images(Filters=[{'Name': 'is-public',\n 'Values': ['false']}])\n response.pop('ResponseMetadata')\n printy(\"{:12}\\t{:20}\\t\\tCreationDate:\".format(\"ImageId\", \"Name\"))\n\n for image in response['Images']:\n if len(image[\"Name\"]) > 20:\n image['Name'] = image['Name'][:20] + \"...\"\n print(\"{ImageId}\\t{Name:20}\\t\\t{CreationDate}\".format(**image))", "def photos(self):\n if \"photos\" in self._prop_dict:\n return PhotosCollectionPage(self._prop_dict[\"photos\"])\n else:\n return None", "def pictures(self):\n return self.container['pictures']", "def cmd_gallery_random(client, args):\n gallery_random = client.gallery_random(args.page)\n data = [item.__dict__ for item in gallery_random]\n generate_output({'gallery_random': data}, args.output_file)", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']", "def list_images():\n image_map = build_image_map()\n click.echo('')\n click.echo('List of available images (Name - Description)')\n click.echo('')\n for name in image_map:\n click.echo('{} -> {}'.format(name, image_map[name]))", "def list_all_size(self, path=None):\n user_id = path or self.user_id\n if not user_id:\n raise ValueError(\"You must either specify a user ID at \"\n \"storage instanciation or at \"\n \"list_image_and_thumb launching.\")\n url_types = ['url_o', 'url_s', 'url_q', 'url_t', 'url_l', 'url_m', 'url_n', 'url_z', 'url_c']\n params = {\n 'method': 'flickr.people.getPublicPhotos',\n 'user_id': user_id,\n 'extras': ','.join(url_types)\n }\n response = self.oauth_session.get(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail':\n raise FlickrError(json_response['message'])\n urls = [pho for pho in json_response['photos']['photo']]\n return urls", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in 
conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def show_books_by_page(page):\n result = {'books': query.get_book_list()}\n return json.dumps(result, ensure_ascii=False)", "def GetPicturesForAll(self, limit = -1, since = -1):\n\n if (limit < 1):\n limit = self.limit\n\n url = self.__BuildGetUrl(\"pictures\", \"\", limit, since)\n return self.__GetJson(url, False)", "def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)", "def paginate(cls, papers, page=0, limit=30):\n offset = page * limit\n end = offset + limit\n if offset > len(papers):\n return []\n if end > len(papers):\n return papers[offeset:]\n return papers[offset:end]", "def get_all_page(url: str) -> list:\n url_book = get_url_book(url)\n return url_book", "def get_pages(epObject, fileDict):\r\n homePage = DOMAIN + epObject.ViewLink\r\n soup = make_soup(homePage)\r\n fileDict['pageUrls'].append(homePage)\r\n fileDict['pageFileNames'].append('index.html')\r\n fileDict['pageIds'].append(str(epObject.ObjectId))\r\n for a in soup.find_all('a', {'href': 'javascript://'}):\r\n if a['onclick'].find('GotoPage') > 0:\r\n pageId = get_page_id(str(a['onclick']), str(epObject.ObjectId))\r\n if pageId not in fileDict['pageIds']:\r\n address = homePage + \"&pageId={0}\".format(pageId)\r\n fileName = a.string.replace(' ', '').lower() + \".html\"\r\n fileDict['pageUrls'].append(address)\r\n fileDict['pageFileNames'].append(fileName)\r\n fileDict['pageIds'].append(pageId)\r\n return fileDict", "def gallery_upload(page_number=None):\n try:\n if page_number == None:\n pages = driver.find_elements_by_xpath('//div[@data-index]')\n page_number = len(pages)\n else:\n page_number = int(page_number)\n page_number = int(page_number)\n component = driver.find_element_by_xpath('//div[@data-index=%d]' % (page_number-1))\n return upload('image', scope=component)\n except Exception as e:\n return \"Error: \" + str(e)", "def get_images(outputdir, parent_key, key, searchurl, maximum, json_path):\n body, browser = build_browser(searchurl)\n\n urls = []\n\n while len(urls) < maximum:\n try:\n page_source = browser.page_source\n\n soup = BeautifulSoup(page_source, 'lxml')\n\n search_result_soup = get_div_child(soup.body, \"islrg\")\n images = search_result_soup.find_all('img')\n urls = get_url_from_images(images)\n print(urls)\n\n for i in range(50):\n scroll_down(body)\n # browser.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div')\n browser.find_element_by_class_name(\"mye4qd\").click()\n print(len(urls) < maximum)\n except ElementNotInteractableException as e: # There is no next page\n print(e)\n break\n\n\n\n if not os.path.exists(outputdir):\n os.makedirs(outputdir)\n\n write_urls(json_path, parent_key, key, urls)\n\n # download_urls(urls, outputdir)\n browser.close()", "def readImages(respository,*rescale):\n record = []\n onlyfiles = [f for f in listdir(respository) if isfile(join(respository, f))]\n for image in onlyfiles:\n record = record+[readImage(join(respository, image),[0,1,2],rescale)]\n return record\n pass", "def index(self, req):\n context = req.environ['nova.context']\n filters = self._get_filters(req)\n page_params = common.get_pagination_params(req)\n images = self._image_service.index(context, filters=filters,\n **page_params)\n builder = self.get_builder(req).build\n return dict(images=[builder(image, detail=False) for image in images])", "def 
get_top_100_data(self):\n self.driver.get(self.TOP_100_BOOKS_URL)\n\n cookies_button = self.driver.find_element_by_xpath(\"/html/body\")\n cookies_button.click()\n\n books_list = []\n\n print(\"Getting books data from page 1\")\n try:\n for page_numb in range(self.FIRST_PAGE_TO_CLICK, self.NMB_OF_PAGES+2):\n content = self.driver.page_source\n page_soup = BeautifulSoup(content, features='html.parser')\n books_list += self._get_books_from_page(page_soup)\n\n if page_numb == self.NMB_OF_PAGES+1:\n break\n self._load_page(page_numb)\n print(f\"Getting books data from page {page_numb}\")\n except:\n pass\n\n return books_list", "def recent(perpage = 500):\n photos = request(\"flickr.photos.getRecent\", {\n \"per_page\": perpage, \n \"extras\": all_extras})\n for photo in photos.getiterator(\"photo\"):\n yield Photo.fromapi(photo.attrib)", "def get_apartment_images(self, soup, apartment_dict):\n\n image_urls = []\n images_container = soup.find('div', class_='photos')\n images_container = images_container.find('div')\n\n # Iterate over images in gallery\n for image_container in images_container.find_all('div'):\n anchor_tag = image_container.find('a')\n if anchor_tag:\n image_urls.append(self.base_url + anchor_tag['href'])\n apartment_dict['image_urls'] = image_urls", "def get_images(self):\n \n return self.img_lst", "def get_images(self, start=0, limit=100):\n if not start:\n start = 0\n if not limit:\n limit = 100\n start = int(start)\n limit = int(limit)\n urls = self._image_urls[start:start + limit]\n message = \"%i Successful URLs found.\" % len(urls)\n return (urls, message)", "def get_links_page(page_num):\n\n session = r.Session()\n params = {'page': page_num}\n response = session.get(BASE_URL+'/music/albumreviews',\n params=params, headers=HEADERS)\n return response", "async def imageList(self, ctx: Context, imageType=\"icons\"):\n imageSingular = self.getSingularImageType(imageType)\n allImages = await self.config.guild(ctx.guild).get_attr(imageType)()\n if not allImages:\n await ctx.send(f\"There are no {imageType}, please add some first!\")\n return\n\n async with self.config.guild(ctx.guild).get_attr(f\"{imageType}Dates\")() as imageDates:\n imageDates = dict(sorted(imageDates.items()))\n msg = \"\"\n for changeDate, name in imageDates.items():\n # YYYY-MM-DD\n theDate = date.fromisoformat(f\"2020-{changeDate}\").strftime(\"%B %d\")\n msg += f\"{theDate}: {name}\\n\"\n notAssigned = set(allImages) - set(imageDates.values())\n if notAssigned:\n msg += f\"Unassigned: \"\n msg += \", \".join(notAssigned)\n pageList = []\n pages = list(pagify(msg, page_length=500))\n totalPages = len(pages)\n async for pageNumber, page in AsyncIter(pages).enumerate(start=1):\n embed = discord.Embed(\n title=f\"Server {imageSingular} changes for {ctx.guild.name}\", description=page\n )\n embed.set_footer(text=f\"Page {pageNumber}/{totalPages}\")\n pageList.append(embed)\n await menu(ctx, pageList, DEFAULT_CONTROLS)", "def getAll(owner_id=None, extended=None, offset=None, count=None, photo_sizes=None,\\\n no_service_albums=None, need_hidden=None, skip_hidden=None):\n params = {\n 'owner_id': owner_id,\n 'extended': extended,\n 'offset': offset,\n 'count': count,\n 'photo_sizes': photo_sizes,\n 'no_service_albums': no_service_albums,\n 'need_hidden': need_hidden,\n 'skip_hidden': skip_hidden\n }\n result = call('photos.getAll', **params)\n return parse_response(result)", "def page(self):\r\n limit = self.get_limit()\r\n offset = self.get_offset()\r\n count = self.get_count()\r\n objects = 
self.get_slice(limit, offset)\r\n meta = {\r\n 'offset': offset,\r\n 'limit': limit,\r\n 'total_count': count}\r\n\r\n if limit:\r\n meta['previous'] = self.get_previous(limit, offset)\r\n meta['next'] = self.get_next(limit, offset, count)\r\n\r\n return {\r\n self.collection_name: objects, 'meta': meta}", "def get_available_pages(self):\n pass", "def images(self, details=True, **query):\n img = _image.ImageDetail if details else _image.Image\n return list(self._list(img, paginated=True, **query))", "def fetch_photos(n):\n\n # This is the list we will use the pass back the photo information.\n data = []\n\n # First, we search for photos taken in Manchester.\n response = requests.get(f'https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key={FLICKR_API_KEY}&lat=53.48&lon=-2.23&radius=10&radius_units=km&format=json&nojsoncallback=1')\n\n # Now loop through the photos.\n for photo in sample(response.json()['photos']['photo'], n):\n\n # We will search with the photo ID.\n id = photo['id']\n\n # Get the photo details. We can get the URL to the photo from here.\n response = requests.get(f'https://api.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key={FLICKR_API_KEY}&photo_id={id}&format=json&nojsoncallback=1')\n\n # Extract the photo URL from the response.\n url = response.json()['sizes']['size'][-1]['source']\n\n # Store our photo ID and URL.\n data.append({\n 'title': photo['title'],\n 'id': photo['id'],\n 'url': url,\n })\n\n # Send back our list of photos.\n return data", "def galleries(self) -> pulumi.Output[Sequence['outputs.WorkbookTemplateGalleryResponse']]:\n return pulumi.get(self, \"galleries\")", "def images_list(self, kwargs=None):\n\n try:\n scode, images = Rest.get('Image')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(images) == 0:\n Console.info(\"No images exist\")\n return\n\n n = 1\n e = {}\n for image in images:\n d = {}\n d['Ip'] = image['Ip']\n d['Id'] = image['Id']\n if image['RepoTags'] == None:\n d['Repository'] = image['RepoDigests'][0]\n else:\n d['Repository'] = image['RepoTags'][0]\n # d['Size'] = image['Size']\n d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2) # Converting the size to GB\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))", "def get_movie_thumbnails(time_period, page, filters):\n movies = Movie.objects.filter(**filters) \\\n .order_by('-item__popularity__' + time_period) \\\n .values('title', 'url', 'synopsis', 'image', 'theater_date') \\\n .distinct()\n paginator = Paginator(movies, 12)\n\n try:\n next_page = paginator.page(page).next_page_number()\n paginator.page(next_page)\n except (EmptyPage, InvalidPage):\n next_page = ''\n\n response = [{ \n 'title': escape(movie['title']),\n 'url': reverse('movie-profile', args=[movie['url']]),\n 'synopsis': escape(movie['synopsis'][:140]),\n 'image_url': get_thumbnail(movie['image'], 'x285').url,\n 'next': next_page \n } for movie in paginator.page(page)] \n\n return simplejson.dumps(response)", "def get_latest_photos(self, count = 30, page = 1):\n uri = 'photos/latest'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def images(self):\n return self._data[\"images\"]", "def getPages(this, chapterNo):\n # Set chapter file name\n fileName = os.path.join(this.location, \"Chapter \"+chapterNo+\".pdf\")\n\n # Check if chapter already exists\n if os.path.exists(fileName):\n # Chapter already exists, return\n return\n\n # Get 
chapter pages list\n res = requests.get(this.chapterList[chapterNo])\n # Getting response HTML\n resHTML = bs4.BeautifulSoup(res.text, features=\"html.parser\")\n # Getting only the links\n links = resHTML.select('img['+this.imageURLAttribute+']')\n\n # Initialise pages array\n pages = []\n # Iterate over each image\n for currentPage in tqdm(range(len(links)), desc=this.mangaName+\", Chapter \"+chapterNo, leave=False, unit=\"page\"):\n # Get image URL\n imageURL = links[currentPage].attrs[this.imageURLAttribute]\n # Get image from its URL\n image = requests.get(imageURL)\n # Append image to pages array\n pages.append(image.content)\n\n # Convert images into pdf format\n pdf_bytes = img2pdf.convert(pages)\n # Write to pdf\n fileObject = open(fileName, \"wb\")\n fileObject.write(pdf_bytes)\n fileObject.close()", "def get_gallery(self, section='hot', sort='viral', window='day',\n show_viral=True, limit=None):\n url = (\"https://api.imgur.com/3/gallery/{}/{}/{}/{}?showViral=\"\n \"{}\".format(section, sort, window, '{}', show_viral))\n resp = self._send_request(url, limit=limit)\n return [_get_album_or_image(thing, self) for thing in resp]", "def paginated(self) -> global___Snippet.Paginated:", "def displayPicture(self):\n size = 0\n image = None\n for url in self.__imageList:\n im = Image.open(requests.get(url, stream=True).raw)\n height, weight = im.size\n imgSize = height * weight\n\n print(url)\n print(size)\n if imgSize > size:\n image = im\n # if image:\n # image.show()", "def index(self, req):\n context = req.environ['nova.context']\n filters = self._get_filters(req)\n images = self._image_service.index(context, filters=filters)\n images = common.limited(images, req)\n builder = self.get_builder(req).build\n return dict(images=[builder(image, detail=False) for image in images])", "def list_image_names(write_folder, user_name, image_size):\n image_dir = f'{write_folder}/{user_name}/{image_size}'\n # print('image_dir',image_dir)\n return os.listdir(image_dir)", "async def fetch_all_images(sess: Session = Depends(get_db)):\n image_list = utils_com.get_com_image_list(sess)\n return image_list", "def getPictures (self, list) :\n\n result = []\n for event in list :\n eventEntry = {}\n eventEntry ['id'] = link = event.answer.id\n eventEntry ['time'] = event.timeOf\n eventEntry ['comments'] = event.answer.comments\n eventEntry ['location'] = self.where (event.answer)\n eventEntry ['problem'] = event.answer.survey ['problem_type']\n eventEntry ['pictures'] = self.pic (Picture.objects.filter (answer__id = link))\n result.append (eventEntry)\n \n return result", "def _get_page_range(self):\r\n return list(range(1, self.num_pages + 1))", "def fetch_urls(browser, number_publications):\n links = []\n links.extend(re.findall(\"/p/([^/]+)/\", browser.page_source))\n n_scrolls = scrolls(number_publications)\n\n for i in range(\n n_scrolls\n ): # collecting all the pictures links in order to see which ones contains location data\n print(\n Fore.WHITE +\n \"Scrolling the Instagram target profile, scraping pictures URLs ...\"\n + str(100 * i // n_scrolls) + \"% of the profile scrolled \",\n end=\"\\r\")\n browser.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight)\")\n links.extend(re.findall(\"/p/([^/]+)/\", browser.page_source))\n time.sleep(\n 1\n ) # dont change this, otherwise some scrolls won't be effective and all the data won't be scrapped\n\n print(Fore.WHITE + \"\\nPictures links collected: \" + Fore.GREEN + \"OK\")\n return list(dict.fromkeys(links)) # remove duplicates", "def 
photos(self):\n return self._photos", "def list_result_ephemerides_files(\n self, page_size: int = 100, page_token: str = None) -> Dict:\n params = {}\n if page_size < 0 or page_size > 100:\n page_size = 100\n params['pageSize'] = page_size\n if page_token:\n params['pageToken'] = page_token\n ephs = self._rp._rest.get(\n f'/projects/{self._rp._project}/jobs/{self._job_uuid}'\n f'/ephemerides?{urllib.parse.urlencode(params)}')\n return ephs", "def slice_pages(fname, pages, out_path=None, resolution=300, use_convert=True):\n if not out_path:\n prefix = '%s-' % fname[:-4]\n fd, out_path = tempfile.mkstemp(prefix=prefix, suffix='.png')\n os.close(fd)\n os.remove(out_path)\n\n fname = '%s[%s]' % (fname, ','.join(str(x - 1) for x in pages))\n\n if use_convert or not WAND_AVAILABLE:\n command = \"convert -density %d '%s' %s\"\n command = command % (resolution, fname, out_path)\n sh_args = shlex.split(str(command))\n ret = subprocess.call(sh_args)\n if ret > 0:\n raise Exception('Non-zero return code: \"%s\"' % command)\n else:\n from wand import image\n page_image_files = image.Image(\n filename=fname,\n resolution=resolution,\n )\n with page_image_files.convert('png') as f:\n f.save(filename=out_path)\n\n return glob.glob('%s*' % out_path[:-4])", "def get_image_links(data):\n painting_links = []\n\n print(data)\n\n for painting in data:\n painting_links.append(painting['image'])\n\n return painting_links", "def get_images(self, info):\n # Add all marked images\n if self._app[\"mark\"].marked:\n images = self._app[\"mark\"].marked\n if len(images) == 1:\n message = \"%s %d marked image\" % (info, len(images))\n else:\n message = \"%s %d marked images\" % (info, len(images))\n self._app[\"statusbar\"].message(message, \"info\")\n # Add the image shown\n else:\n images = [os.path.abspath(self._app.get_pos(True))]\n return images", "def _printout_images_info(design_path):\r\n _max_pic_number = 8\r\n images = dict()\r\n for foo in os.listdir(design_path):\r\n abs_foo = os.path.join(design_path, foo)\r\n if os.path.isfile(abs_foo):\r\n continue\r\n if foo.endswith(\"Images\"):\r\n images.setdefault(foo, list())\r\n for bar in os.listdir(abs_foo):\r\n if bar.endswith(\".png\"):\r\n images[foo].append(bar)\r\n if images:\r\n for k, v in list(images.items()):\r\n v.sort(key=sort_by_num, reverse=True)\r\n nine_images = dict()\r\n images_number = 0\r\n for i in range(0, 10):\r\n if images_number > _max_pic_number:\r\n break\r\n for k, v in list(images.items()):\r\n nine_images.setdefault(k, list())\r\n try:\r\n nine_images[k].append(v[i])\r\n images_number += 1\r\n if images_number > _max_pic_number:\r\n break\r\n except IndexError:\r\n continue\r\n say_it(\"\")\r\n say_it(\"Images Number: {}\".format(images_number))\r\n ii = 1\r\n for kk, vv in list(nine_images.items()):\r\n for foo in vv:\r\n say_it(\"-PNG{}: {}/{}\".format(ii, kk, foo))\r\n ii += 1", "def images(self):\n return self.gameimage_set.all()", "def get_image(self, index):\r\n \r\n # Get request to get all the links for all exercises\r\n image = requests.get(API.url_image, headers = self.headers).json()\r\n filename = download(image[index]['image'])", "def get_galleries(self):\n data = self._get('get_gallery_list')\n return data['galleries']", "def load_images_page(self):\n logging.info(\"loading images page {}\".format(self.horizon_images_url))\n\n return self._load_page_measure_time(self.driver,self.horizon_images_url,\n tag = \"Images Page\")", "def get_user_photos(self, user_id, count = 30, page = 1):\n uri = 'users/' + user_id + 
'/photos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def image_list(self):\n return self._image_list", "def pages(self):\n # The page list comes in three sections. Given radius=3:\n # 0 1 2 ... n-2 n-1 n n+1 n+2 ... m-2 m-1 m\n # Alas, some caveats:\n # - These sections might overlap.\n # - The current page might not be integral.\n delta = self.radius - 1 # since the below two are off by one\n before_current = int(math.ceil(self.current_page - 1))\n after_current = int(math.floor(self.current_page + 1))\n pages = []\n\n # First through current\n if before_current - delta <= 1:\n pages.extend(range(0, before_current + 1))\n else:\n pages.append(None)\n pages.extend(range(\n before_current - delta, before_current + 1))\n\n # Current\n pages.append(self.current_page)\n\n # Current through end\n if self.last_page is None:\n # Don't know the last page. Show one more and ..., if appropriate\n if self.next_item and \\\n after_current * self.page_size <= self.maximum_skip:\n\n pages.append(after_current)\n pages.append(None)\n return pages\n\n if after_current + delta >= self.last_page - 1:\n pages.extend(range(\n after_current, self.last_page + 1))\n else:\n pages.extend(range(after_current, after_current + delta + 1))\n pages.append(None)\n\n return pages", "def scrape(iterator, location, limit):\n for photo in iterator:\n filepath = \"{0}/{1}/{2}.jpg\".format(location,\n photo.flickrid % 100,\n photo.flickrid)\n if os.path.exists(filepath):\n log.info(\"Skipping duplicate {0}\".format(photo.flickrid))\n continue\n\n try:\n log.info(\"Downloading {0} ({1})\".format(photo.flickrid,\n photo.format))\n image = photo.download()\n try:\n image.save(filepath)\n except IOError:\n os.makedirs(os.path.dirname(filepath))\n image.save(filepath)\n except KeyboardInterrupt:\n raise\n except:\n pass\n\n limit -= 1\n if limit == 0:\n break", "def get_vr_photos(self, count = 30, page = 1):\n uri = 'photos/vr'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def get_pages(posts):\n pages = []\n for i in range(4, len(posts), 5):\n pages.append(posts[i-4: i+1])\n r = len(posts) % 5\n if r > 0:\n pages.append(posts[len(posts) - r:])\n\n return pages" ]
[ "0.76301074", "0.61921346", "0.61705476", "0.61475706", "0.60838944", "0.60602915", "0.60530084", "0.6047631", "0.6000588", "0.5965341", "0.59554744", "0.5820679", "0.5802052", "0.5781331", "0.5779564", "0.5755933", "0.5743276", "0.57430536", "0.57286274", "0.5711702", "0.56674343", "0.56098354", "0.55935603", "0.55796134", "0.5574545", "0.5569969", "0.5549591", "0.5544264", "0.55342376", "0.54994446", "0.5495631", "0.5478098", "0.54639316", "0.54603463", "0.5443958", "0.5440419", "0.54402", "0.54268193", "0.54175895", "0.5398883", "0.5392763", "0.53852075", "0.5375755", "0.5374266", "0.5373626", "0.5339335", "0.5330089", "0.5310572", "0.5308976", "0.530748", "0.5294608", "0.52867645", "0.5280095", "0.5267482", "0.5253039", "0.525019", "0.52488893", "0.52475923", "0.5221275", "0.5214196", "0.52095497", "0.520826", "0.5206233", "0.51936543", "0.5189905", "0.51887894", "0.5183664", "0.51833135", "0.5180426", "0.51683867", "0.51672274", "0.5166602", "0.5160045", "0.51579785", "0.51517606", "0.5150636", "0.5148407", "0.5138313", "0.513682", "0.5125853", "0.51194596", "0.5115555", "0.5113632", "0.51092374", "0.5107697", "0.5102119", "0.5101365", "0.51007694", "0.5100534", "0.5094553", "0.50922334", "0.5091911", "0.50916624", "0.5091294", "0.5090033", "0.5085925", "0.50840145", "0.5081351", "0.5074679", "0.5074584", "0.5069547" ]
0.0
-1
White balance for every channel independently
def white_balance(image, perc):
    new_channel = []
    for channel in cv2.split(image):
        mi, ma = (np.percentile(channel, perc), np.percentile(channel, 100.0 - perc))
        channel = np.uint8(np.clip((channel - mi) * 255.0 / (ma - mi), 0, 255))
        new_channel.append(channel)
    imWB = np.dstack(new_channel)
    return imWB
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calibrate_white_balance(self, channel: LC):\n\n d_print(\"Warming up camera sensor...\", 1)\n\n # turn on channel light\n self.light_control(channel, 1)\n\n if channel == LC.WHITE:\n with picamera.PiCamera() as sensor:\n # set up the sensor with all its settings\n sensor.resolution = (128, 80)\n sensor.rotation = self.config[\"rotation\"]\n sensor.framerate = self.settings.framerate[channel]\n sensor.shutter_speed = self.settings.shutter_speed[channel]\n\n # set up the blue and red gains\n sensor.awb_mode = \"off\"\n rg, bg = (1.1, 1.1)\n sensor.awb_gains = (rg, bg)\n\n # now sleep and lock exposure\n time.sleep(20)\n sensor.exposure_mode = self.settings.exposure_mode\n\n # record camera data to array and scale up a numpy array\n #rgb = np.zeros((1216,1216,3), dtype=np.uint16)\n with picamera.array.PiRGBArray(sensor) as output:\n # capture images and analyze until convergence\n for i in range(30):\n output.truncate(0)\n sensor.capture(output, 'rgb')\n rgb = np.copy(output.array)\n\n #crop = rgb[508:708,666:966,:]\n crop = rgb[30:50,32:96,:]\n\n r, g, b = (np.mean(crop[..., i]) for i in range(3))\n d_print(\"\\trg: {:4.3f} bg: {:4.3f} --- ({:4.1f}, {:4.1f}, {:4.1f})\".format(rg, bg, r, g, b), 1)\n\n if abs(r - g) > 1:\n if r > g:\n rg -= 0.025\n else:\n rg += 0.025\n if abs(b - g) > 1:\n if b > g:\n bg -= 0.025\n else:\n bg += 0.025\n\n sensor.awb_gains = (rg, bg)\n else:\n rg = self.settings.wb[LC.GROWTH][\"r\"]\n bg = self.settings.wb[LC.GROWTH][\"b\"]\n\n # turn off channel light\n self.light_control(channel, 0)\n\n self.config[\"wb\"][channel] = dict()\n self.config[\"wb\"][channel][\"r\"] = rg\n self.config[\"wb\"][channel][\"b\"] = bg\n\n d_print(\"Done.\", 1)", "def whitebalance(self, img_cv2_mask):\n\n print(\"controller - whitebalance!\")\n if self.pressure_img.whitebalanced == False:\n img_whitebalanced = self.pressure_img.target_detector.whiteBalance()\n self.view.processing_gui.ask_whitebalance_confirmation(img_cv2_mask, img_whitebalanced)\n else:\n self.view.popupmsg(\"Ja s'ha aplicat la reducció.\")", "def whitebalance_confirmated(self, img_cv2_whitebalanced):\n\n print(\"controller - whitebalance_confirmated!\")\n self.pressure_img.whitebalanced = True\n self.pressure_img.mask = img_cv2_whitebalanced\n self.pressure_img.img = img_cv2_whitebalanced.copy()\n self.view.processing_gui.update_whitebalanced_label(img_cv2_whitebalanced)", "def sweep_relay():", "def blackAndWhite(image):\n blackPixel = (0, 0, 0)\n whitePixel = (255, 255, 255)\n for y in range(image.getHeight()):\n for x in range(image.getWidth()):\n (r, g, b) = image.getPixel(x, y)\n average = (r + g + b) // 3\n if average < 128:\n image.setPixel(x, y, blackPixel)\n else:\n image.setPixel(x, y, whitePixel)", "def black(self, x):\n n = x.shape[0]\n count_arr = np.array([[0] * 28] * 28) # array to keep the count for all [i,j] coordinates\n # Base: i = j = 0, count_arr[i, j] = 0\n for i in range(1, n):\n for j in range(1, n):\n if x[i, j] > 0:\n count_arr[i, j] = count_arr[i, j - 1] + count_arr[i - 1, j] - count_arr[i - 1, j - 1] + 1\n else:\n count_arr[i, j] = count_arr[i, j - 1] + count_arr[i - 1, j] - count_arr[i - 1, j - 1]\n self.count_black = count_arr", "def heat_balance(index):\n t = index[0]\n return (\n heat_hru_out[t]\n + pulp.lpSum([component_output[i, t] for i in index_heat_out])\n - pulp.lpSum([component_input[i, t] for i in index_heat_in])\n + heat_unserve[t]\n - heat_dump[t]\n == forecast[\"heat_load\"][t]\n )", "def filterWithWhite(image,sens):\r\n # convert image to hsv color space \r\n hsv = 
cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\r\n sensitivity = sens\r\n # define ranges of white color in HSV colorspace according to sensitivity\r\n lower_white = np.array([0,0,255-sensitivity])\r\n upper_white = np.array([255,sensitivity,255])\r\n # create a mask if the color value in pixel, lower<pixel<upper == 1 else 0 \r\n mask = cv2.inRange(hsv, lower_white, upper_white)\r\n # apply bitwise and to the image\r\n summ = cv2.bitwise_and(image.copy(), image.copy(), mask = mask)\r\n # turn image to gray for thresholding\r\n summ = cv2.cvtColor(summ,cv2.COLOR_BGR2GRAY)\r\n # blur for noise reducing \r\n summ = cv2.GaussianBlur(summ, (3, 3), 0)\r\n # threshold the pixel values higher than 70\r\n summ = cv2.threshold(summ,70,255,cv2.THRESH_BINARY)[1]\r\n return summ", "def obtain_filters_mask(model, threshold, cba_index, prune_index):\n\n num_pruned_bn = 0\n num_total_bn = 0\n num_remain_filters = []\n mask_remain_filters = []\n\n # The number of filters reserved must be a multiple of 8\n int_multiple = 8\n filter_switch = list(range(0, 1024, int_multiple))\n\n # cba_index stores all convolution layers with BN layer (the previous layer of YOLO layer is without BN layer)\n for index in cba_index:\n bn_module = model.module_list[index][1]\n if index in prune_index:\n mask = obtain_bn_mask(bn_module, threshold).cpu().numpy()\n num_layer_remain_bn = int(mask.sum())\n if num_layer_remain_bn < 8:\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-8]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n else:\n for i, _ in enumerate(filter_switch):\n if num_layer_remain_bn < filter_switch[i]:\n num_layer_remain_bn = filter_switch[i - 1]\n break\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-num_layer_remain_bn]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n\n num_remain_bn = int(mask.sum())\n num_pruned_bn = num_pruned_bn + mask.shape[0] - num_remain_bn\n\n if num_remain_bn == 0:\n print(\"Channels would be all pruned!\")\n raise Exception\n\n logger.info('layer index: %d \\t total channel: %d \\t remaining channel: %d',\n index, mask.shape[0], num_remain_bn)\n else:\n mask = np.ones(bn_module.weight.data.shape)\n num_remain_bn = mask.shape[0]\n num_total_bn += mask.shape[0]\n num_remain_filters.append(num_remain_bn)\n mask_remain_filters.append(mask.copy())\n\n prune_ratio = num_pruned_bn / num_total_bn\n logger.info('Prune channels: %d \\t Prune ratio: %.3f', num_pruned_bn, prune_ratio)\n\n return num_remain_filters, mask_remain_filters", "def white_balance(image, percentage=0.006):\n\n image_uint8 = (255.0 * image).astype(np.uint8)\n\n pixels_total = image.shape[0] * image.shape[1]\n threshold = percentage * pixels_total\n\n _stretch_values(image_uint8, 0, threshold)\n _stretch_values(image_uint8, 1, threshold)\n _stretch_values(image_uint8, 2, threshold)\n\n return image_uint8 / 255.0", "def cool_balance(index):\n t = index[0]\n return (\n pulp.lpSum([component_output[i, t] for i in index_cool_out])\n - pulp.lpSum([component_input[i, t] for i in index_cool_in])\n + pulp.lpSum([storage_disch[i, t] for i in heat_storage_names])\n - pulp.lpSum([storage_ch[i, t] for i in heat_storage_names])\n + cool_unserve[t]\n - cool_dump[t]\n == forecast[\"cool_load\"][t]\n )", "def _fixupChannels(self):\n\n # Add extra disabled channels as needed\n for index, ch in enumerate(self.channels):\n ch.index = index # 
fixup indexes\n\n self._fillChannels()", "def cool_down(heatmap, amount):\n heatmap[heatmap <= amount] = 0\n heatmap[heatmap > amount] -= amount\n return heatmap", "def test_color_balance_random_images(self, img):\n\n # color balance only works if every channel has at least two different\n # values, otherwise everything in that channel would be mapped to 0\n for channel in cv2.split(img):\n assume(len(np.unique(channel)) >= 2)\n\n balanced_img = balance_color(img, percentile=0)\n\n assert balanced_img.max() == 255, \\\n \"Maximum of a balanced image should be 255\"\n assert balanced_img.min() == 0, \\\n \"Minimum of a balanced image should be 0\"\n for channel in cv2.split(balanced_img):\n assert channel.max() == 255, \\\n \"Maximum of each channel should be 255\"\n assert channel.min() == 0, \\\n \"Minimum of each channel should be 0\"", "def noiseReduction(self):\n pass", "def _calculate_channel_mask(model: nn.Module, pruning_strategy: str, cuda=True) -> (List[int], List[torch.Tensor]):\n total = 0\n pruned = 0\n cfg = []\n cfg_mask = []\n for k, m in enumerate(model.modules()):\n if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n weight_copy = m.weight.data.abs().clone()\n total += weight_copy.shape[0]\n thre = _search_threshold(weight_copy,\n pruning_strategy)\n mask = weight_copy.gt(thre)\n if cuda:\n mask = mask.float().cuda()\n else:\n mask = mask.float()\n pruned = pruned + mask.shape[0] - torch.sum(mask)\n m.weight.data.mul_(mask)\n m.bias.data.mul_(mask)\n cfg.append(int(torch.sum(mask)))\n cfg_mask.append(mask.clone())\n print('layer index: {:d} \\t total channel: {:d} \\t remaining channel: {:d}'.\n format(k, mask.shape[0], int(torch.sum(mask))))\n elif isinstance(m, nn.MaxPool2d):\n cfg.append('M')\n\n return cfg, cfg_mask", "def forward(self,\n x: Tensor) \\\n -> Tensor:\n\n x = self.convs[0](x)\n res = x\n for i in range(self.num_rates):\n x = torch.tanh(self.filter_convs[i](\n x)) * torch.sigmoid(self.gate_convs[i](x))\n x = self.convs[i+1](x)\n res = res + x\n return res", "def _fillChannels(self):\n\n # Add extra disabled channels as needed\n index = len(self.channels)\n while index < self.iface.myInfo.max_channels:\n ch = channel_pb2.Channel()\n ch.role = channel_pb2.Channel.Role.DISABLED\n ch.index = index\n self.channels.append(ch)\n index += 1", "def Deband(clip: vs.VideoNode, radius: int = 17, threshold: float = 4,\n iterations: int = 1, grain: float = 4, chroma: bool = True)-> vs.VideoNode:\n if get_depth(clip) != 16:\n clip = depth(clip, 16)\n if chroma is True:\n clip = join([core.placebo.Deband(x, 1, iterations, threshold, radius, grain)\n for x in split(clip)])\n else:\n clip = core.placebo.Deband(clip, 1, iterations, threshold, radius, grain)\n return clip", "def blackcover(model, X, y, width, height, xskip, yskip):\n\t#wideth:44 , height:22, xship:22. 
yship:22\n max_loss = torch.zeros(y.shape[0]).to(y.device)\n max_delta = torch.ones_like(X).to(y.device)\n xtimes = 224//xskip\n ytimes = 224//yskip\n\n for i in range(xtimes):\n for j in range(ytimes):\n\n blackcover = np.ones([224,224,3]).astype(np.float32)*255\n blackcover[yskip*j:(yskip*j+height),xskip*i:(xskip*i+width),:] = 0 \n blackcover = transforms.ToTensor()(blackcover).to(y.device)\n\n #print(blackcover[:,1,1])\n # out = torchvision.utils.make_grid(blackcover)\n # imshow(out)\n \n\n all_loss = nn.CrossEntropyLoss(reduction='none')(model( X*blackcover), y )\n if(all_loss>=max_loss):\n max_delta = blackcover.detach()\n max_loss = torch.max(max_loss, all_loss)\n \n return max_delta", "def _add_mass_balance(self) -> None:\n # Add new accumulation / ablation on the layer ------------------------\n # Surface mass balance\n b = self.m * (self.ele_orig - self.ela)\n self.h += b # type: ignore\n self.h = self.h * (self.h > 0)\n\n # Update elevation with new glacier geometry\n self.ele = self.ele_orig + self.h\n ele = gaussian_filter(self.ele, sigma=3)\n self.ele = self.ele_orig * (self.h == 0) + ele * (self.h > 0)", "def zero_negative_weights(self):\n for k in range(len(self)):\n self[k] *= 0 if self[k] < 0 else 1\n self.finalized = True\n return self", "def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)", "def entropy_balance(self):\n return", "def entropy_balance(self):\n return", "def down_optimized_block(x, out_channels, name, act=tf.nn.relu):\n with tf.variable_scope(name):\n x_0 = x\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv1')\n x = act(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv2')\n x = dsample(x)\n x_0 = dsample(x_0)\n x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='sn_conv3')\n return x + x_0", "def black_and_white(img):\n\n # Brightness levels range from 0 to 255.\n # Change the colour of each pixel to black or white, depending on whether\n # its brightness is in the lower or upper half of this range.\n\n black = create_color(0, 0, 0)\n white = create_color(255, 255, 255)\n\n for x, y, col in img:\n red, green, blue = col\n \n brightness = (red + green + blue) / 3\n\n if brightness < 128:\n set_color(img, x, y, black)\n \n else: # brightness is between 128 and 255, inclusive\n set_color(img, x, y, white)", "def channel_padding(x):\n #keras.backend.concatenate([x, tf.zeros_like(x)], axis=-1)\n x0=keras.layers.Activation('sigmoid')(x)\n return keras.backend.concatenate([x, x0], axis=-1)", "def ohms(self):\n # Rwb = Rwiper + Rtotal * (counts / 256)\n # Rwa = Rwiper + Rtotal * ((256 - counts) / 256)\n g = 0\n rtotal=0.0\n reach=[]\n for chan in self.get_channel_list(self.nchans):\n self.rwa[chan] = float( 256 - self.vals[chan] ) / 256.0\n self.rwb[chan] = float( self.vals[chan] ) / 256.0\n self.rwa[chan] *= self.Rtotal\n self.rwb[chan] *= self.Rtotal \n self.rwa[chan] += self.Rwiper\n self.rwb[chan] += self.Rwiper", "def blank_tile_cascade(self, tile):\n nodes = self.find_connecting_indexes(tile)\n for node in nodes:\n if self.stack[node]['value'] != 0:\n self.stack[node]['flip'] = True\n continue\n else:\n if not self.stack[node]['flip']:\n self.stack[node]['flip'] = True\n self.tiles_remaining -= 1\n self.blank_tile_cascade(node)", "def blackbox_network():\n num_nodes = 6\n num_states = 2 ** num_nodes\n tpm = np.zeros((num_states, num_nodes))\n\n for index, 
previous_state in enumerate(all_states(num_nodes)):\n current_state = [0 for i in range(num_nodes)]\n if previous_state[5] == 1:\n current_state[0] = 1\n current_state[1] = 1\n if previous_state[0] == 1 and previous_state[1]:\n current_state[2] = 1\n if previous_state[2] == 1:\n current_state[3] = 1\n current_state[4] = 1\n if previous_state[3] == 1 and previous_state[4] == 1:\n current_state[5] = 1\n tpm[index, :] = current_state\n\n # fmt: off\n cm = np.array([\n [0, 0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [1, 1, 0, 0, 0, 0],\n ])\n # fmt: on\n\n return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])", "def white_win(board):\r\n \r\n global w_castle\r\n global winner_white\r\n \r\n for i in winner_white:\r\n if i in board.white:\r\n w_castle += 1\r\n board.white.remove(i)\r\n if w_castle == 2:\r\n board.board_print()\r\n print(\"Game Over - White has won!\")\r\n return True\r\n if len(board.black) == 0:\r\n print(\"Game Over - White has won!\")\r\n return True\r\n return False", "def crossing_minimization(self):\n self.layer_sweep()", "def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n median = torch.median(x)\r\n\r\n global_threshold = torch.where(x < median, torch.tensor([0.]), x)\r\n maxpool = nn.MaxPool2d(7, stride=1, padding=3)\r\n pooled = maxpool(global_threshold)\r\n\r\n binarize = torch.where(pooled == x, torch.tensor([1.]), torch.tensor([0.]))\r\n\r\n output = torch.mul(binarize, x)\r\n \r\n return output", "def ramp_down(self) -> None:\n for stock in self.stocks:\n if stock.are_any_shares_owned():\n self.cash_balance = stock.sell(-1, self.cash_balance, self.buy_budget)", "def decayNetworks(self):\n self.internalNetwork.decayRates()\n self.gameNetwork.decayRates()", "def fill_missing_channels(P8gen, max_total_br, decay_chains, epsilon=1e-6):\n top_level_particles = get_top_level_particles(decay_chains)\n for particle in top_level_particles:\n my_total_br = compute_total_br(particle, decay_chains)\n remainder = 1 - my_total_br / max_total_br\n assert(remainder > -epsilon)\n assert(remainder < 1 + epsilon)\n if remainder > epsilon:\n add_dummy_channel(P8gen, particle, remainder)", "def forward(grid:ArcGrid) -> ArcGrid:\n grid = grid.copy()\n x, y = grid.shape\n\n for i in range(x):\n for j in range(y):\n if grid[i,j] != ArcColors.BLACK:\n count_nb_neighbours = 0\n for n in adjacent(grid, (i,j), diag=False):\n if grid[n] != ArcColors.BLACK:\n count_nb_neighbours+= 1\n\n if count_nb_neighbours < 2:\n grid[i,j] = ArcColors.BLACK\n\n return grid", "def update(self):\n if self.black + self.white == self.SIZE*self.SIZE:\n if self.black > self.white:\n self.gc.black_wins = True\n elif self.white > self.black:\n self.gc.white_wins = True\n else:\n self.gc.tie = True\n self.gc.black_num = self.black\n self.gc.white_num = self.white", "def Green_func(self):\n if self.bc == True:\n size = self.grid_size\n else:\n size = 2*self.grid_size\n self.Green = np.zeros([size, size])\n for x in range(len(self.Green[0])):\n for y in range(len(self.Green[1])):\n radius = np.sqrt(x**2 + y**2) \n if radius < self.soften: \n radius = self.soften\n self.Green[x, y]=1/(4 * np.pi * radius)\n if self.grid_size%2 == 0: \n self.Green[: size//2, size//2 : ] = np.flip(self.Green[: size//2, : size//2], axis = 1) # an intermittent step - the original grid has only been flipped once (2 x the original size)\n self.Green[ size//2 : , :] = np.flip(self.Green[: size//2, :], axis = 0)\n else: \n print(\"Exiting - Grid size is currently odd. 
Pleaset set to an even value.\")", "def c2_xavier_fill(module: nn.Module) -> None:\n # Caffe2 implementation of XavierFill in fact\n # corresponds to kaiming_uniform_ in PyTorch\n nn.init.kaiming_uniform_(module.weight, a=1) # pyre-ignore\n if module.bias is not None: # pyre-ignore\n nn.init.constant_(module.bias, 0)", "def balance_reactions(self):\n element_df = ccache.get_element_data_frame(self.cids)\n\n # find all reactions that contain only compounds that have formulae\n cpd_with_formulae = (element_df != 0).any(axis=1)\n logger.info('# compounds without a formula: %d'\n % sum(~cpd_with_formulae))\n\n rxn_with_formulae = \\\n (self.S.loc[~cpd_with_formulae, :] == 0).all(axis=0)\n logger.info('# reactions with full formulae: %d'\n % sum(rxn_with_formulae))\n\n # recalculate final conservation matrix\n to_balance = self.reaction_df['balance'].copy()\n logger.info('# reactions we need to check for balacne: %d'\n % to_balance.sum())\n\n to_balance = to_balance & rxn_with_formulae\n logger.info('# -> of which also have a formulae: %d'\n % to_balance.sum())\n\n # balance O atoms using water\n self.S.loc['KEGG:C00001', to_balance] -= \\\n element_df['O'].T @ self.S.loc[:, to_balance]\n\n # balance H atoms using protons\n self.S.loc['KEGG:C00080', to_balance] -= \\\n element_df['H'].T @ self.S.loc[:, to_balance]\n\n imbalance_matrix = element_df.T @ self.S\n to_remove = to_balance & imbalance_matrix.any(axis=0)\n logger.info('# --> of which are not balanced and should '\n 'be removed: %d' % to_remove.sum())\n\n if to_remove.sum() > 0:\n for i, row in self.S.loc[:, to_remove].T.iterrows():\n sprs = {cid: coeff for cid, coeff in row.items() if coeff != 0}\n reaction = Reaction(sprs)\n logger.warning('unbalanced reaction #%s: %s' %\n (i, reaction.write_formula()))\n for j, v in imbalance_matrix[i].items():\n logger.warning('there are %d more %s atoms on the '\n 'right-hand side' % (v, j))\n self.S = self.S.loc[:, ~to_remove]\n self.S.columns = range(self.S.shape[1])\n\n self.reaction_df = self.reaction_df.loc[self.S.columns, :]\n\n # now get rid of the protons, since we are applying Alberty's\n # framework where their potential is set to 0, and the pH is held\n # as a controlled parameter\n self.S.drop('KEGG:C00080', axis=0, inplace=True)\n\n logger.info('After removing %d unbalanced reactions, '\n 'the stoichiometric matrix contains: '\n '%d compounds and %d reactions' %\n (sum(to_remove), self.S.shape[0], self.S.shape[1]))", "def check_if_white_back_black_edge(pred):\n values = np.unique(pred)\n # print(values)\n\n # check if binary\n if len(values) > 2:\n print(\"Your prediction result has not been binarized, please prompt them to choose the appropriate threshold for binarization.\")\n raise ValueError\n\n white_pos = np.where(pred == 255)\n # print(len(white_pos[0]))\n white_count = len(white_pos[0])\n black_pos = np.where(pred == 0)\n # print(len(black_pos[0]))\n black_count = len(black_pos[0])\n # print(black_count / white_count)\n rate = black_count / white_count\n if rate < 5:\n print(\"The results must be submitted with white background and black edge. 
Please submit after correction.\")\n raise ValueError", "def filterBNW(bmp, threshold, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n if( intensity(bmp.pixels[h][w]) > threshold ):\n bmp.pixels[h][w] = WHITE\n else:\n bmp.pixels[h][w] = BLACK\n\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def forward(self, x):\n\n if x.dim() == 3:\n x = x.unsqueeze(1)\n x = x.transpose(1, 3)\n x = self.norm0(x)\n x = x.transpose(1, 3)\n\n x = self.conv_block1(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block2(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block3(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x, p=0.2, training=self.training)\n x3_out = self.conv_block4(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x3_out, p=0.2, training=self.training)\n x2_out = self.conv_block5(x, pool_size=(2, 2), pool_type=\"avg\")\n x = F.dropout(x2_out, p=0.2, training=self.training)\n x1_out = self.conv_block6(x, pool_size=(1, 1), pool_type=\"avg\")\n x = F.dropout(x1_out, p=0.2, training=self.training)\n x = torch.mean(x, dim=3)\n\n (x1, _) = torch.max(x, dim=2)\n x2 = torch.mean(x, dim=2)\n x = x1 + x2\n\n # [B x 1 x emb_dim]\n if not self.return_reps:\n return x.unsqueeze(1)\n\n return x.unsqueeze(1), (x1_out, x2_out, x3_out)", "def balance_actions(X, y, drop_prob):\n # Condificação da ação acelerar\n acceler = np.zeros(7)\n acceler[1] = 1.\n # Find out what samples are labeled as accelerate\n is_accel = np.all(y==acceler, axis=1)\n # Get the index of all other samples (not accelerate)\n other_actions_index = np.where(np.logical_not(is_accel))\n # Randomly pick drop some accelerate samples. 
Probabiliy of dropping is given by drop_prob\n drop_mask = np.random.rand(len(is_accel)) > drop_prob\n accel_keep = drop_mask * is_accel\n # Get the index of accelerate samples that were kept\n accel_keep_index = np.where(accel_keep)\n # Put all actions that we want to keep together\n final_keep = np.squeeze(np.hstack((other_actions_index, accel_keep_index)))\n final_keep = np.sort(final_keep)\n X_bal, y_bal = X[final_keep], y[final_keep]\n\n return X_bal, y_bal", "def white_balance(device, img, mode='hist',debug=None, roi=None):\n device += 1\n\n ori_img = np.copy(img)\n\n if roi is not None:\n roiint = all(isinstance(item, int) for item in roi)\n\n if len(roi) != 4 | roiint is False:\n fatal_error('If ROI is used ROI must have 4 elements as a list and all must be integers')\n else:\n pass\n\n if len(np.shape(img)) == 3:\n iy, ix, iz = np.shape(img)\n hmax=255\n type = np.uint8\n else:\n iy, ix = np.shape(img)\n if img.dtype == 'uint8':\n hmax=255\n type=np.uint8\n elif img.dtype == 'uint16':\n hmax=65536\n type=np.uint16\n\n mask = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n if roi is None:\n x = 0\n y = 0\n w = ix\n h = iy\n\n else:\n x = roi[0]\n y = roi[1]\n w = roi[2]\n h = roi[3]\n\n if len(np.shape(img)) == 3:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (0, 255, 0), 3)\n c1 = img[:, :, 0]\n c2 = img[:, :, 1]\n c3 = img[:, :, 2]\n if mode == 'hist':\n channel1 = _hist(c1, hmax, x, y, h, w, type)\n channel2 = _hist(c2, hmax, x, y, h, w, type)\n channel3 = _hist(c3, hmax, x, y, h, w, type)\n else:\n channel1 = _max(c1, hmax, mask, x, y, h, w, type)\n channel2 = _max(c2, hmax, mask, x, y, h, w, type)\n channel3 = _max(c3, hmax, mask, x, y, h, w, type)\n\n finalcorrected = np.dstack((channel1, channel2, channel3))\n\n else:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (255, 255, 255), 3)\n if mode == 'hist':\n finalcorrected = _hist(img, hmax, x, y, h, w, type)\n elif mode == 'max':\n finalcorrected = _max(img, hmax, mask, x, y, h, w, type)\n\n if debug == 'print':\n print_image(ori_img, (str(device) + '_whitebalance_roi.png'))\n print_image(finalcorrected, (str(device) + '_whitebalance.png'))\n\n elif debug == 'plot':\n plot_image(ori_img, cmap='gray')\n plot_image(finalcorrected, cmap='gray')\n\n return device, finalcorrected", "def pre_filter_channels(self, channels=None): # pragma: no cover\n pass", "def _add_bal(self):\n\n c = self.components\n p = self.pipes\n\n # TODO No mass flow reversal yet\n if self.temperature_driven:\n\n lines = self.params['lines'].v()\n\n self.block.mix_temp = Var(self.TIME, lines)\n\n def _temp_bal_incoming(b, t, l):\n\n incoming_comps = collections.defaultdict(list)\n incoming_pipes = collections.defaultdict(list)\n\n for name, comp in c.items():\n if value(comp.get_mflo(t)) >= 0:\n incoming_comps['supply'].append(name)\n else:\n incoming_comps['return'].append(name)\n\n for name, pipe in p.items():\n if value(pipe.get_edge_mflo(self.name, t)) >= 0:\n incoming_pipes['supply'].append(name)\n else:\n incoming_pipes['return'].append(name)\n # Zero mass flow rate:\n if value(\n sum(c[comp].get_mflo(t) for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) == 0:\n # mixed temperature is average of all joined pipes, actual value should not matter,\n # because packages in pipes of this time step will have zero size and components do not take over\n # mixed temperature in case there is no mass flow\n\n return b.mix_temp[t, l] == (\n sum(c[comp].get_temperature(t, l) for comp in c) +\n 
sum(p[pipe].get_temperature(self.name, t, l) for\n pipe in p)) / (\n len(p) + len(c))\n\n\n else: # mass flow rate through the node\n return (sum(\n c[comp].get_mflo(t) for comp in incoming_comps[l]) +\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) * b.mix_temp[t, l] == \\\n sum(c[comp].get_mflo(t) * c[comp].get_temperature(t,\n l)\n for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) * p[\n pipe].get_edge_temperature(self.name, t, l)\n for pipe in incoming_pipes[l])\n\n self.block.def_mixed_temp = Constraint(self.TIME,\n lines,\n rule=_temp_bal_incoming)\n\n def _temp_bal_outgoing(b, t, l, comp):\n\n outgoing_comps = collections.defaultdict(list)\n outgoing_pipes = collections.defaultdict(list)\n\n for name, comp_obj in c.items():\n if comp_obj.get_mflo(t) >= 0:\n outgoing_comps['return'].append(name)\n else:\n outgoing_comps['supply'].append(name)\n\n for name, pipe_obj in p.items():\n if pipe_obj.get_edge_mflo(self.name, t) >= 0:\n outgoing_pipes['return'].append(name)\n else:\n outgoing_pipes['supply'].append(name)\n\n if t == 0:\n return Constraint.Skip\n if comp in outgoing_pipes[l]:\n return p[comp].get_edge_temperature(self.name, t, l) == \\\n b.mix_temp[t, l]\n elif comp in outgoing_comps[l]:\n return c[comp].get_temperature(t, l) == b.mix_temp[t, l]\n else:\n return Constraint.Skip\n\n self.block.outgoing_temp_comps = Constraint(self.TIME,\n lines,\n c.keys(),\n rule=_temp_bal_outgoing)\n self.block.outgoing_temp_pipes = Constraint(self.TIME,\n lines,\n p.keys(),\n rule=_temp_bal_outgoing)\n\n elif self.repr_days is None:\n\n def _heat_bal(b, t):\n return 0 == sum(\n self.components[i].get_heat(t) for i in self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME,\n rule=_heat_bal)\n\n def _mass_bal(b, t):\n return 0 == sum(\n self.components[i].get_mflo(t) for i in self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME,\n rule=_mass_bal)\n\n else:\n def _heat_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_heat(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_heat_bal)\n\n def _mass_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_mflo(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_mass_bal)", "def white_balance_multipliers(\n sensitivities: RGB_CameraSensitivities, illuminant: SpectralDistribution\n) -> NDArrayFloat:\n\n shape = sensitivities.shape\n if illuminant.shape != shape:\n runtime_warning(\n f'Aligning \"{illuminant.name}\" illuminant shape to \"{shape}\".'\n )\n illuminant = reshape_sd(illuminant, shape, copy=False)\n\n RGB_w = 1 / np.sum(\n sensitivities.values * illuminant.values[..., None], axis=0\n )\n RGB_w *= 1 / np.min(RGB_w)\n\n return RGB_w", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def freeze_encoder(self):\n self.dfs_freeze(self.net.conv1)\n self.dfs_freeze(self.net.conv2)\n self.dfs_freeze(self.net.conv3)\n self.dfs_freeze(self.net.conv4)\n self.dfs_freeze(self.net.conv5)", "def down_block(x, out_channels, name, downsample=True, act=tf.nn.relu):\n with 
tf.variable_scope(name):\n input_channels = x.shape.as_list()[-1]\n x_0 = x\n x = act(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv1')\n x = act(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv2')\n if downsample:\n x = dsample(x)\n if downsample or input_channels != out_channels:\n x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='sn_conv3')\n if downsample:\n x_0 = dsample(x_0)\n return x_0 + x", "def ignore_biasbn(directions):\n for d in directions:\n if d.dim() <= 1:\n d.fill_(0)", "def hillClimber(self):\n if self.compare == set():\n self.compare = deepcopy(self)\n\n temperature = 500\n coolingRate = 0.9\n firstCosts = self.calculateCosts()\n iterationCount = 0\n\n while temperature > 1:\n iterationCount += 1\n for disconnectedHouse in self.disconnectedHouses:\n hillclimbSwitcher(disconnectedHouse, self, True)\n\n for nthChoiceHouse in self.nthChoiceHouses:\n if nthChoiceHouse.connection != \"NOT CONNECTED!\":\n hillclimbSwitcher(nthChoiceHouse, self)\n\n hillclimberHouses = self.houses\n shuffle(hillclimberHouses)\n\n for house in hillclimberHouses:\n if house.connection != \"NOT CONNECTED!\":\n hillclimbSwitcher(house, self, 1)\n\n self.calculateCosts()\n\n if acceptanceprobability(self.costs - 1, firstCosts, temperature) <= random():\n break\n else:\n temperature *= coolingRate\n\n print(\"This Configuration's minimum costs:\", self.compare.costs, \"euro\")\n firstCosts = self.costs\n\n print(\"hillclimber finished\")\n self = deepcopy(self.compare)\n\n if len(self.disconnectedHouses) != 0:\n for house in self.disconnectedHouses:\n print(\"Could not connect house\", house.id)", "def iterwhite():\n while True:\n for n in rng.randn(100):\n yield n", "def step_burning(self):\n self.set_state(\"white\")", "def approching_blackhole():\n blackhole = BlackHole()\n Rs = 8.0\n D_list = np.round(10**np.linspace(np.log10(50), np.log10(100000), 30))\n blackhole.open(blackhole.img_name, size=2000)\n\n for D in D_list:\n blackhole.compute(Rs, D)\n blackhole.img_save()", "def pink_brown():\n return pink_noise() + brown_noise()", "def balance_training_weight(w, y):\n sample_weight = w.copy()\n neg_mask = (y == 0)\n pos_mask = (y == 1)\n \n bkg_sum_weight = np.sum(sample_weight[neg_mask])\n sig_sum_weight = np.sum(sample_weight[pos_mask])\n\n sample_weight[pos_mask] = sample_weight[pos_mask] / sig_sum_weight\n sample_weight[neg_mask] = sample_weight[neg_mask] / bkg_sum_weight\n return sample_weight", "def flip_all(self, module, input, output):\n layer_from = 50 #for small GPU's use 25 or less, for larger ones we can use the full result of 50\n layer_to = 51\n if (self.get_current_layer() >= (layer_from*7)) and (self.get_current_layer() <= ((layer_to*7)+1)): # observation: is the direct relation to the size of the recurrent module\n output[:] = torch.zeros(output.shape) # puts all outputs from the layer to 0\n self.updateLayer()\n if self.get_current_layer() >= self.get_total_layers():\n self.reset_current_layer()", "def _augment_channelswap(audio):\n if audio.shape[0] == 2 and torch.FloatTensor(1).uniform_() < 0.5:\n return torch.flip(audio, [0])\n\n return audio", "def black_win(board):\r\n \r\n global b_castle\r\n global winner_black\r\n \r\n for i in winner_black:\r\n if i in board.black:\r\n b_castle += 1\r\n board.black.remove(i)\r\n if b_castle == 2:\r\n board.board_print()\r\n print(\"Game Over - Black has won!\")\r\n return True\r\n if len(board.white) == 0:\r\n print(\"Game Over - Black has won!\")\r\n return True\r\n return False", "def 
blackbody(self, nu, T):\n x = self.h*nu/(self.kB*T)\n result = 2.*self.h*nu**3 /self.c**2\n result /= np.exp(x) - 1.\n return result", "def sharpen_bands(self):\n for label in self.labels:\n self.sharp_bands[label] = self.bands[label] - self.gauss_bands[\n label]", "def bMinusbStar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == -1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx *= (1 / self.prob.gamma)\n running_total += -1 + abcxx - ayxx\n return running_total", "def collapse_wavefunction(self):\r\n #check normalised:\r\n n = sum(self.block_weights)\r\n if n != 1:\r\n #normalise here if required\r\n self.block_weights = [x/n for x in self.block_weights]\r\n #make choice\r\n choice = np.random.choice(self.block_opts, p = self.block_weights)\r\n #update self accordingly\r\n self.block_opts = [choice]\r\n self.block_weights = [1]\r\n self.collapsed = True\r\n self.propogate()\r\n self.arr = self.superposition()\r\n return", "def transition_function(grid, neighbourstates, neighbourcounts, decay_grid,\n water_decay_grid):\n\n global water_counter\n global ignition_grid\n neighbourstates = np.array(neighbourstates)\n init_grid = initial_grid.astype(int)\n ig_grid = np.array(ignition_grid)\n windspeed_ignition_modifiers = wind_speed_rvalue(\"NE\", 10)\n new_ig_grid = []\n for i, row in enumerate(grid):\n new_ig_grid.append([\n ignite(cell, neighbourstates[:, i, j],\n windspeed_ignition_modifiers) for j, cell in enumerate(row)\n ])\n new_ig_grid = np.array(new_ig_grid)\n started_to_burn = []\n for i, row in enumerate(grid):\n started_to_burn.append([\n started_burning(cell, ig_grid[i, j], new_ig_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[started_to_burn] = START_BURN\n ig_grid = np.add(new_ig_grid, ig_grid)\n full_burn = []\n for i, row in enumerate(grid):\n full_burn.append([\n fully_burning(cell, ig_grid[i, j], decay_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[full_burn] = BURNING\n end_burning = []\n for i, row in enumerate(grid):\n end_burning.append([\n ending_burn(cell, decay_grid[i, j], decay_values[int(\n initial_grid[i, j])]) for j, cell in enumerate(row)\n ])\n grid[end_burning] = END_BURN\n decay_grid[(grid == BURNING) | (grid == END_BURN)] -= 1\n burnt_out = (decay_grid == 0) # find those which have decayed to 0\n grid[(decay_grid == 0\n )] = BURNT #set all that have decayed to zero to BURNT(7)\n water_counter += 1\n\n if (water_counter == 100):\n grid[120:160, 80:120] = initial_grid[120:160, 80:120]\n water_decay_grid[(grid != LAKE)] -= 1 # take one off their decay value\n grid[(water_decay_grid == 0)] = BURNT # switch their state to 5\n ignition_grid = ig_grid\n return grid", "def _disperse_baseband(self, signal, dm):\n for x in range(signal.Nchan):\n sig = signal._data[x]\n f0 = signal._fcent\n dt = (1/signal._samprate).to('us')\n\n fourier = np.fft.rfft(sig)\n u = make_quant(np.fft.rfftfreq(2 * len(fourier) - 1,\n d=dt.to('s').value), 'MHz')\n f = u-signal.bw/2. # u in [0,bw], f in [-bw/2, bw/2]\n\n # Lorimer & Kramer 2006, eqn. 
5.21\n H = np.exp(1j*2*np.pi*DM_K/((f+f0)*f0**2)*dm*f**2)\n\n product = fourier*H\n Dispersed = np.fft.irfft(product)\n\n signal._data[x] = Dispersed", "def forward(self, x):\n if len(self.convs) == 0:\n return x\n x = x.contiguous()\n for c, n in zip(self.convs, self.norms):\n x = c(x.permute(0, 2, 1)) # (B, C, T)\n x = n(x.permute(0, 2, 1)) # (B, T, C)\n d = torch.nn.functional.dropout(x, p=self.dropout, training=self.training)\n x = torch.relu(d)\n return d", "def brighten(rgb, factor):\n return [min(255, int(round(factor * c))) for c in rgb]", "def black_cap_price(discounts, forwards, strike, cap_vol, resets, maturities, \n notional=1000000):\n num_caplets = len(forwards)\n caplets = np.zeros(num_caplets)\n for i in range(num_caplets):\n caplets[i] = black_caplet(discounts[i], forwards[i], strike, cap_vol, \n resets[i], maturities[i], notional)\n \n return sum(caplets)", "def filter_fusion(luma_bin, sat_bin, grad_bin, mentor_bin):\n binary = np.zeros_like(luma_bin)\n binary[ (((grad_bin==1) | (sat_bin==1)) & (luma_bin==1)) | (mentor_bin==1) ] = 1\n\n # Erosion and dilation - Seems doesn't work. Mask-off\n #kernel = np.ones((5,5))\n #binary_dilation = cv2.dilate(binary, kernel, iterations=1)\n #binary_erosion = cv2.erode(binary_dilation, kernel, iterations=1)\n #binary = binary_erosion\n\n return binary", "def block(self, tree, factors):\n # first we apply strip mining to the loops given in factors\n for x in range(len(factors)):\n\n # we may want to not block a particular loop, e.g. when doing Rivera/Tseng blocking\n if factors[x] > 1:\n tree = StencilCacheBlocker.StripMineLoopByIndex(x*2, factors[x]).visit(tree)\n\n # now we move all the outer strip-mined loops to be outermost\n for x in range(1, len(factors)):\n if factors[x] > 1:\n tree = self.bubble(tree, 2*x, x)\n\n return tree", "def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0", "def drop_connect_pt(x, drop_ratio):\n keep_ratio = 1.0 - drop_ratio\n mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)\n mask.bernoulli_(keep_ratio)\n x.div_(keep_ratio)\n x.mul_(mask)\n return x", "def switch_to_no_feedforward_inputs(self):\n\n self.h_e=np.ones_like(self.inputs.noise_flat.T)*self.feed_forward_off_value\n self.h=np.vstack([self.h_e,self.h_i])", "def collectSmallBlind(self):\n\t\tif self.noOfPlayers() == 2:\n\t\t\tplayer = self.playerList[self.curDealerSeatNo]\n\t\telse:\n\t\t\tplayer, seatNo = self.findNthPlayerFromSeat(self.curDealerSeatNo, 1)\n\t\t\t\n\t\tif player.money < self.smallBlind:\n\t\t\tself.pots[-1:-1] = [player.money]\n\t\t\tself.currentBet[-1:-1] = [player.money]\n\t\t\tplayer.betAmount.append(player.money)\n\t\t\tplayer.money = 0\n\t\telse:\n\t\t\tplayer.removeMoney(self.smallBlind)\n\t\t\tself.pots[0] = self.pots[0] + self.smallBlind\n\t\t\tplayer.betAmount.append(self.smallBlind)", "def remove_offset(self):\n new_channel = sppasChannel()\n new_channel.set_sampwidth(self._sampwidth)\n new_channel.set_framerate(self._framerate)\n a = sppasAudioFrames(self._channel.get_frames(self._channel.get_nframes()), self._channel.get_sampwidth(), 1)\n avg = a.avg()\n new_channel.set_frames(a.bias(- avg))\n\n self._channel = new_channel", "def blank(self, channel):\n pass", "def discard(self):\n CHANNEL_MOVER_ACTIVE_FROM.discard(self.source_channel.id)\n CHANNEL_MOVER_ACTIVE_TO.discard(self.target_channel.id)\n \n status_message = 
self.status_message\n if (status_message is not None):\n try:\n del CHANNEL_MOVER_BY_STATUS_MESSAGE_ID[status_message.id]\n except KeyError:\n pass", "def remove_zero_element(state, pruned_model, pinned_in, pinned_out):\n zeros = []\n non_zeros = []\n was_conv = False\n first = True\n\n # Iterate through all the elements of the state_dict\n for k in state:\n # print(k)\n if \"weight\" in k:\n\n # Find in the pruned model the layer that corresponds to the current state_dict element\n current_module, next_module = find_module(pruned_model, k)\n\n if current_module is None:\n raise RuntimeError('The supplied model does not contain a module corresponding to key {}'.format(k))\n\n bias_key = k.replace(\"weight\", \"bias\")\n\n # The current element is a convolutional layer\n if isinstance(current_module, nn.Conv2d):\n # If the next layer is a convolutional with padding we do not remove the current layer neurons\n # Memorize that we encountered a conv layer\n was_conv = True\n # Sum the convolutional values for dimensions: input, h, w\n conv_sum = torch.sum(torch.abs(state[k]), dim=(1, 2, 3))\n\n # TODO add comment\n if first:\n if k.replace(\".weight\", \"\") not in pinned_in:\n stay_idx_prev = torch.where(torch.sum(torch.abs(state[k]), dim=(0, 2, 3)) != 0)[0]\n else:\n stay_idx_prev = torch.where(torch.sum(torch.abs(state[k]), dim=(0, 2, 3)) >= 0)[0]\n else:\n stay_idx_prev = non_zeros.view(-1)\n\n # Get which filters of the current layer are zeroed i.e. the sum of its element in ABS() must be = 0\n zeros = (conv_sum == 0).nonzero() if k.replace(\".weight\", \"\") not in pinned_out else (\n conv_sum < 0).nonzero()\n # Get which filters of the current layer are NON zero i.e. the sum of its element in ABS() must be != 0\n non_zeros = (conv_sum != 0).nonzero() if k.replace(\".weight\", \"\") not in pinned_out else (\n conv_sum >= 0).nonzero()\n # Get the number of output channels\n out_ch_num = state[k].shape[0]\n\n # Remove from the current layer all the zeroed filters\n stay_idx = torch.where(conv_sum != 0)[0] if k.replace(\".weight\", \"\") not in pinned_out \\\n else torch.where(conv_sum >= 0)[0]\n remove_idx = torch.where(conv_sum == 0)[0] if k.replace(\".weight\", \"\") not in pinned_out \\\n else torch.where(conv_sum < 0)[0]\n\n # IN ch\n state[k] = state[k][:, stay_idx_prev, :, :] if k.replace(\".weight\", \"\") not in pinned_in else state[k]\n # OUT ch\n state[k] = state[k][stay_idx, :, :, :]\n\n # Set to inf the biases corresponding to zeroed filters in the actual state_dict, marking them as \"to remove\"\n if bias_key in state:\n state[bias_key][remove_idx] = inf\n\n # The current element is a linear layer\n if isinstance(current_module, nn.Linear):\n if bias_key in state:\n for row in range(state[k].shape[0]):\n # Set to inf the biases corresponding to zeroed neurons in the actual state_dict, marking them as \"to remove\"\n if torch.sum(torch.abs(state[k][row])) == 0:\n state[bias_key][row] = inf\n\n # The previous layer was a convolutional\n if was_conv:\n was_conv = False\n # Evaluate how many FC neurons correspond to the previous CONV out channel\n neurons_per_channel = int(state[k].shape[1] / out_ch_num)\n remaining_neurons = []\n for z in non_zeros:\n # Compute the starting and end index of such neurons\n from_idx = z * neurons_per_channel\n to_idx = (z + 1) * neurons_per_channel\n remaining_neurons.append(state[k][:, from_idx:to_idx])\n\n # FC layer after a FC layer\n else:\n # Set to zero all the connection of the current layer corresponding to zeroed neurons of the previous 
layer\n remaining_neurons = []\n for z in non_zeros:\n remaining_neurons.append(state[k][:, z])\n\n # Get which neurons of the current channel are zeroed\n zeros = (torch.sum(torch.abs(state[k]), dim=1) == 0).nonzero()\n non_zeros = (torch.sum(torch.abs(state[k]), dim=1) != 0).nonzero()\n\n # Remove from the current layer all zeroed neurons and connections\n state[k] = torch.cat(remaining_neurons, 1) if remaining_neurons else state[k]\n state[k] = state[k][non_zeros.view(-1)]\n\n # Remove inf biases\n if \"bias\" in k:\n state[k] = state[k][state[k] != inf]\n\n first = False\n\n return state", "def cml_algo_encrypt(pixels_flat, pixel_num, rand, p, cycles, iterations):\n for cycle in range(1, cycles + 1):\n for pixel_index in range(pixel_num):\n\n pixel_float = pixels_flat[pixel_index - 1] / 255\n\n for _ in range(iterations):\n pixel_float = pwlcm(pixel_float, p)\n\n k = pixel_num * (cycle - 1) + pixel_index\n pixel_float = (pixel_float + rand[k]) % 1\n pixels_flat[pixel_index] = pixels_flat[pixel_index] + round(pixel_float * 255)\n\n if pixels_flat[pixel_index] > 255:\n pixels_flat[pixel_index] = pixels_flat[pixel_index] - 256", "def isolate_burned_pixels(array, upper, lower):\n not_burned = numpy.logical_or(array <= lower,\n array >= upper)\n array[not_burned] = 0\n return array", "def _burn_cpu():\n while True:\n random()*random()", "def white_kingside_castling(self):\n return (self.castling[0] and self.empty((10, 7))\n and self.empty((10, 8)) and not self.attacked((10, 6), BLACK)\n and not self.attacked((10, 7), BLACK))", "def forward(self, x):\r\n out = x + self.conv_block(x) # add skip connections\r\n return out", "def _vertical_blindspot_network(x):\n skips = [x]\n\n n = x\n n = _vshifted_conv(n, 48, 'enc_conv0')\n n = _vshifted_conv(n, 48, 'enc_conv1')\n n = _vshifted_pool(n)\n skips.append(n)\n\n n = _vshifted_conv(n, 48, 'enc_conv2')\n n = _vshifted_pool(n)\n skips.append(n)\n\n n = _vshifted_conv(n, 48, 'enc_conv3')\n n = _vshifted_pool(n)\n skips.append(n)\n\n n = _vshifted_conv(n, 48, 'enc_conv4')\n n = _vshifted_pool(n)\n skips.append(n)\n\n n = _vshifted_conv(n, 48, 'enc_conv5')\n n = _vshifted_pool(n)\n n = _vshifted_conv(n, 48, 'enc_conv6')\n\n #-----------------------------------------------\n n = UpSampling2D(2)(n)\n n = Concatenate(axis=3)([n, skips.pop()])\n n = _vshifted_conv(n, 96, 'dec_conv5')\n n = _vshifted_conv(n, 96, 'dec_conv5b')\n\n n = UpSampling2D(2)(n)\n n = Concatenate(axis=3)([n, skips.pop()])\n n = _vshifted_conv(n, 96, 'dec_conv4')\n n = _vshifted_conv(n, 96, 'dec_conv4b')\n\n n = UpSampling2D(2)(n)\n n = Concatenate(axis=3)([n, skips.pop()])\n n = _vshifted_conv(n, 96, 'dec_conv3')\n n = _vshifted_conv(n, 96, 'dec_conv3b')\n\n n = UpSampling2D(2)(n)\n n = Concatenate(axis=3)([n, skips.pop()])\n n = _vshifted_conv(n, 96, 'dec_conv2')\n n = _vshifted_conv(n, 96, 'dec_conv2b')\n\n n = UpSampling2D(2)(n)\n n = Concatenate(axis=3)([n, skips.pop()])\n n = _vshifted_conv(n, 96, 'dec_conv1a')\n n = _vshifted_conv(n, 96, 'dec_conv1b')\n\n # final pad and crop for blind spot\n n = ZeroPadding2D([[1,0],[0,0]])(n)\n n = Cropping2D([[0,1],[0,0]])(n)\n\n return n", "def drop_connect(x, drop_ratio):\n keep_ratio = 1.0 - drop_ratio\n mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)\n mask.bernoulli_(keep_ratio)\n x.div_(keep_ratio)\n x.mul_(mask)\n return x", "def drop_connect(x, drop_ratio):\n keep_ratio = 1.0 - drop_ratio\n mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)\n mask.bernoulli_(keep_ratio)\n 
x.div_(keep_ratio)\n x.mul_(mask)\n return x", "def up_block(x, out_channels, name, training=True):\n with tf.variable_scope(name):\n bn0 = ops.BatchNorm(name='bn_0')\n bn1 = ops.BatchNorm(name='bn_1')\n x_0 = x\n x = tf.nn.relu(bn0(x))\n x = usample(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv1')\n x = tf.nn.relu(bn1(x))\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv2')\n\n x_0 = usample(x_0)\n x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, training, 'snconv3')\n\n return x_0 + x", "def pink_noise():\n global curr_tick\n octave = octave_lookup[curr_tick]\n curr_noise[octave] = int(white_noise() / (5-octave))\n curr_tick += 1\n if curr_tick >= len(octave_lookup):\n curr_tick = 0\n return sum(curr_noise)", "def blue_channel(image: Image) -> Image:\n new_image = copy(image)\n # filter the intensities of every component in every pixel.\n for x, y, (r, g, b) in image:\n blue = create_color(0,0,b)\n set_color(new_image, x, y, blue)\n return new_image", "def __init__(\n self,\n in_channels=128,\n aux_channels=80,\n channels=64,\n out_channels=1,\n kernel_size=9,\n dilation=2,\n bias=True,\n noise_upsample_scales=[11, 2, 2, 2],\n noise_upsample_activation=\"LeakyReLU\",\n noise_upsample_activation_params={\"negative_slope\": 0.2},\n upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1],\n upsample_mode=\"nearest\",\n gated_function=\"softmax\",\n use_weight_norm=True,\n ):\n super().__init__()\n\n self.in_channels = in_channels\n\n noise_upsample = []\n in_chs = in_channels\n for noise_upsample_scale in noise_upsample_scales:\n # NOTE(kan-bayashi): How should we design noise upsampling part?\n noise_upsample += [\n torch.nn.ConvTranspose1d(\n in_chs,\n channels,\n noise_upsample_scale * 2,\n stride=noise_upsample_scale,\n padding=noise_upsample_scale // 2 + noise_upsample_scale % 2,\n output_padding=noise_upsample_scale % 2,\n bias=bias,\n )\n ]\n noise_upsample += [\n getattr(torch.nn, noise_upsample_activation)(\n **noise_upsample_activation_params\n )\n ]\n in_chs = channels\n self.noise_upsample = torch.nn.Sequential(*noise_upsample)\n self.noise_upsample_factor = np.prod(noise_upsample_scales)\n\n self.blocks = torch.nn.ModuleList()\n aux_chs = aux_channels\n for upsample_scale in upsample_scales:\n self.blocks += [\n TADEResBlock(\n in_channels=channels,\n aux_channels=aux_chs,\n kernel_size=kernel_size,\n dilation=dilation,\n bias=bias,\n upsample_factor=upsample_scale,\n upsample_mode=upsample_mode,\n gated_function=gated_function,\n ),\n ]\n aux_chs = channels\n self.upsample_factor = np.prod(upsample_scales)\n\n self.output_conv = torch.nn.Sequential(\n torch.nn.Conv1d(\n channels,\n out_channels,\n kernel_size,\n 1,\n bias=bias,\n padding=(kernel_size - 1) // 2,\n ),\n torch.nn.Tanh(),\n )\n\n # apply weight norm\n if use_weight_norm:\n self.apply_weight_norm()\n\n # reset parameters\n self.reset_parameters()", "def transfer_funds_randomly():\n\n source_id = get_random_account_id()\n sink_id = get_random_account_id()\n\n source = Account.get(id=source_id)\n amount = floor(source.balance / 2)\n\n if source.balance < amount:\n raise \"Insufficient funds\"\n\n source.balance -= amount\n sink = Account.get(id=sink_id)\n sink.balance += amount", "def chargeBatteries(self):\n self.currentBattery = self.maxBattery", "def __init__(self, in_ch=2048, out_ch=256):\n super(ChannelCompress, self).__init__()\n num_bottleneck = 1000\n add_block = []\n add_block += [nn.Linear(in_ch, num_bottleneck)]\n add_block += [nn.BatchNorm1d(num_bottleneck)]\n add_block += 
[nn.ReLU()]\n\n add_block += [nn.Linear(num_bottleneck, 500)]\n add_block += [nn.BatchNorm1d(500)]\n add_block += [nn.ReLU()]\n add_block += [nn.Linear(500, out_ch)]\n\n # Extra BN layer, need to be removed\n #add_block += [nn.BatchNorm1d(out_ch)]\n\n add_block = nn.Sequential(*add_block)\n add_block.apply(weights_init_kaiming)\n self.model = add_block", "def decay(self):\n self.push_pull_weight_ratio *= self.push_pull_weight_decay", "def fold(self):\n self.active = False\n self.hand = None\n self.bet = 0\n return True", "def mc_micro_sweep(self): \n for i in range(self.N):\n if random.random()>0.3:\n self.mc_update_micro_fixed(i,xy = True)" ]
[ "0.61220783", "0.5972675", "0.5931839", "0.56922317", "0.5463826", "0.54332715", "0.5367581", "0.528816", "0.5269515", "0.523269", "0.5217211", "0.520652", "0.5205482", "0.52013135", "0.5168077", "0.5151404", "0.5151039", "0.51505965", "0.51420283", "0.51318866", "0.5123756", "0.5122138", "0.511", "0.51007414", "0.51007414", "0.50787425", "0.5078039", "0.5076879", "0.5074788", "0.50705767", "0.5061253", "0.5049549", "0.5040824", "0.50353163", "0.5013147", "0.500223", "0.5002196", "0.49962154", "0.4995715", "0.49806106", "0.49784017", "0.4978198", "0.49766132", "0.49651647", "0.49643835", "0.49562454", "0.49541146", "0.49472076", "0.49374294", "0.49323612", "0.4928554", "0.4925191", "0.4921561", "0.49159214", "0.4914723", "0.49079782", "0.49004894", "0.48955885", "0.48894364", "0.4884537", "0.4877258", "0.48719642", "0.48519692", "0.48518968", "0.48514155", "0.48450726", "0.48432708", "0.4835941", "0.48353764", "0.48307443", "0.48299223", "0.48295492", "0.4829407", "0.48292097", "0.48282644", "0.48269492", "0.4824225", "0.4820157", "0.4814315", "0.4810073", "0.4806336", "0.47967827", "0.47929993", "0.47911796", "0.47908515", "0.47859392", "0.47858906", "0.4779964", "0.47772664", "0.47772664", "0.47772256", "0.47763488", "0.47741544", "0.4771997", "0.47625726", "0.47601914", "0.4759161", "0.47589952", "0.4758733", "0.47579622" ]
0.66862494
0
Open an audio file. write_type is "wav" or "mp3" when writing; when reading, the file format is determined automatically.
def open(cls, filename, write_type=None):
    from wavfile import WavFile
    from mp3file import MP3File
    if write_type is None:
        # Read mode: sniff the format instead of trusting the extension.
        file_object = open(filename, "rb")  # note: opened but never used or closed below
        try:
            layer = MP3(filename).info.layer
        except:
            layer = -1
        if layer == 3:
            return MP3File(filename, False)
        elif (sndhdr.what(filename) or [None])[0] == "wav":
            return WavFile(filename, False)
        else:
            raise FormatError("unrecognized audio format")
    else:
        # Write mode: the caller must name the container explicitly.
        if write_type == "wav":
            return WavFile(filename, True)
        elif write_type == "mp3":
            return MP3File(filename, True)
        else:
            assert False
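A small standalone sketch of the read-side format sniffing used above, kept to the standard library so it runs on its own; it skips the MP3-layer probe (which needs the same MP3 tag reader the method imports) and only reports which branch the method would take. The function name and return values are illustrative assumptions, not part of the original API.

import sndhdr  # stdlib header sniffer used by the method above (removed in Python 3.13)

def sniff_audio_format(filename):
    # Mirrors the read path of open(): a recognized WAV header wins,
    # anything else falls through to "unrecognized". The real method
    # additionally probes the MP3 frame layer before consulting sndhdr.
    header = sndhdr.what(filename)  # None if the header is not a known sound format
    if header is not None and header[0] == "wav":
        return "wav"
    return "unrecognized"

# e.g. sniff_audio_format("recording.wav") -> "wav"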
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def audio_try_play(self, type):\n wavfile = os.path.join(buildconfig.SPD_SOUND_DATA_PATH, \"test.wav\")\n\n binary = None\n\n if type == 'alsa':\n binary = 'aplay'\n elif type == 'pulse':\n binary = \"paplay\"\n else:\n raise NotImplementedError(\n \"Test for this audio system is not implemented\")\n\n if not shutil.which(binary):\n report(_(\"\"\"%s selected, but %s not installed. This might be a false\nwarning, but most likely sound is not working.\"\"\" % (type, binary)))\n reply = question(_(\"Are you sure that %s audio is working?\" % type), False)\n return reply\n\n cmd = '%s %s' % (binary, wavfile)\n if os.system(cmd):\n report(_(\"Can't play audio via\\n %s\" % cmd))\n report(_(\"\"\"Your audio doesn't seem to work, please fix audio first or choose\na different method.\"\"\"))\n return False\n\n\n reply = question(_(\"Did you hear the sound?\"), True)\n\n if not reply:\n report(_(\"\"\"Please examine the above output from the sound playback\nutility. If everything seems right, are you sure your audio is loud enough and\nnot muted in the mixer? Please fix your audio system first or choose a different\naudio output method in configuration.\"\"\"))\n return False\n else:\n report(_(\"Audio output '%s' works\" % type))\n return True", "def record_audio_to_file_and_get_wav(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()\n return WavFile(samples=frames, sample_width=sample_width, time=time, word=file_name)", "def audio_format(self):\n return self.__audio_format", "def play(self):\n assert pyaudio is not None, (\"You need to have pyaudio installed to \"\n \"use the play_wav function\")\n filename = os.path.join(tempfile.gettempdir(),\n '6003_wave_%s.wav' % abs(hash(tuple(self.samples))))\n self.save(filename)\n f = wave.open(filename, 'r')\n try:\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(),\n rate=f.getframerate(),\n output=True)\n\n data = f.readframes(10240)\n while data:\n stream.write(data)\n data = f.readframes(10240)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n finally:\n f.close()\n os.unlink(filename)", "def generateAudio(audiotype: str, audiometadata: dict):\n try:\n audiotype = audiotype.lower()\n\n if audiotype == \"song\":\n file = Song(audiometadata)\n elif audiotype == \"podcast\":\n file = Podcast(audiometadata)\n elif audiotype == \"audiobook\":\n file = Audiobook(audiometadata)\n else:\n return None\n\n return file\n\n except MetadataValueError as error:\n raise MetadataValueError(error)\n\n except MetadataGenerationError as error:\n raise MetadataGenerationError(error)", "def to_audio(self, _in, _out, bitrate, file_format):\n\n # Default output parameter\n # If not current directory, append '/'\n if os.path.isdir(_out):\n _out = '' if _out == '.' else _out + '/'\n _out += self.get_name_from_path(_in,\n replace=True) + '.' 
+ file_format\n _out = _out.replace('//', '/')\n self.out = _out\n\n # File format unchecked for single inputs\n if not check_is_video(_in):\n msg = \" is not a supported media type\"\n self.abort_conversion(\n self.get_name_from_path(_in) + msg)\n\n \"\"\"\n else:\n base_name = os.path.basename(_out)\n ext = os.path.splitext(base_name)[1]\n _out = _out.replace(ext, '.mp3')\n \"\"\"\n commands = ['ffmpeg', '-i', _in,\n '-vn', '-ar', '44100',\n '-ac', '2', '-ab',\n bitrate, _out]\n try:\n self.run_convert_commands(commands)\n except FileNotFoundError as er:\n res = require_ffmepg()\n\n if not res:\n self.abort_conversion(\"Dependecy not installed.\")", "def _open_stream(config):\n p = pyaudio.PyAudio()\n stream = p.open(\n format = config[\"format\"],\n channels = config[\"channels\"],\n rate = config[\"rate\"],\n input = True,\n frames_per_buffer = config[\"chunk\"]\n )\n return p, stream", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def record_and_get_wav(self, time):\n sample_width, frames = self.record_audio(time)\n return WavFile(samples=frames, sample_width=sample_width, time=time)", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. 
Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def _to_wav(self):\n self._status = 0\n fname = fm.file2wav(self.get_filename()) \n if fname != self.get_filename(): # can change the name\n self._set_filename(fname) # in case of wave transcoding\n self._status = 1", "def read_audio_from_stream(stream, sr=None, mono=False, duration=None, exp_format=\"wav\"):\n _, file_ext = stream.filename.rsplit('.', 1)\n ext_converter = {\n 'mp3': AudioSegment.from_mp3,\n }\n converter = ext_converter.get(file_ext)\n if not converter:\n raise InvalidUsage(f\"Invalid extension: {file_ext}\")\n\n with tempfile.NamedTemporaryFile() as ntf:\n sound = converter(stream)\n sound.export(ntf, format=exp_format)\n return read_audio(ntf.name, sr, mono, duration)", "def _write(self, source, times=1, file_flag=False, rs_times=None, rs_step=None):\n # if the device isnt initialized properly\n if self._device is None:\n raise SpeakerError\n\n self._duration = None\n self._paused = False\n self._canceled = False\n\n try:\n periodsize = Speaker.PERIOD_SIZE\n\n if file_flag:\n # Open the wav file\n f = wave.open(self._fix_path(source), 'rb') # add error checking here\n\n channels = f.getnchannels()\n framerate = f.getframerate()\n sample_width = f.getsampwidth()\n\n # Read data from file\n data = []\n sample = f.readframes(periodsize)\n while sample:\n data.append(sample)\n sample = f.readframes(periodsize)\n\n # Close file\n f.close()\n else:\n channels = self._channels\n framerate = self.framerate\n sample_width = self.SAMPLE_WIDTH\n\n # Read data from encoded string\n n = len(source)\n step = sample_width * periodsize\n data = [source[i:i+step] for i in range(0, n, step)] # add error checking here\n\n # calculate the duration of the track\n packets = len(data)\n packet_duration = periodsize / self.framerate\n self._duration = (packets * packet_duration)\n\n # Set Device attributes for playback\n self._device.setchannels(channels) # add error checking here\n self._device.setrate(framerate)\n self._device.setperiodsize(periodsize)\n \n # 8bit is unsigned in wav files\n if sample_width == 1:\n self._device.setformat(alsaaudio.PCM_FORMAT_U8)\n # Otherwise we assume signed data, little endian\n elif sample_width == 2:\n self._device.setformat(alsaaudio.PCM_FORMAT_S16_LE)\n elif sample_width == 3:\n self._device.setformat(alsaaudio.PCM_FORMAT_S24_3LE)\n elif sample_width == 4:\n self._device.setformat(alsaaudio.PCM_FORMAT_S32_LE)\n else:\n raise ValueError('Unsupported format')\n\n # Play n times the data\n \n self._play(data, times, rs_times, rs_step) # add error checking here\n except alsaaudio.ALSAAudioError as e:\n print(f\"Caugh is write: {e}\")\n raise SpeakerError\n\n except Exception as e:\n print(f\"Caugh is write: {e}\")\n raise SpeakerError", "def read_write_audio(self):\n\n if (self.local_audio_play and\n (self.mem_player.get_write_available() > self.cfg['Audio']['samples_per_frame'] * 2)):\n # send a frame from input to be played\n data_play = self.local_audio_play.popleft()\n\n if self.audio_playing and isinstance(data_play, Frame):\n if len(data_play) == self.cfg['Audio']['samples_per_frame'] * 2:\n self.last_frame_id = self.mem_player.put_frame(data_play.payload)\n self.cfg['Logging']['session_logger'].rec_write(self.audio_playing, data_play.payload)\n\n elif 
isinstance(data_play, Command):\n if data_play.parsed['__name__'] == 'utterance_start':\n self.audio_playing = data_play.parsed['fname']\n self.message_queue.append(\n (Command('play_utterance_start(user_id=\"{uid}\",fname=\"{fname}\")'\n .format(uid=data_play.parsed['user_id'], fname=data_play.parsed['fname']),\n 'VoipIO', 'HUB'),\n self.last_frame_id))\n try:\n if data_play.parsed['log'] == \"true\":\n self.cfg['Logging']['session_logger'].rec_start(\"system\", data_play.parsed['fname'])\n except SessionLoggerException as e:\n self.cfg['Logging']['system_logger'].exception(e)\n\n if self.audio_playing and data_play.parsed['__name__'] == 'utterance_end':\n self.audio_playing = None\n self.message_queue.append(\n (Command('play_utterance_end(user_id=\"{uid}\",fname=\"{fname})'\n .format(uid=data_play.parsed['user_id'], fname=data_play.parsed['fname']),\n 'VoipIO', 'HUB'),\n self.last_frame_id))\n try:\n if data_play.parsed['log'] == \"true\":\n self.cfg['Logging']['session_logger'].rec_end(data_play.parsed['fname'])\n except SessionLoggerException as e:\n self.cfg['Logging']['system_logger'].exception(e)\n\n if (self.mem_capture.get_read_available() > self.cfg['Audio']['samples_per_frame'] * 2):\n # Get and send recorded data, it must be read at the other end.\n data_rec = self.mem_capture.get_frame()\n\n # send the audio only if the call is connected\n # ignore any audio signal left after the call was disconnected\n if self.audio_recording:\n self.audio_record.send(Frame(data_rec))", "def record_audio_to_file(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def choose_audio(self):\n\n language = self.choose_language()\n track = self.choose_track(os.path.join(\"audio\", language))\n path = os.path.join(\"audio\", language, track)\n return path", "def readAudioFile(path):\n\n extension = os.path.splitext(path)[1]\n\n try:\n # Commented below, as we don't need this\n # #if extension.lower() == '.wav':\n # #[Fs, x] = wavfile.read(path)\n # if extension.lower() == '.aif' or extension.lower() == '.aiff':\n # s = aifc.open(path, 'r')\n # nframes = s.getnframes()\n # strsig = s.readframes(nframes)\n # x = numpy.fromstring(strsig, numpy.short).byteswap()\n # Fs = s.getframerate()\n if extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg':\n try:\n audiofile = AudioSegment.from_file(path)\n except:\n print(\"Error: file not found or other I/O error. 
\"\n \"(DECODING FAILED)\")\n return -1 ,-1\n\n if audiofile.sample_width == 2:\n data = numpy.fromstring(audiofile._data, numpy.int16)\n elif audiofile.sample_width == 4:\n data = numpy.fromstring(audiofile._data, numpy.int32)\n else:\n return -1, -1\n Fs = audiofile.frame_rate\n x = numpy.array(data[0::audiofile.channels]).T\n else:\n print(\"Error in readAudioFile(): Unknown file type!\")\n return -1, -1\n except IOError:\n print(\"Error: file not found or other I/O error.\")\n return -1, -1\n\n if x.ndim == 2:\n if x.shape[1] == 2:\n x = x.flatten()\n\n return Fs, x", "def create_audio_file():\n # Get the response from boto3\n raw_audio = generate_audio()\n # pull the Audiostream object from the response from boto3\n raw_audio = raw_audio[\"AudioStream\"]\n # create output location\n # process the whole block\n with closing(raw_audio) as audio:\n with open(\"output_audio.mp3\", \"wb\") as file:\n file.write(raw_audio.read())", "def _open(path):\n try:\n fileobj = File(os.fspath(path))\n if fileobj is None:\n raise MutagenError\n if getattr(fileobj, 'tags', None) is None:\n fileobj.add_tags()\n return fileobj\n except (AttributeError, MutagenError):\n raise exceptions.NotAnAudioFileError(path)", "def convert_wav(src_wav, dst_wav, subtype='PCM_16'):\n assert os.path.exists(src_wav), \"{} not exists!\".format(src_wav)\n data, sr = soundfile.read(src_wav)\n soundfile.write(dst_wav, data, sr, subtype=subtype)", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def test_alaw(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.wav\")\n sox_utils.gen_audio_file(\n path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=8, encoding=\"a-law\", duration=duration\n )\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 8\n assert info.encoding == \"ALAW\"", "def is_audio(self):\n val = False\n if self.__dict__['codec_type']:\n if str(self.__dict__['codec_type']) == 'audio':\n val = True\n return val", "def play_audio():\n play_file = input(\"Would you like to play the file we created (y/n)? 
\")\n if play_file == \"y\":\n os.system(\"open output_audio.mp3\")\n else:\n print(\"Thanks for using our service, the file exists in your directory where you ran this file.\")", "def sound(*args, endTime: Union[time, bool]=None, file: Union[AnyStr, bool]=\"\", length:\n bool=True, mute: bool=True, name: Union[AnyStr, bool]=\"\", offset: Union[time,\n bool]=None, sourceEnd: Union[time, bool]=None, sourceStart: Union[time, bool]=None,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def write_wav(self, full_out_file = None):\n\n if full_out_file is None:\n \n (out_file, out_dir) = misc.save_file(FilterSpec='*.wav', DialogTitle='Write sound to ...', \n DefaultName='')\n full_out_file = os.path.join(out_dir, out_file)\n if full_out_file is None:\n print('Output discarded.')\n return 0\n else:\n full_out_file = os.path.abspath(full_out_file)\n (out_dir , out_file) = os.path.split(full_out_file)\n\n write(str(full_out_file), int(self.rate), self.data)\n print('Sounddata written to ' + out_file + ', with a sample rate of ' + str(self.rate))\n print('OutDir: ' + out_dir)\n \n return full_out_file", "def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio", "def play_audio(filename):\n chunk = 1024\n wf = wave.open(filename, 'rb')\n pa = pyaudio.PyAudio()\n stream = pa.open(\n format=pa.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True\n )\n data_stream = wf.readframes(chunk)\n while data_stream:\n stream.write(data_stream)\n data_stream = wf.readframes(chunk)\n stream.close()\n pa.terminate()", "def write_wave(path, audio, sample_rate):\n with contextlib.closing(wave.open(path, 'wb')) as wf:\n wf.setnchannels(1)\n wf.setsampwidth(2)\n wf.setframerate(sample_rate)\n wf.writeframes(audio)", "def get_audio(path):\n return send_from_directory('audio', path)", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def write(\n path: Union[str, Path],\n music: \"Music\",\n kind: Optional[str] = None,\n **kwargs: Any\n):\n if kind is None:\n if str(path).lower().endswith((\".mid\", \".midi\")):\n kind = \"midi\"\n elif (\n str(path).lower().endswith((\".mxl\", \".xml\", \".mxml\", \".musicxml\"))\n ):\n kind = \"musicxml\"\n elif str(path).lower().endswith(\".abc\"):\n kind = \"abc\"\n elif str(path).lower().endswith((\"wav\", \"aiff\", \"flac\", \"oga\")):\n kind = \"audio\"\n else:\n raise ValueError(\n \"Got unsupported file format (expect MIDI, MusicXML, ABC, \"\n \"WAV, AIFF, FLAC or OGA).\"\n )\n if kind == \"midi\":\n return write_midi(path, music, **kwargs)\n if kind == \"musicxml\":\n return write_musicxml(path, music, **kwargs)\n if kind == \"abc\":\n return write_abc(path, music)\n if kind == \"audio\":\n return write_audio(path, music, **kwargs)\n raise ValueError(\"`kind` must be 'midi', 'musicxml', 'abc' or 'audio'.\")", "def recorder():\n # Following block gets rid of annoying config errors by ALSA\n def 
py_error_handler(filename, line, function, err, fmt):\n pass\n ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)\n c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)\n asound = cdll.LoadLibrary('libasound.so')\n asound.snd_lib_error_set_handler(c_error_handler) \n\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n yield stream\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def load_audio(path):\r\n if path[-4:] == \".wav\":\r\n fs, data = load_wav(path)\r\n\r\n elif path[-4:] == \".mp3\":\r\n fs, data = load_mp3(path)\r\n\r\n else:\r\n raise ValueError(\"Wrong file format, use mp3 or wav\")\r\n\r\n return fs, data", "def audio_media_type(name):\n return name.endswith(('.ogg', '.oga', '.m4a'))", "def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res", "def play(self, context=None):\n\n self.nowPlaying = True\n\n # Open file for reading\n wf = wave.open(self.path + '/' + self.name, 'rb')\n p = pyaudio.PyAudio()\n\n # Open stream for playback\n stream = p.open( format = p.get_format_from_width( wf.getsampwidth() ),\n channels = wf.getnchannels(),\n rate = wf.getframerate(), output = True)\n\n # Read file in chunks of 1024 bytes\n data = wf.readframes(1024)\n\n # Read while there is data left to read\n # If nowPlaying is False, user has clicked Stop\n while data != '' and self.nowPlaying:\n stream.write(data)\n data = wf.readframes(1024)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n self.nowPlaying = False\n\n # Callback to UI to signal that audio has finished playing\n if context is not None:\n context.stopAudio()", "def make_wav(text, speed=1.0, emotion='normal', output_file='__temp.wav', output_dir=os.getcwd()):\n open_jtalk = [OPENJTALK_BINPATH + '/open_jtalk']\n mech = ['-x', OPENJTALK_DICPATH]\n htsvoice = ['-m', OPENJTALK_VOICEPATH.format(emotion=emotion)]\n speed = ['-r', str(speed)]\n outwav = ['-ow', os.path.join(output_dir, output_file)]\n cmd = open_jtalk + mech + htsvoice + speed + outwav\n c = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n c.stdin.write(text.encode('utf-8'))\n c.stdin.close()\n c.wait()\n return os.path.join(output_dir, output_file)", "def audio(self):\n self.log_string += 'Audio file'\n self._media_processing()", "def direct_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n \n data_file = open(file_name, 'rb', chunk)\n\n # PyAudio instance\n p = pyaudio.PyAudio()\n\n # Open stream\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n data = data_file.read(chunk)\n\n # Playing the data\n while data != \"\":\n stream.write(data)\n data = data_file.read(chunk)\n\n # Ending things\n stream.stop_stream()\n stream.close()\n p.terminate()", "def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")", "def on_open(ws):\n data = {\n 
\"action\": \"start\",\n # this means we get to send it straight raw sampling\n \"content-type\": \"audio/l16;rate=%d\" % RATE,\n \"interim_results\": True,\n \"profanity_filter\": False\n }\n\n # Send the initial control message which sets expectations for the\n # binary stream that follows:\n ws.send(json.dumps(data).encode('utf8'))\n # Spin off a dedicated thread where we are going to read and\n # stream out audio.\n threading.Thread(target=read_audio, args=[ws]).start()", "def has_audio(self):\n return self.__audio_format is not None", "def SaveAsAudioFile(default_dir=None, default_file=None):\n\n if default_dir is None:\n default_dir = os.path.dirname(paths.sppas)\n\n if default_file is None:\n default_file = \"newfile.wav\"\n\n save_file = None\n\n wildcard = create_wildcard(\"All files\", sppas.src.audiodata.aio.extensions)\n wildcard += '|' + create_wildcard(\"Wave\", sppas.src.audiodata.aio.ext_wav)\n wildcard += '|' + create_wildcard(\"SunAu\", sppas.src.audiodata.aio.ext_sunau)\n\n dlg = wx.FileDialog(\n None,\n message=\"Choose a file name...\",\n defaultDir=default_dir,\n defaultFile=default_file,\n wildcard=wildcard,\n style=wx.FD_SAVE | wx.FD_CHANGE_DIR )\n\n if dlg.ShowModal() == wx.ID_OK:\n save_file = dlg.GetPath()\n\n dlg.Destroy()\n\n return save_file", "def load_jam_audio(\n jam_in, audio_file, validate=True, strict=True, fmt=\"auto\", **kwargs\n):\n\n if isinstance(jam_in, jams.JAMS):\n jam = jam_in\n elif jam_in is None:\n jam = jams.JAMS()\n else:\n jam = jams.load(jam_in, validate=validate, strict=strict, fmt=fmt)\n\n y, sr = librosa.load(audio_file, **kwargs)\n\n if jam.file_metadata.duration is None:\n jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)\n\n return jam_pack(jam, _audio=dict(y=y, sr=sr))", "def set_audio_sink(core, filenameOrHandle):\n\tres = wave.open(filenameOrHandle, \"wb\")\n\tres.setnchannels(2)\n\tres.setsampwidth(2)\n\tres.setframerate(SNES_OUTPUT_FREQUENCY)\n\tres.setcomptype('NONE', 'not compressed')\n\n\tdef audio_sample(left, right):\n\t\t# We can safely use .writeframesraw() here because the header will be\n\t\t# corrected once we call .close()\n\t\tres.writeframesraw(sndstruct.pack(left, right))\n\n\tcore.set_audio_sample_cb(audio_sample)\n\n\treturn res", "def read_sound(self):\n sound = True # Sound is ON by default - for Android marker\n\n try:\n if self.store.exists(SOUND_STORE):\n current_sound_str = self.store.get(SOUND_STORE)['sound']\n if current_sound_str == 'ON':\n sound = True\n elif current_sound_str == 'OFF':\n sound = False\n else:\n sound = False\n except:\n print 'Exception when reading Galaxy sound configuration from JSON file!'\n\n return sound", "def getAudio(self):\n audioString = self.inStream.read(self.BUFFERSIZE)\n self.newAudio = True\n return numpy.fromstring(audioString, dtype=numpy.int16)", "def write_wav(fname, samps, sampling_rate=16000, normalize=True):\n\t# for multi-channel, accept ndarray [Nsamples, Nchannels]\n\tif samps.ndim != 1 and samps.shape[0] < samps.shape[1]:\n\t\tsamps = np.transpose(samps)\n\t\tsamps = np.squeeze(samps)\n\t# same as MATLAB and kaldi\n\tif normalize:\n\t\tsamps = samps * MAX_INT16\n\t\tsamps = samps.astype(np.int16)\n\tfdir = os.path.dirname(fname)\n\tif fdir and not os.path.exists(fdir):\n\t\tos.makedirs(fdir)\n\t# NOTE: librosa 0.6.0 seems could not write non-float narray\n\t# so use scipy.io.wavfile instead\n\twavfile.write(fname, sampling_rate, samps)", "def audiofile(self, directory=None):\n\n return self.make_path(directory, '.wav')", "def read(self, path, 
size, offset, fh, *args, **pargs):\n with self.rwlock:\n if(path in self._open_subtracks):\n real = False\n # Update the last accessed time.\n self._open_subtracks[path]['Last Access'] = time.time()\n # Store the requested offset.\n self._open_subtracks[path]['Positions'][fh] = offset\n else:\n real = True\n if(real):\n # For all non-FLACCue files, just access it normally.\n os.lseek(fh, offset, 0)\n return os.read(fh, size)\n # Wait for the file to finish opening.\n while(True):\n with(self.rwlock):\n self._open_subtracks[path]['Last Access'] = time.time()\n if(self._open_subtracks[path]['Audio'] is not None):\n audio = self._open_subtracks[path]['Audio']\n break\n time.sleep(0.1)\n # Return the data requested.\n if(offset > len(audio)):\n # If we're looking near the end of the file,\n # handle the fact that compression could change the size.\n reported_size = self.getattr(path)['st_size']\n if(offset < reported_size):\n offset = len(audio) - (reported_size - offset)\n return audio[offset:offset+size].tobytes()", "def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def test_ulaw(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.wav\")\n sox_utils.gen_audio_file(\n path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=8, encoding=\"u-law\", duration=duration\n )\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 8\n assert info.encoding == \"ULAW\"", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def make_audio(audio_path):\n content, sample_rate = librosa.load(audio_path, sr=16000)\n del sample_rate\n if content.dtype in (np.float32, np.float64):\n content = (content * np.iinfo(np.int16).max).astype(np.int16)\n return speech.RecognitionAudio(content=content.tobytes())", "def modulated_play(file_name,\n sample_rate = 44100,\n chunk = 1024,\n channel = 1,\n width = 2):\n\n data = np.fromfile(file_name, dtype = np.uint8)\n wave = custom.modulate(data)\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format = p.get_format_from_width(width),\n channels = channel,\n rate = sample_rate,\n output = True)\n\n stream.write(wave)\n\n stream.stop_stream()\n stream.close()\n p.terminate()", "def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = 
os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()", "def get_audio(filepath, restrict=restrict_range, use_librosa=False, normalize=True):\n try:\n audio, fs = librosa.load(path=filepath, sr=22050)\n except Exception as e:\n fs, audio_ro = scipy.io.wavfile.read(filepath)\n audio = np.copy(audio_ro) / 32767\n if fs != 22050:\n print(\"incorrect fs\")\n return None\n # frame-wise calculation\n if restrict:\n start = start_sec * fs\n end = end_sec * fs\n audio = np.array(audio[start:end], dtype=np.float32)\n if normalize is True:\n audio = (cqt_params['normalizing_constant'] * audio) / np.std(audio[np.abs(audio > 0.00001)])\n return audio", "def mp3_to_wav(mp3_file, wav_file, encoder='mpg123'):\n if encoder == 'mpg123':\n bash_command = ['mpg123', '-w', wav_file, '--mono', mp3_file]\n else:\n bash_command = ['ffmpeg', '-i', mp3_file, wav_file]\n subprocess.run(bash_command)", "def post(self, request, format=None, *args, **kwargs):\n try:\n data = request.data\n audio_file = kwargs.get('audioFileType','')\n audio_file_type = audio_file.lower()\n\n if audio_file_type == 'audiobook':\n serializer = AudioBookSerializer(data=data)\n if serializer.is_valid():\n\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if audio_file_type == 'podcast':\n serializer = PodcastSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if audio_file_type == 'song':\n serializer = SongSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n except Exception:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. 
Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")", "def create_stream(self, audio_format):\n mainloop = self.mainloop()\n assert mainloop is not None\n assert self.is_ready\n\n sample_spec = self.create_sample_spec(audio_format)\n channel_map = None\n\n # TODO It is now recommended to use pa_stream_new_with_proplist()\n stream = pa.pa_stream_new(self._pa_context,\n str(id(self)).encode('ASCII'),\n sample_spec,\n channel_map)\n self.check_not_null(stream)\n return PulseAudioStream(mainloop, self, stream)", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each of the mp3 files in each subdirectory of data/fma_small\n file_list = glob.glob('./../data/fma_small/*/*.mp3')\n\n # Get the number of files N and initialize a counter\n N = len(file_list)\n counter = 0\n\n # For each file/filepath, convert that file to wav format and save it to data/wavs/*/*.wav (so as a wave file)\n for filepath in file_list:\n\n # Every 100 file conversions, print a progress update\n if counter % 50 == 49 and show_progress:\n progress = str(round(100 * counter / N, 2))\n print('File conversion ' + progress + '% complete.')\n\n # Get the 
file name from the path and define a new path for the wav file\n file_name = filepath[24:-4]\n new_path = './../data/wavs/' + file_name + '.wav'\n\n # Call the subprocess using ffmpeg to convert the file to wav format (and supress all the output)\n subprocess.call(['ffmpeg', '-i', filepath, new_path], stdout=devnull)\n\n # Increment the counter\n counter += 1", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def __newSampleFile(self):\n self.__newFileName()\n self.__sampleFile = wav.open(self.__fileName, self.OPEN_MODE)\n self.__sampleFile.setnchannels(NUM_CHANNELS)\n self.__sampleFile.setsampwidth(self.__audio.get_sample_size(self.FORMAT))\n self.__sampleFile.setframerate(FS)", "def record_audio(self, time):\n p = pyaudio.PyAudio()\n stream = p.open(format=self.format,\n channels=self.channels,\n rate=self.rate,\n input=True,\n frames_per_buffer=self.chunk)\n\n print(\"* recording\")\n\n frames = []\n for i in range(0, int(self.rate / self.chunk * time)):\n data = stream.read(self.chunk)\n frames.append(data)\n\n print(\"* done recording\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n return p.get_sample_size(self.format), b''.join(frames)", "def convert(file):\n extension = file[-4:]\n if extension == '.wav':\n return file\n if not exists(file):\n raise IOError('%s file not found' % file)\n if not extension in SUPPORTED_EXTENSION:\n raise IOError('%s file format is not supported' % file)\n if not exists(CONVERTION_DIRECTORY):\n makedirs(CONVERTION_DIRECTORY)\n filename = splitext(basename(file))[0]\n path = join(CONVERTION_DIRECTORY, filename + '.wav')\n if (not exists(path)):\n logging.info(\"Converting file %s\" % file)\n CONVERTERS[extension](file).export(path, format='wav')\n return path", "def write_wav(filename, data, rate = 44100):\r\n \r\n # Compress the data (the input format is likely to be float64)\r\n # Make sure that the format is readable by Librosa\r\n maxv = np.iinfo(np.int16).max\r\n lb_write_wav(filename, (data * maxv).astype(np.int16), rate) \r\n \r\n return(None)", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def play_audio(audio_bytes: bytes):\n with wave.open(io.BytesIO(audio_bytes)) as wav:\n channels = wav.getnchannels()\n rate = wav.getframerate()\n width = wav.getsampwidth()\n print(wav.getparams())\n\n pya = pyaudio.PyAudio()\n stream = pya.open(\n format=pya.get_format_from_width(width=width),\n channels=channels,\n rate=rate,\n output=True,\n )\n log.debug(\"Audio started\")\n stream.write(audio_bytes)\n stream.stop_stream()\n stream.close()\n pya.terminate()\n log.info(\"Audio 
finished\")", "def test_wav_multiple_channels(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def wavPlayer(data, rate, scale=False, autoplay=False):\r\n #if np.max(abs(data)) > 1 or scale:\r\n # data = data/np.max(abs(data))\r\n #data = (2**13*data).astype(np.int16)\r\n \r\n buffer = BytesIO()\r\n buffer.write(b'RIFF')\r\n buffer.write(b'\\x00\\x00\\x00\\x00')\r\n buffer.write(b'WAVE')\r\n \r\n buffer.write(b'fmt ')\r\n if data.ndim == 1:\r\n noc = 1\r\n else:\r\n noc = data.shape[1]\r\n \r\n bits = data.dtype.itemsize * 8\r\n sbytes = rate*(bits // 8)*noc\r\n ba = noc * (bits // 8)\r\n buffer.write(struct.pack('<ihHIIHH', 16, 1, noc, rate, sbytes, ba, bits))\r\n\r\n # data chunk\r\n buffer.write(b'data')\r\n buffer.write(struct.pack('<i', data.nbytes))\r\n\r\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\r\n data = data.byteswap()\r\n\r\n buffer.write(data.astype(np.int16).tostring())\r\n\r\n # Determine file size and place it in correct position at start of the file.\r\n size = buffer.tell()\r\n buffer.seek(4)\r\n buffer.write(struct.pack('<i', size-8))\r\n \r\n val = buffer.getvalue()\r\n autoplay = \" autoplay=\\\"autoplay\\\"\"*autoplay + \"\"\r\n \r\n src = \"\"\"<audio controls=\"controls\" style=\"width:600px\"{autoplay}>\r\n <source controls src=\"data:audio/wav;base64,{base64}\" type=\"audio/wav\" />\r\n Your browser does not support the audio element.\r\n </audio>\"\"\".format(base64=base64.b64encode(val).decode(\"ascii\"), autoplay=autoplay)\r\n display(HTML(src))", "def _play_audio(audio_file, no_audio_alerts=False):\n if not no_audio_alerts:\n audio_path = join(os.getcwd(), 'docs', 'audio', audio_file) # hard-coded path to file, can be changed\n wave_obj = sa.WaveObject.from_wave_file(audio_path)\n play_obj = wave_obj.play()\n play_obj.stop()", "def audio_io(f):\n\n @functools.wraps(f)\n def dec(args, stdin=None, stdout=None, stderr=None, spec=None, stack=None):\n ain = _stash_get_audio(stdin, stderr, spec)\n aout = f(ain, args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec)\n rtn = _stash_set_audio(aout, stdout, stderr, spec)\n return rtn\n\n return dec", "def sound(self, where, stream=True):\n cook = cookie()\n S = Sound(cook, self)\n self.call('sound', cook, where, stream and 1 or 0)\n return S", "def _wave(self):\n try:\n return wave.open(StringIO(self.contents))\n except wave.Error, err:\n err.message += \"\\nInvalid wave file: %s\" % self\n err.args = (err.message,)\n raise", "def play(self):\n\n try:\n if self.source is None:\n # If there is no source-file, write the data to a temporary WAV-file ...\n tmpFile = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)\n tmpFile.close()\n self.write_wav(tmpFile.name)\n \n # ... 
and play that file\n if sys.platform=='win32':\n winsound.PlaySound(tmpFile.name, winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', tmpFile.name]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(tmpFile.name)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', tmpFile.name]\n #subprocess.run(cmd)\n \n elif os.path.exists(self.source):\n # If you have a given input file ...\n print('Playing ' + self.source)\n \n # ... then play that one\n if sys.platform == 'win32':\n winsound.PlaySound(str(self.source), winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', str(self.source)]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(self.source)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', self.source]\n #subprocess.run(cmd)\n \n except SystemError:\n print('If you don''t have FFMPEG available, you can e.g. use installed audio-files. E.g.:')\n print('import subprocess')\n print('subprocess.run([r\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\", r\"C:\\Music\\14_Streets_of_Philadelphia.mp3\"])')", "def record_audio(self):\n stream = self.audio.open(format=DEFAULT_FORMAT,\n channels=DEFAULT_CHANNELS,\n rate=DEFAULT_RATE,\n input=True,\n frames_per_buffer=DEFAULT_CHUNK_SIZE)\n\n print(\"Recording...\")\n\n for i in range(0, int(DEFAULT_RATE / DEFAULT_CHUNK_SIZE * RECORD_SECONDS)):\n data = stream.read(DEFAULT_CHUNK_SIZE)\n self.frames.append(data)\n\n print(\"Done.\")\n\n stream.stop_stream()\n stream.close()", "def raw_to_wav(data, path, rate=44100):\n wavfile.write(path, rate, data)", "def save_audio(self, name=DEFAULT_OUT_NAME):\n print(\"Saving...\")\n wf = wave.open(name+'.wav', 'wb')\n wf.setnchannels(DEFAULT_CHANNELS)\n wf.setsampwidth(self.audio.get_sample_size(DEFAULT_FORMAT))\n wf.setframerate(DEFAULT_RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n print('Saved')", "def polly_write_response(inresponse: object):\n if \"AudioStream\" in inresponse:\n with closing(inresponse[\"AudioStream\"]) as stream:\n output = os.path.join(os.getcwd(), \"speech.mp3\")\n print(output)\n try:\n with open(output, \"wb\") as file:\n file.write(stream.read())\n except IOError as error:\n print(error)\n sys.exit(1)\n else:\n print(\"Could not stream audio\")\n sys.exit(1)", "def play_wav_on_index(audio_data, stream_object):\n\n stream_object.write(audio_data)", "def sound_mode(self):\n return self._sound_mode", "async def sound(self, ctx, name='default', start=0):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n \n if not (ctx.author.voice or voice):\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")\n return\n \n ffmpeg_path = os.environ['FFMPEG_PATH']\n sound_path = f'sounds/{name}.ogg'\n ffmpeg_opts = {'options': f'-ss {start}'}\n\n if not os.path.isfile(sound_path):\n if name == 'default':\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(\"No sound specified.\")\n else:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(\"Sound file not found.\")\n return\n \n audio = discord.FFmpegPCMAudio(executable=ffmpeg_path,\n source=sound_path, **ffmpeg_opts)\n sound = discord.PCMVolumeTransformer(audio)\n \n if not voice:\n await 
self.join(ctx)\n\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.stop()\n \n voice.play(sound)\n await ctx.send(f\"Playing `{name}.ogg`.\")", "def audio(self, src=None, **kw):\n if src:\n kw['src'] = src\n kw['type'] = 'audio/mpeg'\n return '<audio preload=\"auto\" controls=\"controls\">\\n%s\\nYour browser does not support HTML5 audio.\\n</audio>' % self._open('source', **kw)", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()", "def test_simple_wav_source_constructor():\n TESTPATH = \"/usr/local/share/testfile.wav\"\n test01 = WavSource(dpath=TESTPATH)\n debug(test01)\n assert(test01.path == TESTPATH)\n assert(test01.fname == \"testfile.wav\")\n assert(test01.root == \"testfile\")\n assert(test01.ext == \".wav\")\n assert(test01.isValidExtension(\".wav\") is True)", "def put(self, request, pk, format=None, *args, **kwargs):\n try:\n data = request.data\n audio_file = kwargs.get('audioFileType','')\n audio_file_type = audio_file.lower()\n \n if audio_file_type == 'audiobook':\n audio_instance = get_object_or_404(AudioBook, id=pk)\n serializer = AudioBookSerializer(audio_instance, data=data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if audio_file_type == 'podcast':\n audio_instance = get_object_or_404(Podcast, id=pk)\n \n serializer = PodcastSerializer(audio_instance, data=data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if audio_file_type == 'song':\n audio_instance = get_object_or_404(Song, id=pk)\n serializer = SongSerializer(audio_instance, data=data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_200_OK)\n except Exception:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def _create(self, sound: (list, tuple), data: dict):\n # data substitution\n # TODO: use other params\n inversion: int = data['inversion']\n single_tone: bool = data['single_tone']\n with_bass: bool = data['with_bass']\n bass_note: int = data['bass_note']\n transpose: int = data['transpose']\n octave: int = data['octave']\n pitch: float = data['pitch']\n sec: float = data['sec']\n sampling_rate: int = data['sampling_rate']\n volume_adjustment: (str, float) = data['volume_adjustment']\n title: str = data['title']\n at: str = data['at']\n\n # -*- data sanitization -*-\n # transpose range\n if transpose 
< -11 or 11 < transpose:\n raise ValueError('\\'transpose\\' should be between -11 and 11.')\n\n # pitch range\n if pitch < 410 or 494 < pitch:\n raise ValueError('\\'pitch\\' should be between 410 and 494.')\n\n # file title regulation\n if not re.fullmatch(r'.+?\\.wav$', title):\n title += '.wav'\n\n # wave initialization\n wave = SoundGenerator.oscillator(0, sec, sampling_rate)\n # -*- end of the data sanitization -*-\n\n # elements' frequencies\n fn = -1 # fn is a num the one before\n\n # wave synthesize\n for i in sound:\n if fn >= i:\n # 15 = 12(octave) + 3(C base-> A base convert)\n f = pitch * 2 ** ((15 + i) / 12)\n else:\n f = pitch * 2 ** ((3 + i) / 12)\n\n wave += SoundGenerator.oscillator(f, sec, sampling_rate)\n\n # memory a number the one before\n fn = i\n\n # volume controlling\n if volume_adjustment == 'auto':\n wave *= 0.1\n elif isinstance(volume_adjustment, (int, float)):\n wave *= volume_adjustment\n else:\n ValueError('\\'volume_adjustment\\' should be \\'auto\\' or float.')\n\n # wave convert\n wave = (wave * float(2 ** 15 - 1)).astype(np.int16)\n\n # path management\n if at is None:\n pth = os.path.join(os.getcwd(), title)\n else:\n os.chdir(at)\n pth = os.path.join(os.getcwd(), title)\n\n # make wave_file\n wavfile.write(pth, sampling_rate, wave)", "def open(self, path, flags, *args, **pargs):\n # We don't want FLACCue messing with actual data.\n # Only allow Read-Only access.\n if((flags | os.O_RDONLY) == 0):\n raise ValueError('Can only open files read-only.')\n raw_path = path\n path, meta = self.find_cue_path(path, verbose=self._verbose)\n # Handle the FLACCue files.\n if('.flaccuesplit.' in path):\n # Get a path to the actual file name.\n # Note that files accessed through FLACCue will\n # still read normally--we just need to trim off the song\n # times and fix the file extension.\n path, flaccue_details = path.split('.flaccuesplit.')\n path = self.clean_path(path)\n times, extension = os.path.splitext(flaccue_details)\n\n # Now get the start and end times.\n start, end = times.split('.')\n # Convert them from strings to floating point seconds.\n # Minutes:Seconds:Frames\n # 75 frames per second.\n start_split = [int(x) for x in start.split(':')]\n if(len(start_split) != 3):\n start_time = 0\n else:\n start_time = start_split[0]*60 + start_split[1] + start_split[2]/75\n end_split = [int(x) for x in end.split(':')]\n if(len(end_split) != 3):\n # Nothing longer than 10 hours.\n end_time = 3600*10\n else:\n end_time = end_split[0]*60 + end_split[1] + end_split[2]/75\n\n # Hold a file handle for the actual file.\n fd = os.open(path, flags, *args, **pargs)\n with self.rwlock:\n # If we've already processed this file and still have it in memory.\n if(raw_path in self._open_subtracks):\n if(self._open_subtracks[raw_path]['Audio'] is not None):\n # Update the stored info.\n self._open_subtracks[raw_path]['Last Access'] = time.time()\n self._open_subtracks[raw_path]['Positions'][fd] = 0\n # Return the file handle.\n return fd\n else:\n # We're still processing this track. 
Wait for it to finish.\n process = False\n self._open_subtracks[raw_path]['Positions'][fd] = 0\n else:\n # This is a new track to process.\n process = True\n self._open_subtracks[raw_path] = {'Positions': {fd: 0},\n 'Last Access': time.time(),\n 'Audio': None,\n }\n if(process):\n def load():\n if(self._verbose):\n print(f'Loading {raw_path}...', flush=True)\n # Otherwise, we have to process the FLAC file to extract the track.\n # Open the file with FFMPEG.\n track = ffmpeg.input(path)\n if(self._use_tempfile):\n # Use a tempfile so ffmpeg can update metadata after finishing\n # compression.\n with tempfile.TemporaryDirectory() as temp:\n filename = os.path.join(temp, f'temp.{self._format}')\n # Set the output to convert to a temporary file.\n # Trim it to start at start_time and end at end_time.\n try:\n output = track.output(filename, ss=start_time, to=end_time,\n format=self._format, compression_level=0,\n **meta)\n except TypeError:\n # compression_level not supported for the format?\n output = track.output(filename, ss=start_time, to=end_time,\n format=self._format, **meta)\n # Do the conversion.\n output.run()\n # Read the temporary file in as a bytes buffer.\n with open(filename, 'rb') as f:\n data = f.read()\n else:\n # Set the output to convert to a wave file and pipe to stdout.\n # Trim it to start at start_time and end at end_time.\n try:\n output = track.output('pipe:', ss=start_time, to=end_time,\n format=self._format, compression_level=0,\n **meta)\n except TypeError:\n # compression_level not supported for the format?\n output = track.output('pipe:', ss=start_time, to=end_time,\n format=self._format, **meta)\n # Do the conversion. Capture stdout into a buffer.\n data, _ = output.run(capture_stdout=True)\n # Convert the buffer to a numpy array. Use bytes to access just like a\n # normal file.\n # Convert the buffer to a numpy array. 
Use bytes to access just like a\n # normal file.\n audio = numpy.frombuffer(data, dtype=numpy.uint8)\n\n with(self.rwlock):\n # Keep a copy of the data in memory.\n self._open_subtracks[raw_path]['Last Access'] = time.time()\n self._open_subtracks[raw_path]['Audio'] = audio\n\n # Define a function that will clean up the memory use once it hasn't been\n # used for a while.\n def cleanup():\n # Wait until there has been no access to the data for 60 seconds.\n while(True):\n with(self.rwlock):\n # Do this all within the same lock to avoid potential changes\n # in between the check and deletion.\n if(time.time() - self._open_subtracks[raw_path]['Last Access'] > 60 and\n len(self._open_subtracks[raw_path]['Positions']) == 0):\n del self._open_subtracks[raw_path]\n break\n # Check every 5 seconds.\n time.sleep(5)\n if(self._verbose):\n print(f'{raw_path} closed.', flush=True)\n\n # Start a thread running that function.\n thread = threading.Thread(target=cleanup)\n thread.start()\n\n # Start a thread running that function.\n thread = threading.Thread(target=load)\n thread.start()\n # Return the file handle.\n return fd\n else:\n # With any other file, just pass it along normally.\n # This allows FLAC files to be read with a FLACCue path.\n # Note that you do not want to run this as root as this will\n # give anyone read access to any file.\n return os.open(path, flags, *args, **pargs)", "def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, mono=to_mono)", "def get(self, request, pk, format=None, *args, **kwargs):\n try:\n audio_file = kwargs.get('audioFileType','')\n audio_file_type = audio_file.lower()\n\n if audio_file_type == 'audiobook':\n obj = get_object_or_404(AudioBook, id=pk)\n serializer = AudioBookSerializer(obj)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n if audio_file_type == 'podcast':\n obj = get_object_or_404(Podcast, id=pk)\n serializer = PodcastSerializer(obj)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n if audio_file_type == 'song':\n obj = get_object_or_404(Song, id=pk)\n serializer = SongSerializer(obj)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n except Exception:\n return Response(status=status.HTTP_400_BAD_REQUEST)" ]
[ "0.600975", "0.59122187", "0.5795555", "0.5784565", "0.57556313", "0.5655955", "0.5629821", "0.562926", "0.5615369", "0.553294", "0.5507199", "0.54114854", "0.536809", "0.53627324", "0.53616863", "0.53572196", "0.53521544", "0.53502065", "0.5343416", "0.53366435", "0.53349227", "0.53318644", "0.5317694", "0.5271728", "0.52638865", "0.5258433", "0.5258399", "0.5254582", "0.5252497", "0.5234239", "0.52340114", "0.523361", "0.52292967", "0.5206333", "0.51640755", "0.515522", "0.51455295", "0.51290804", "0.5127689", "0.51234293", "0.5120464", "0.50878376", "0.50741684", "0.5044272", "0.5043305", "0.50424844", "0.5042155", "0.5042027", "0.5032014", "0.5025475", "0.50230455", "0.50215715", "0.50204414", "0.5018273", "0.50092506", "0.50060457", "0.49974182", "0.49875212", "0.49841842", "0.49810338", "0.49712765", "0.4964335", "0.49568617", "0.4930046", "0.4927271", "0.49247098", "0.49145713", "0.49144182", "0.49142948", "0.4902897", "0.49016872", "0.48982203", "0.4897648", "0.4893373", "0.4893086", "0.4884092", "0.48831213", "0.48811105", "0.48693973", "0.48688963", "0.48683387", "0.48673436", "0.48562065", "0.48485917", "0.48453072", "0.48390535", "0.48356786", "0.48298672", "0.48284853", "0.48221847", "0.48200086", "0.48169145", "0.48152483", "0.48119044", "0.48019928", "0.4799771", "0.47953877", "0.47732416", "0.47724494", "0.47676566" ]
0.80431217
0
Read the file by sector, as determined by sector_size (in frames). A sector is an array of channels, each of which is an array of magnitudes. Returns a tuple containing a generator for the sectors and the number of frames not covered by the generator.
def read_sectors(self, sector_size):
    in_n_sectors, in_n_extra_frames = divmod(self.get_n_frames(), sector_size)
    return (self.__read_sectors_generator(sector_size, in_n_sectors), in_n_extra_frames)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sectors_for_bytes(self,count):\n return (count+self.sectorsize-1)//self.sectorsize", "def read_chunks(\n file_object: \"IO\", chunk_size: int = None\n) -> \"Generator[Union[str, bytes], None, None]\":\n chunk_size = chunk_size if chunk_size and isinstance(chunk_size, int) else 2**24\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def chunkify(self, size=1024*1024*5):\n with open(self.file_name_raw, 'rb') as file:\n chunk_end = file.tell()\n while True:\n chunk_start = chunk_end\n file.seek(size, 1)\n file.readline()\n chunk_end = file.tell()\n\n if chunk_end > self.file_end:\n chunk_end = self.file_end\n yield chunk_start, chunk_end - chunk_start\n break\n else:\n yield chunk_start, chunk_end - chunk_start", "def _inner_file_reader(\r\n file_name: str,\r\n chunk_size: int = 32*1024*1024,\r\n delimiter: str = \"\\n\"):\r\n with open(file_name, 'r', encoding=\"utf8\") as f:\r\n carry_forward = \"\"\r\n chunk = \"INITIALIZED\"\r\n while len(chunk) > 0:\r\n chunk = f.read(chunk_size)\r\n augmented_chunk = carry_forward + chunk\r\n lines = augmented_chunk.split(delimiter)\r\n carry_forward = lines.pop()\r\n yield from lines\r\n if carry_forward:\r\n yield carry_forward", "def iterload(filename, chunk=100, **kwargs):\n stride = kwargs.get('stride', 1)\n atom_indices = cast_indices(kwargs.get('atom_indices', None))\n if chunk % stride != 0 and filename.endswith('.dcd'):\n raise ValueError('Stride must be a divisor of chunk. stride=%d does not go '\n 'evenly into chunk=%d' % (stride, chunk))\n if chunk == 0:\n yield load(filename, **kwargs)\n # If chunk was 0 then we want to avoid filetype-specific code in case of undefined behavior in various file parsers.\n else:\n skip = kwargs.pop('skip', 0)\n if filename.endswith('.h5'):\n if 'top' in kwargs:\n warnings.warn('top= kwarg ignored since file contains topology information')\n\n with HDF5TrajectoryFile(filename) as f:\n if skip > 0:\n xyz, _, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n if atom_indices is None:\n topology = f.topology\n else:\n topology = f.topology.subset(atom_indices)\n\n while True:\n data = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if data == []:\n raise StopIteration()\n in_units_of(data.coordinates, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(data.cell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)\n yield Trajectory(xyz=data.coordinates, topology=topology,\n time=data.time, unitcell_lengths=data.cell_lengths,\n unitcell_angles=data.cell_angles)\n\n if filename.endswith('.lh5'):\n if 'top' in kwargs:\n warnings.warn('top= kwarg ignored since file contains topology information')\n with LH5TrajectoryFile(filename) as f:\n if atom_indices is None:\n topology = f.topology\n else:\n topology = f.topology.subset(atom_indices)\n\n ptr = 0\n if skip > 0:\n xyz, _, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n xyz = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n time = np.arange(ptr, ptr+len(xyz)*stride, stride)\n ptr += len(xyz)*stride\n yield Trajectory(xyz=xyz, topology=topology, time=time)\n\n elif filename.endswith('.xtc'):\n topology = _parse_topology(kwargs.get('top', None))\n with XTCTrajectoryFile(filename) as f:\n if skip > 0:\n xyz, _, _, _ = f.read(skip)\n if 
len(xyz) == 0:\n raise StopIteration()\n while True:\n xyz, time, step, box = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(box, f.distance_unit, Trajectory._distance_unit, inplace=True)\n trajectory = Trajectory(xyz=xyz, topology=topology, time=time)\n trajectory.unitcell_vectors = box\n yield trajectory\n\n elif filename.endswith('.dcd'):\n topology = _parse_topology(kwargs.get('top', None))\n with DCDTrajectoryFile(filename) as f:\n ptr = 0\n if skip > 0:\n xyz, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n # for reasons that I have not investigated, dcdtrajectory file chunk and stride\n # together work like this method, but HDF5/XTC do not.\n xyz, box_length, box_angle = f.read(chunk, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(box_length, f.distance_unit, Trajectory._distance_unit, inplace=True)\n time = np.arange(ptr, ptr+len(xyz)*stride, stride)\n ptr += len(xyz)*stride\n yield Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=box_length,\n unitcell_angles=box_angle)\n\n else:\n log.critical(\"loading complete traj into mem! This might no be desired.\")\n t = load(filename, **kwargs)\n for i in range(skip, len(t), chunk):\n yield t[i:i+chunk]", "def build_chunks(read_bytes, file_size, chunk_size):\n\n chunks = []\n\n index = 0\n start = 0\n\n while start < file_size:\n end = min(start + chunk_size, file_size)\n size = end - start\n\n chunk = FileChunk(index, size, partial(read_bytes, start, size))\n chunks.append(chunk)\n\n index += 1\n start += chunk_size\n\n return chunks", "def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n 
files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd", "def discover_sectors(path_to_file_under_examination,option):\n\n # Variables #\n\n start_carve_sector, end_carve_sector = [],[]\n current__cluster,_current__cluster = 0,0\n\n # Pointing to file and of file cluster total\n # number calculation\n # Different methods for raw image file\n # or for mounted drive\n\n file = open(path_to_file_under_examination, 'rb')\n\n _clusters_total = int(os.path.getsize(path_to_file_under_examination)/_cluster)\n file.seek(cluster_offset * sector)\n print('Clusters to analyse total:',str(_clusters_total),'...')\n\n # Scanning for headers and footers #\n\n while current__cluster <= _clusters_total:\n\n # This is reading one cluster and then moving\n # the pointer one further cluster\n # This approach will not find\n # NTFS resident files\n # And this will not find ZIP files,\n # which are smaller than a cluster\n # Embedded signature and time-sresponses\n # containing files are appr 13 Kb\n # So they can't really be residents\n # This approach will not find\n # non-contiguously clustered files\n\n try:\n current_cluster = file.read(_cluster)\n except Exception as e:\n return start_carve_sector, end_carve_sector\n\n current__cluster += 1\n\n # This will apply the header #\n\n #header_lenght is the lenghts required for signature to work\n beginning_string_to_analyze = current_cluster[0:header_lenght]\n result = re.search(header_hex_code,beginning_string_to_analyze)\n\n # Action if header is present #\n\n if result:\n if result.group(0):\n \n start_carve_sector.append(int(cluster_offset) # Will\n # remember where file starts\n + clusters_per_sectors * (current__cluster - 1))\n\n _current__cluster = 1\n\n while _current__cluster <= maximum_filesize: # Here is\n # administratively set max lenght\n\n # This will read next cluster and move further one cluster #\n\n current_cluster = file.read(_cluster)\n\n _current__cluster += 1\n current__cluster += 1\n\n # This will apply the footer, first to the whole cluster\n # And second to the tail of the next cluster together with the\n # current cluster\n\n result2 = re.search(footer_hex_code,current_cluster)\n if result2:\n if result2.group(0):\n if option == 'algorithm': # 'Algorithmic' read of flag for tail lenght\n if result2.span()[1] + result2.group(0)[21] + result2.group(0)[20] >= len(current_cluster):\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors)* (current__cluster))\n 
# result2.group(0)[21] + result2.group(0)[20] are\n # the value of the trailer lenght\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + (clusters_per_sectors)* (current__cluster))\n else:\n if result2.span()[1] == len(current_cluster):\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors)* (current__cluster))\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + (clusters_per_sectors)* (current__cluster))\n\n cluster_tail_2 = file.read(_cluster)[0:sector] #This\n # is additional cluster-read, not the same read\n joined_tail_2 = current_cluster + cluster_tail_2\n result4 = re.search(footer_hex_code,joined_tail_2)\n if result4:\n if result4.group(0):\n if result2 is None:\n if option == 'algorithm': # 'Algorithmic' read of flag for tail lenght\n if result4.span()[1] + result4.group(0)[21] + result4.group(0)[20] >= len(joined_tail_2):\n end_carve_sector.append(int(cluster_offset)\\\n + 2 + (clusters_per_sectors) * (current__cluster))\n # result4.group(0)[21] + result4.group(0)[20] are\n # the value of the trailer lenght\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors) * (current__cluster))\n else:\n if result4.span()[1] == len(joined_tail_2):\n end_carve_sector.append(int(cluster_offset)\\\n + 2 + (clusters_per_sectors) * (current__cluster))\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors) * (current__cluster))\n\n file.seek(cluster_offset*sector\n + current__cluster*_cluster)\n\n if result2 or result4:\n break\n destination = path_to_file_under_examination.split('\\\\')[-1]\n print('Scan complete at cluster: ' +str(current__cluster - 1)\\\n + ' ' + str(len(start_carve_sector)) +','\n + str(len(end_carve_sector)) + ' start and end sectors found in '\\\n + destination)\n file.close()\n\n return start_carve_sector,end_carve_sector", "def generate_read_list(num_files, world_size):\n return np.array_split(np.arange(num_files), world_size)", "def partial_reader(filename, chunk_size):\n try:\n file = open(filename, 'rb')\n while True:\n chunk = file.read(chunk_size)\n if not chunk:\n return\n yield chunk\n except IOError as e:\n logger.error(\"IOError: %s\" %(str(e)), exc_info=True)\n return", "def chunkedread(self, fp, chunksize):\n\n with fp.open('rb') as f:\n while True:\n chunk = f.read(chunksize)\n if not chunk:\n break\n yield (chunk)", "def read_in_chunks(file_object, chunk_size=32):\r\n while True:\r\n data = file_object.read(chunk_size)\r\n if not data:\r\n break\r\n yield data", "def run_for_sector(self,sector_number,count=1):\n return byterun(bytes=count*self.sectorsize,img_offset=sector_number * self.sectorsize)", "def read_in_chunks(file_object, chunk_size):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def get_chunks(self,file_size):\n chunk_start = 0\n chunk_size = 0xA00000 # 10485760 bytes, default max ssl buffer size\n while chunk_start + chunk_size <= file_size:\n yield(chunk_start, chunk_size)\n chunk_start += chunk_size\n final_chunk_size = file_size - chunk_start\n yield(chunk_start, final_chunk_size)", "def _read_in_chunks(self, file_object, blocksize=4096, chunks=-1,\n shard_index=None):\n i = 0\n while chunks:\n data = file_object.read(blocksize)\n if not data:\n break\n yield data\n i += 1\n\n chunks -= 1", "def read_in_chunks(filePath, chunk_size=1024*1024):\r\n file_object = open(filePath)\r\n while True:\r\n chunk_data = file_object.read(chunk_size)\r\n if not chunk_data:\r\n break\r\n yield chunk_data", "def 
_read_blocks(input_data, size=2**20):\n\n if isinstance(input_data, (BufferedReader, BytesIO)):\n f = input_data\n opened = False\n elif input_data == '-':\n f = sys.stdin.buffer # read binary instead of unicode\n opened = False\n else:\n f = open(input_data, 'rb')\n opened = True\n\n try:\n\n data = f.read(size)\n while len(data) > 0:\n yield data\n data = f.read(size)\n finally:\n if opened:\n f.close()", "def runs_for_sectors(self,sectors):\n\n runs = [byterun(bytes=self.sectorsize,img_offset=x*self.sectorsize) for x in sectors]\n return combine_runs(runs)", "def read_in_chunks(infile, chunk_size=1024*64):\n while True:\n chunk = infile.read(chunk_size)\n if chunk:\n yield chunk\n else:\n # The chunk was empty, which means we're at the end\n # of the file\n return", "def sectors_for_run(self,run):\n start_sector = run.img_offset/self.sectorsize\n sector_count = self.sectors_for_bytes(run.bytes)\n return range(start_sector,start_sector+sector_count)", "def read_chunks(file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def file_sync_read_chunks(radosobject, chunksize, nr, offset=0):\n radosobject.seek(offset * chunksize)\n while nr:\n remains = chunksize\n chunk = ''\n while 1:\n s = radosobject.sync_read(remains)\n if not s:\n if chunk:\n yield chunk\n return\n chunk += s\n remains -= len(s)\n if remains <= 0:\n break\n yield chunk\n nr -= 1", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]", "def get_lzo_blocks(lzo_file):\n\n num_compressed_chksms, num_decompressed_chksms = _parse_header(lzo_file)\n\n while True:\n decompressed_blocksize, = struct.unpack(\">I\", lzo_file.read(4))\n if decompressed_blocksize == 0:\n break\n\n compressed_blocksize, = struct.unpack(\">I\", lzo_file.read(4))\n\n num_chksms_to_skip = num_decompressed_chksms\n if decompressed_blocksize == compressed_blocksize:\n num_chksms_to_skip += num_compressed_chksms\n\n skip = 4 * num_chksms_to_skip\n\n position = lzo_file.tell()\n\n block_start = position - 8 # Rewind back to before the block headers\n next_block = position + compressed_blocksize + skip\n\n yield block_start\n\n lzo_file.seek(next_block) # Seek to the next block", "def read(self, size):\n if size % self.recordsize != 0:\n raise ValueError(\"Cannot read a non-integer number of records\")\n\n # ensure we do not read beyond end\n size = min(size, len(self.files) * self.filesize - self.offset)\n if size <= 0:\n raise EOFError('At end of file!')\n\n # allocate buffer.\n z = np.empty(size, dtype=np.int8)\n\n # read one or more pieces\n iz = 0\n while(iz < size):\n self._seek(self.offset)\n block, already_read = divmod(self.offset, self.filesize)\n fh_size = min(size - iz, self.filesize - already_read)\n z[iz:iz+fh_size] = np.fromstring(self.fh_raw.read(fh_size),\n dtype=z.dtype)\n iz += fh_size\n self.offset += fh_size\n\n return z", "def get_chunks(size):\n chunk_start = 0\n chunk_size = 0x20000\n\n while chunk_start + chunk_size < size:\n yield (chunk_start, chunk_size)\n chunk_start += chunk_size\n if chunk_size < 0x100000:\n chunk_size += 0x20000\n\n if chunk_start < size:\n yield (chunk_start, size - chunk_start)", "def __read_files(self, kind:str, prog:progress=None):\n\t\tself.readed_sectors = 0\n\t\tself.read_elapsed = 0.0\t\n\t\t\n\t\tself.__ipc_send_progress(prog, 0)\n\n\t\tself.filematrix.reset(kind=='dynamic')\n\n\t\twhile not self.filematrix.done():\t\t\n\t\t\tif 
(self.__check_terminated()):\n\t\t\t\treturn;\t\t\t\n\t\t\t\t\t\t\n\t\t\tfp = self.filematrix.next()\t\n\n\t\t\t####logging.info('read path:' + fp.path + ', size: ' + str(fp.size) + ', seed: ' + str(fp.rand_seed))\n\t\t\t\n\t\t\tif not os.path.exists(fp.folder):\n\t\t\t\traise_error(FileExistsError, myerror.dir_error)\n\n\t\t\tfile_time = 0.0\n\t\t\tstart = time.time()\t\t\t\n\t\t\t\n\t\t\twith iolib.fopen(fp.path, 'rd') as f:\n\t\t\t\tremain = fp.size\n\t\t\t\tfile_time = 0.0\n\t\t\t\tstart = 0.0\n\t\t\t\telapsed = 0.0\t\t\t\t\n\t\t\t\t\n\t\t\t\twhile (remain != 0):\n\t\t\t\t\tchunk_sectors = min(remain, self.max_buff_size)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\texpected = self.__random_chunk_pattern(chunk_sectors, fp.rand_seed)\t\t\t\t\t\n\t\t\t\t\t#expected = self.__next_chunk_pattern(chunk_sectors)\t\n\n\t\t\t\t\tif (self.__check_terminated()):\n\t\t\t\t\t\treturn;\n\n\t\t\t\t\treal, bytesRead, elapsed = iolib.read(512 * chunk_sectors, f)\n\t\t\t\t\tfile_time += elapsed\n\t\t\t\t\t\t\t\n\t\t\t\t\tif (real != expected):\n\t\t\t\t\t\tif (self.__check_terminated()):\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\traise_exception(BaseException, myerror.pattern_error, \"compare error at the file:\" + fp.path)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\tself.readed_sectors += int(bytesRead / 512)\n\t\t\t\t\tremain = remain - chunk_sectors\n\t\t\t\t\n\t\t\t\tself.read_elapsed += file_time\t\t\n\t\t\t\ttime.sleep(0.001)\t\t\n\n\t\t\tself.__ipc_send_progress(prog, self.filematrix.get_progress())", "def read_in_chunks(file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def read_in_chunks(file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def read_in_chunks(file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def read_in_chunks(file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def read_chunk(fobj, chunk_size = 2048):\n\twhile True:\n\t\tchunk = fobj.read(chunk_size)\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk", "def iterate_chunks(file, chunk_size):\n chunk = file.read(chunk_size)\n while chunk:\n yield chunk\n chunk = file.read(chunk_size)", "def read_in_chunks(file_object, chunk_size=1024):\r\n while True:\r\n data = file_object.read(chunk_size)\r\n if not data:\r\n break\r\n yield data", "def read(self, size):\n if size % self.recordsize != 0:\n raise ValueError(\"Cannot read a non-integer number of records\")\n\n # ensure we do not read beyond end\n size = min(size, len(self.indices) * self.blocksize - self.offset)\n if size <= 0:\n raise EOFError('At end of file in MultiFile.read')\n\n # allocate buffer for MPI read\n z = np.empty(size, dtype=np.int8)\n\n # read one or more pieces\n iz = 0\n while(iz < size):\n block, already_read = divmod(self.offset, self.blocksize)\n fh_size = min(size - iz, self.blocksize - already_read)\n fh_index = self.indices[block]\n if fh_index >= 0:\n z[iz:iz+fh_size] = np.fromstring(self.fh_raw[fh_index]\n .read(fh_size), dtype=z.dtype)\n else:\n z[iz:iz+fh_size] = 0\n self.offset += fh_size\n iz += fh_size\n\n return z", "def _read_block_v20(self, size, blk_size=5):\n arr = np.empty(size)\n for offset in range(0, size, blk_size):\n values = self._next_noncomment().split()\n arr[offset:offset+blk_size] = np.array(values, dtype=np.float64)\n return arr", "def read_in_chunks(file_object, 
chunk_size=5000000):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def read_partpositions(filename, nspec, ctable=True, clevel=5, cname=\"lz4\", quantize=None):\n\n CHUNKSIZE = 10 * 1000\n xmass_dtype = [('xmass_%d' % (i + 1), 'f4') for i in range(nspec)]\n # note age is calculated from itramem by adding itimein\n out_fields = [\n ('npoint', 'i4'), ('xtra1', 'f4'), ('ytra1', 'f4'), ('ztra1', 'f4'),\n ('itramem', 'i4'), ('topo', 'f4'), ('pvi', 'f4'), ('qvi', 'f4'),\n ('rhoi', 'f4'), ('hmixi', 'f4'), ('tri', 'f4'), ('tti', 'f4')] + xmass_dtype\n raw_fields = [('begin_recsize', 'i4')] + out_fields + [('end_recsize', 'i4')]\n raw_rectype = np.dtype(raw_fields)\n recsize = raw_rectype.itemsize\n\n cparams = bcolz.cparams(clevel=clevel, cname=cname)\n if quantize is not None and quantize > 0:\n out = get_quantized_ctable(raw_rectype, cparams=cparams, quantize=quantize, expectedlen=int(1e6))\n else:\n out = bcolz.zeros(0, dtype=raw_rectype, cparams=cparams, expectedlen=int(1e6))\n\n with open(filename, \"rb\", buffering=1) as f:\n # The timein value is at the beginning of the file\n reclen = np.ndarray(shape=(1,), buffer=f.read(4), dtype=\"i4\")[0]\n assert reclen == 4\n itimein = np.ndarray(shape=(1,), buffer=f.read(4), dtype=\"i4\")\n reclen = np.ndarray(shape=(1,), buffer=f.read(4), dtype=\"i4\")[0]\n assert reclen == 4\n nrec = 0\n while True:\n # Try to read a complete chunk\n data = f.read(CHUNKSIZE * recsize)\n read_records = int(len(data) / recsize) # the actual number of records read\n chunk = np.ndarray(shape=(read_records,), buffer=data, dtype=raw_rectype)\n # Add the chunk to the out array\n out.append(chunk[:read_records])\n nrec += read_records\n if read_records < CHUNKSIZE:\n # We reached the end of the file\n break\n\n # Truncate at the max length (last row is always a sentinel, so remove it)\n out.trim(1)\n # Remove the first and last columns\n out.delcol(\"begin_recsize\")\n out.delcol(\"end_recsize\")\n\n if ctable:\n return out\n else:\n return out[:]", "def read(cls, file, size=BLOCK_SIZE):\n while True:\n data = file.read(size)\n\n if data:\n yield data\n else:\n raise StopIteration", "def iter_chunks(file: io.BytesIO, chunk_size: int = 64 * 1024):\n while True:\n data = file.read(chunk_size)\n if not data:\n break\n yield data", "def file_segment(target, segment_length):\n check_segment_length(segment_length)\n\n if not hasattr(target, 'read'):\n raise TypeError(\n 'Expecting an object that has read attr, received one without!'\n )\n\n while True:\n segment = list(itertools.islice(target, segment_length))\n if not segment:\n return\n yield segment", "def partition_c(n_sample, size, n_cpus):\n n_sample0 = int(math.ceil(size/n_cpus))\n if (n_sample0 > n_sample):\n n_sample = n_sample0\n\n block_id = []\n nbatch = 0\n e = 0\n while (e < size):\n s = n_sample*nbatch\n e = np.min([s + n_sample, size])\n block_id += [(s, e)]\n nbatch += 1\n return block_id, nbatch", "def chunked_reader(name):\n with open(name, \"rb\") as src:\n for chunk in iter(lambda: src.read(4096), b\"\"):\n yield chunk", "def _file_iter(f, size):\n chunk = f.read(size)\n while chunk:\n yield chunk\n chunk = f.read(size)", "def gen_chunk(self, path: pathlib.Path, chunk_size: int = 4096) \\\n -> Iterator[bytes]:\n\n with open(path, 'rb') as f:\n yield f.read(chunk_size)\n yield f.read()", "def chunk_reader(fobj, chunk_size=1024):\r\n while True:\r\n chunk = fobj.read(chunk_size)\r\n if not chunk:\r\n return\r\n yield chunk", "def chunk_reader(fobj, 
chunk_size=1024):\r\n while True:\r\n chunk = fobj.read(chunk_size)\r\n if not chunk:\r\n return\r\n yield chunk", "def chunk_reader(fobj, chunk_size=1024):\n\twhile True:\n\t\tchunk = fobj.read(chunk_size)\n\t\tif not chunk:\n\t\t\treturn\n\t\tyield chunk", "def read_block(file, block_size):\n block = b\"\"\n for i in range(block_size):\n this_byte = file.read(1)\n # If the last block consumed the last char in file:\n if this_byte == b\"\" and i == 0:\n return (-1, False)\n # If we reach EOF prematurely:\n elif this_byte == b\"\":\n block += chr(0).encode()*(block_size - i)\n return (block, False)\n else:\n block += this_byte\n return (block, True)", "def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J", "def nrows(filename):\n with fopen(filename, 'rb') as fdsc:\n bufgen = takewhile(lambda x: x, (fdsc.read(1024*1024) for _ in repeat(None)))\n return sum([buf.count(b'\\n') for buf in bufgen])", "def read(self, path, size, offset, fh, *args, **pargs):\n with self.rwlock:\n if(path in self._open_subtracks):\n real = False\n # Update the last accessed time.\n self._open_subtracks[path]['Last Access'] = time.time()\n # Store the requested offset.\n self._open_subtracks[path]['Positions'][fh] = offset\n else:\n real = True\n if(real):\n # For all non-FLACCue files, just access it normally.\n os.lseek(fh, offset, 0)\n return os.read(fh, size)\n # Wait for the file to finish opening.\n while(True):\n with(self.rwlock):\n self._open_subtracks[path]['Last Access'] = time.time()\n if(self._open_subtracks[path]['Audio'] is not None):\n audio = self._open_subtracks[path]['Audio']\n break\n time.sleep(0.1)\n # Return the data requested.\n if(offset > len(audio)):\n # If we're looking near the end of the file,\n # handle the fact that compression could change the size.\n reported_size = self.getattr(path)['st_size']\n if(offset < reported_size):\n offset = len(audio) - (reported_size - offset)\n return audio[offset:offset+size].tobytes()", "def gen_blocks(self, count=None):\n if not count:\n count = self.num_blk\n for x in range(0, count*32, 32):\n buf = self._read(x)\n yield x, buf", "def read_file_allocation_table(self):\n\n def construct_fat_format(num_clusters):\n return self.endian_fmt + (\n ('H' if self.fat16x else 'L') * num_clusters)\n\n fat_offset = self.byte_offset_to_physical_offset(self.fat_byte_offset)\n self.infile.seek(fat_offset)\n fat_format = construct_fat_format(self.max_clusters)\n fat_length = struct.calcsize(fat_format)\n\n LOG.debug(\"FAT Offset: %s\", POSITION)\n LOG.debug(\"FAT Length: %08x\", fat_length)\n\n fat_table = self.infile.read(fat_length)\n return [entry for entry in struct.unpack(fat_format, fat_table)]", "def _chunks2(l, cs):\n cs = [2, 4, 8, 16, 32, 64]\n ci = 0\n lin = 0\n while True:\n li = lin\n if li > (len(l) - 1):\n break\n lin = li + cs[ci]\n ci = (ci + 1) % len(cs)\n yield l[li:lin]", "def read_file_area(self, size):\n return self.infile.read(size)", "def gc_blocks(seq, block_size):\n\n # Make all capital\n seq = 
seq.upper()\n iterations = len(seq) // block_size\n\n # Iterate through finding the GC content\n gc = []\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n gc.append((block.count('G') + block.count('C')) / block_size)\n return tuple(gc)", "def _get_nparts(filename,headersize,itemsize):\n return (os.path.getsize(filename)-headersize)/itemsize", "async def async_readchunks(self, size: int):\n while True:\n data = await self.read(size)\n if data:\n await yield_(data)\n else:\n return", "def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)", "def read_in_chunks(file_object, chunk_size=1000000):\n while True:\n data = [line.strip() for line in itertools.islice(file_object, chunk_size)]\n if not data:\n break\n yield data", "def sizes(self, fileids=None, categories=None):\n # Resolve the fileids and the categories\n\n # Create a generator, getting every path and computing filesize\n for path in self.abspaths(self.fileids()):\n yield os.path.getsize(path)", "def open_with_size(filename, *args):\n\n f = open_(filename, *args)\n if isinstance(f, gzip.GzipFile):\n fo = open(f.name, 'rb')\n fo.seek(-4, 2)\n r = fo.read()\n fo.close()\n return f, struct.unpack('<I', r)[0]\n else:\n f.seek(0, os.SEEK_END)\n buflen = f.tell()\n f.seek(4, os.SEEK_SET)\n return f, buflen", "def blocks_read(file, filesize):\n # core.PACKET_SIZE = getPacketSize(filesize, args.blocks)\n blocks_n = math.ceil(filesize / core.PACKET_SIZE)\n blocks = []\n\n # Read data by blocks of size core.PACKET_SIZE\n for i in range(blocks_n):\n \n data = bytearray(file.read(core.PACKET_SIZE))\n\n if not data:\n raise \"stop\"\n\n # The last read bytes needs a right padding to be XORed in the future\n if len(data) != core.PACKET_SIZE:\n data = data + bytearray(core.PACKET_SIZE - len(data))\n assert i == blocks_n-1, \"Packet #{} has a not handled size of {} bytes\".format(i, len(blocks[i]))\n\n # Paquets are condensed in the right array type\n blocks.append(np.frombuffer(data, dtype=core.NUMPY_TYPE))\n return blocks", "def chunks(cipher, size):\n\treturn [cipher[i*size:(i+1)*size] for i in range(int(math.ceil(len(cipher)*1.0/size)))]", "def _read_frames(filename: str) -> Iterator[Tuple[CritterType, numpy.ndarray]]:\n frame_skip = 0\n last_section = None\n last_frame = None\n\n good_frames: Dict[Tuple[CritterType, int], numpy.ndarray] = {}\n\n cap = cv2.VideoCapture(filename)\n while True:\n ret, frame = cap.read()\n if not ret:\n break # Video is over\n\n if frame_skip > 0:\n frame_skip -= 1\n continue\n\n if frame.shape[:2] == (1080, 1920):\n frame = cv2.resize(frame, (1280, 720))\n\n assert frame.shape[:2] == (720, 1280), \\\n 'Invalid resolution: {1}x{0}'.format(*frame.shape)\n\n if not detect(frame):\n continue # Skip frames that are not showing critterpedia.\n\n # Detect a dark line that shows up only in Pictures Mode.\n mode_detector = frame[20:24, 600:800].mean(axis=(0, 1))\n if numpy.linalg.norm(mode_detector - (199, 234, 237)) > 50:\n raise AssertionError('Critterpedia is in Pictures Mode.')\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if filename.endswith('.jpg'): # Handle screenshots\n yield _detect_critter_section(gray), frame[149:623, :]\n continue\n\n if last_frame is None:\n last_frame = frame\n continue\n\n critter_section = _detect_critter_section(gray)\n if critter_section != last_section:\n if last_section is not None:\n frame_skip = 15\n 
last_section = critter_section\n continue\n\n # Grab the last frame for each side and section combination.\n if last_frame[570:600, :70, 2].min() > 230:\n good_frames[critter_section, 0] = last_frame\n elif last_frame[570:600, -70:, 2].min() > 230:\n good_frames[critter_section, 1] = last_frame\n\n last_frame = frame\n\n cap.release()\n\n for (critter_type, _), frame in good_frames.items():\n # Crop the region containing critter icons.\n yield critter_type, frame[149:623, :]", "def iter_sector(self, id):\n # hard coded because I suck at math.\n # this is a map of sector id to tuples of points: \n # (starting row, starting column), (ending row, ending column)\n # where the end values are inclusive.\n sector_map = {\n 0: ((0, 0), (2, 2)),\n 1: ((3, 0), (5, 2)),\n 2: ((6, 0), (8, 2)),\n 3: ((0, 3), (2, 5)),\n 4: ((3, 3), (5, 5)),\n 5: ((6, 3), (8, 5)),\n 6: ((0, 6), (6, 8)),\n 7: ((3, 6), (5, 8)),\n 8: ((6, 6), (8, 8)),\n }\n\n sector = sector_map.get(id)\n if not sector:\n raise Exception('Invalid sector ID: {}'.format(id))\n (start_col, start_row), (end_col, end_row) = sector\n for row in range(start_row, end_row + 1):\n for col in range(start_col, end_col + 1):\n yield self.table[row][col], row, col", "def safe_read_chunk(self, name, offset=0, length=1024):\n full_path = self.path(name)\n byte_buffer = None\n l = 0\n if self.exists(name):\n with caches['default'].lock('{}_{}'.format(full_path, 'reader')):\n fd = self.__volume.fopen(full_path, 'r')\n try:\n fsize = self.__volume.getsize(full_path)\n if offset < fsize:\n rlength = fsize - offset\n l = length\n if rlength < length:\n l = rlength\n fd.lseek(offset)\n byte_buffer = fd.read(l)\n finally:\n fd.close()\n return (byte_buffer, l)", "def read_cine_protocol(series_dicom_header):\n assert len(series_dicom_header.StudyInstanceUID.unique()) == 1, 'Trying to read dicoms from multiple studies!'\n assert len(series_dicom_header.SpacingBetweenSlices.unique()) == 1\n\n SpacingBetweenSlices = list(series_dicom_header.SpacingBetweenSlices)[0]\n SliceLocations = series_dicom_header.SliceLocation.unique()\n number_of_slices = len(SliceLocations) \n\n phases_per_slice = [len(series_dicom_header[series_dicom_header.SliceLocation==SliceLocation].InstanceNumber) \n for SliceLocation in series_dicom_header.SliceLocation.unique()]\n number_of_phases = phases_per_slice[0]\n\n if len(np.unique(phases_per_slice)) != 1:\n warnings.warn('Number of phases is variable across slice locations! 
Could be real or error, check!.')\n return None\n \n print('Found cine study with (number_of_slices, number_of_phases)', number_of_slices, number_of_phases)\n pixel_array = pydicom.read_file(series_dicom_header.iloc[0].FileName).pixel_array\n \n sax_4D = np.zeros((pixel_array.shape +(number_of_slices, number_of_phases)), dtype=pixel_array.dtype)\n \n dicom_4D_paths = {}\n for SliceIndex, SliceLocation in enumerate(sorted(SliceLocations)):\n slice_header = series_dicom_header[series_dicom_header.SliceLocation==SliceLocation]\n dicom_4D_paths[SliceIndex] = []\n for InstanceIndex, InstanceNumber in enumerate(sorted(slice_header.InstanceNumber)):\n DicomFileName = slice_header[slice_header.InstanceNumber==InstanceNumber].FileName.item()\n dicom = pydicom.read_file(DicomFileName)\n sax_4D[:,:,SliceIndex,InstanceIndex] += dicom.pixel_array\n\n dicom_4D_paths[SliceIndex] += [DicomFileName]\n\n affine = read_affine(series_dicom_header.iloc[series_dicom_header.SliceLocation.argmin()])\n\n sax_4D = nib.Nifti1Image(sax_4D, affine=affine), \n sax_4D.SpacingBetweenSlices = SpacingBetweenSlices\n\n return sax_4D, dicom_4D_paths", "def read_in_chunks(self, file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data", "def iter_chunks(self, chunk_size, depths=True, step_size=None):\n step_size = step_size or chunk_size\n\n i = 0\n while i < self.height:\n if depths:\n yield self.img[i:i+chunk_size], self.depths[i:i+chunk_size]\n else:\n yield self.img[i:i+chunk_size]\n i += step_size", "def _iter_CUs_in_section(stream, structs, parser):\n stream.seek(0, os.SEEK_END)\n endpos = stream.tell()\n stream.seek(0, os.SEEK_SET)\n\n offset = 0\n while offset < endpos:\n header = struct_parse(parser, stream, offset)\n if header.offset_count > 0:\n offset_parser = structs.Dwarf_uint64 if header.is64 else structs.Dwarf_uint32\n header['offsets'] = struct_parse(Array(header.offset_count, offset_parser('')), stream)\n else:\n header['offsets'] = False\n yield header\n offset = header.offset_after_length + header.unit_length", "def vertical_cylinders(xy_size: int, z_depth: int, dtype=np.uint8):\n shape = (xy_size, xy_size, z_depth)\n image_size_px = shape[0] // 3\n z_depth = shape[2]\n half_atom = image_size_px // 2\n quarter_atom = image_size_px // 4\n cylinders = [\n # center cylinder, z-aligned, 64x64 radius = 16\n ((image_size_px + half_atom, image_size_px + half_atom, 0),\n (image_size_px + half_atom, image_size_px + half_atom, z_depth - 1),\n image_size_px // 4),\n # first tile overlapping to other tiles, z-aligned, 64x64 radius = 16\n ((image_size_px - quarter_atom, image_size_px - quarter_atom, 0),\n (image_size_px - quarter_atom, image_size_px - quarter_atom, z_depth - 1),\n image_size_px // 4),\n # lower middle tile overlapping to other tiles, z-aligned, 64x64 radius = 8\n ((image_size_px * 2 + quarter_atom, image_size_px + half_atom, 0),\n (image_size_px * 2 + quarter_atom, image_size_px + half_atom, z_depth - 1),\n image_size_px // 8),\n ]\n data_mask = create_cylinders_volume(shape, cylinders, foreground=1, dtype=dtype)\n return data_mask, cylinders", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def readImageDataPerByte(self, open_url):\n data = open_url.read(1024)\n while data:\n yield data\n data = open_url.read(1024)", "def sectorize(position):\r\n x, y, z = normalize(position)\r\n x, y, z = x // SECTOR_SIZE, y // SECTOR_SIZE, z // SECTOR_SIZE\r\n return (x, 0, z)", "def _read_adsc_chunk(self, 
chunk):\n try:\n (a_size, self._nchannels, ansamples, acquifreq,\n sampwidth, highest, lowest, zero,\n reccode, recver) = unpack(\n '<L' # a_size 4\n 'H' # _nchannels 2 (nch)\n 'L' # ansamples 4\n 'L' # acquifreq 4\n 'H' # sampwidth 2 (bps)\n 'i' # highest 4\n 'i' # lowest 4\n 'i' # zero 4\n 'H' # reccode 2\n 'H', # recver 2\n chunk.read(32)\n )\n except struct.error:\n raise EOFError from None\n self._sampwidth = (sampwidth + 7) // 8\n if not self._sampwidth:\n raise Error('bad sample width')\n if not self._nchannels:\n raise Error('bad # of channels')\n self._framesize = self._nchannels * self._sampwidth\n self._comptype = 'NONE'\n self._compname = 'not compressed'", "def bytes_in_block(block_size: int, i: int) -> slice:\n return slice(block_size * i, block_size * (i + 1))", "def sector_angles(self) -> np.ndarray:\n return self._sector_angles", "def sectorize(position):\n x, y, z = normalize(position)\n x, y, z = x // SECTOR_SIZE, y // SECTOR_SIZE, z // SECTOR_SIZE\n return (x, 0, z)", "def _read_until(self, c, chunk_size=96):\n s = io.BytesIO()\n fp = self._fp\n eof = False\n\n while True:\n chunk = fp.read(chunk_size)\n\n if not chunk:\n # The end of the file was reached. We'll bail out of the loop\n # and return everything we've read so far.\n eof = True\n break\n\n i = chunk.find(c)\n\n if i == -1:\n # We didn't find the character. Store the entire chunk.\n s.write(chunk)\n else:\n # We found the character. Store everything up to and including\n # it, and then go back in the stream for the next read.\n s.write(chunk[:i + 1])\n fp.seek(i + 1 - len(chunk), os.SEEK_CUR)\n break\n\n result = s.getvalue()\n s.close()\n\n return result, eof", "def partition_cr(n_sample, size, n_cpus):\n\n # divid the block by n_cpu partitions, with size n_sample0\n # if the divided chunk is smaller than the requested chunk n_sample\n # use the requested chunk size\n n_sample0 = int(math.ceil(np.sqrt(size*size/n_cpus/2.)))\n if (n_sample0 > n_sample):\n n_sample = n_sample0\n\n block_id=[]\n nbatch = 0\n nbatch1 = 0\n nbatch2 = 0\n e1 = 0\n while (e1 < size):\n s1 = int(n_sample*nbatch1)\n e1 = int(np.min([s1 + n_sample, size]))\n nbatch2 = nbatch1\n nbatch1 += 1\n e2 = 0\n while (e2 <size):\n s2 = int(n_sample*nbatch2)\n e2 = int(np.min([s2 + n_sample, size]))\n block_id += [(s1, e1, s2, e2)]\n nbatch2 += 1\n nbatch += 1\n\n return block_id, nbatch", "def slice_generator(\n sequence_length,\n n_blocks):\n return ((int(round((b - 1) * sequence_length/n_blocks)),\n int(round(b * sequence_length/n_blocks)))\n for b in range(1, n_blocks+1))", "async def read_chunk(self, size: int = ...) 
-> bytes:\n ...", "def chunks(seq, size):\n for i in range(0, len(seq), size):\n yield seq[i:i + size]", "def read_files(filenames, gram_size=1):\n assert isinstance(filenames, list), \"filenames argument must be a list\"\n parser = MorParser()\n for fn in filenames:\n for uid, speaker, ngram in generate_chunks(parser.parse(fn), gram_size):\n yield fn, uid, speaker, ngram", "def stream_channels(self, paths, chunk_size=10000000, verbose=False):\n def gen(paths, chunk_size):\n datasets = [self.file[path] for path in paths]\n length = len(datasets[0]) # Assume all are the same size\n offset = 0\n col_sizes = [dataset.shape[0] if dataset.ndim>1 else 1 for dataset in datasets]\n total_cols = sum(col_sizes)\n buf = None\n while offset < length:\n if length - offset > chunk_size:\n chunk = chunk_size\n if buf is None:\n buf = np.zeros((chunk, total_cols))\n else:\n chunk = length - offset\n buf = np.zeros((chunk, total_cols))\n verbose and print(\"batch {0} of ({2:.2%}) from {3} datasets\".format(\n offset, length, offset/length, len(datasets)))\n running_col = 0\n for (i, dst) in enumerate(datasets):\n num_cols = col_sizes[i]\n read_values = dst[offset:(offset+chunk)]\n read_values.resize((chunk, num_cols))\n buf[:, running_col:(running_col + num_cols)] = read_values\n running_col += num_cols\n yield buf\n offset += chunk\n return Stream(gen(paths, chunk_size), chunk_size=chunk_size)", "def load_cbin(filename,channel=0):\n \n # .cbin files are big endian, 16 bit signed int, hence dtype=\">i2\" below\n data = np.fromfile(filename,dtype=\">i2\")\n recfile = filename[:-5] + '.rec'\n rec_dict = readrecf(recfile)\n data = data[channel::rec_dict['num_channels']] # step by number of channels\n sample_freq = rec_dict['sample_freq']\n return data, sample_freq", "def _read_sdsc_chunk(self, chunk):\n try:\n (s_size, acronym, paraname,\n unitname, snsamples, self._framerate,\n self._s_max, self._s_min, cmax, self._czero,\n imax, fmax) = unpack(\n '<L' # s_size 4\n 'L' # acronym 4\n '80s' # paraname 80\n '16s' # unitname 16\n 'L' # snsamples 4\n 'L' # _framerate 4 (Freq)\n 'h' # s_max 2\n 'h' # s_min 2\n 'h' # cmax 2\n 'h' # _czero 2\n 'i' # imax 4\n 'L', # fmax 8\n chunk.read(128)\n )\n except struct.error:\n raise EOFError from None\n\n # handle redundant characters\n self._paraname = paraname.replace(b'\\x00', b'').decode('ascii')\n self._unitname = unitname.replace(b'\\x00', b'').decode('ascii')\n\n # Calibration setting\n self._signaldynamic = float(cmax - self._czero)\n self._valueatmax = float(imax) + fmax / float(100000)", "def get_read_length(read_fn, n_read=10000):\n with GzipFile(read_fn, mode='rb') as f:\n h = SeqIO.QualityIO.FastqGeneralIterator(f)\n i = 0\n l = []\n while i < n_read:\n try:\n t = h.next()\n l.append(len(t[1]))\n i += 1\n except StopIteration:\n logger.warning(\"Requested %d reads but reached the end of the file after %d\", n_read, i)\n return int(np.round(np.mean(l)))", "def read_block(f, offset, length, delimiter=None):\n if delimiter:\n f.seek(offset)\n seek_delimiter(f, delimiter, 2**16)\n start = f.tell()\n length -= start - offset\n\n f.seek(start + length)\n seek_delimiter(f, delimiter, 2**16)\n end = f.tell()\n\n offset = start\n length = end - start\n\n f.seek(offset)\n b = f.read(length)\n return b", "def chunks(iterator, size):\n for index in range(0, len(iterator), size):\n yield iterator[index:index + size]", "def read_in_chunks(stream, chunk_size):\n while True:\n data = stream.read(chunk_size)\n if not data:\n break\n yield BytesIO(data)", "def readcif(filename, 
**kwds):\n \n # Read the unit cell parameters\n a, b, c, alf, bet, gam = [[]]*6\n with open(filename, 'r') as f:\n \n for line in f:\n if \"length_a\" in line:\n a = numgrab(line)\n elif \"length_b\" in line:\n b = numgrab(line)\n elif \"length_c\" in line:\n c = numgrab(line)\n elif \"angle_alpha\" in line:\n alf = numgrab(line)\n elif \"angle_beta\" in line:\n bet = numgrab(line)\n elif \"angle_gamma\" in line:\n gam = numgrab(line)\n \n crystVec = a + b + c + alf + bet + gam\n \n # Read atomic coordinates\n cifdata = pd.read_csv(filename, delim_whitespace=True, header=None, **kwds)\n atomLabels = np.array(cifdata.values[:,0], dtype='str')\n coords = np.array(cifdata.values[:,1:4]).astype('float64')\n\n return atomLabels, coords, crystVec", "def __next_chunk_pattern(self, chunk_sectors:int):\n\t\tcurr_pat = next(self.pat_it, None)\t\t\t\n\n\t\tif (curr_pat is None):\n\t\t\tself.__reset_pat()\n\t\t\tcurr_pat = next(self.pat_it, None)\n\t\t\t\t\t\n\t\treturn bytearray(curr_pat[0:chunk_sectors * 512])", "def sector(self):\n return self.sub_sector.sector", "def get_iter(self, numPerIter=None):\n if numPerIter == None:\n numPerIter = self.chunk_size\n while True:\n els = self.read(numPerIter)[:]\n if els.shape[0] == 0:\n break\n yield els", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def buffered_read(file_object, chunk_size=1024):\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n yield data" ]
[ "0.5300185", "0.52004045", "0.5030646", "0.50120246", "0.500339", "0.49684152", "0.4958921", "0.49562106", "0.492392", "0.48962128", "0.4890391", "0.4856208", "0.48293597", "0.4815566", "0.47790107", "0.4773855", "0.47645763", "0.47498384", "0.47443956", "0.47405732", "0.4736557", "0.47355908", "0.4720226", "0.46996182", "0.46715614", "0.46624812", "0.46572974", "0.46433702", "0.46274567", "0.46274567", "0.46274567", "0.46274567", "0.46249843", "0.46239406", "0.4622397", "0.46066386", "0.4599787", "0.4597671", "0.45966822", "0.45840156", "0.45783216", "0.4575535", "0.45330063", "0.45201507", "0.4511581", "0.4510879", "0.45044363", "0.45044363", "0.45021448", "0.44855154", "0.4480472", "0.44702405", "0.44632825", "0.44551873", "0.44453713", "0.44358993", "0.4434843", "0.44283593", "0.4416504", "0.44143176", "0.44043598", "0.4398236", "0.43864375", "0.43854672", "0.43781188", "0.43765664", "0.4375272", "0.4362211", "0.43618324", "0.43608537", "0.43585956", "0.43500337", "0.43405795", "0.4339521", "0.43351662", "0.43343198", "0.4326396", "0.43237883", "0.4322093", "0.43175673", "0.431551", "0.43124518", "0.43059435", "0.43041617", "0.4294642", "0.4291619", "0.42885756", "0.42702615", "0.42689267", "0.4254956", "0.42543367", "0.4251271", "0.42487007", "0.4245328", "0.4242861", "0.4239761", "0.42356384", "0.42245343", "0.422331", "0.42109305" ]
0.798024
0
Generates Diffie-Hellman global parameters p and q.
def generate_dh_params(bits):
    # Domain parameters
    # Choose a prime q so that p=2q+1 is also prime
    acc1 = mr.estimate_accuracy(bits-1)
    acc2 = mr.estimate_accuracy(bits)
    # TODO: This is not correct, must use a discrete logarithm approach to
    # actually find it. See example_dh_params.py
    generator = 2
    while True:
        q = mr.find_prime(bits-1, acc1)
        p = 2*q + 1
        # used to prevent small subgroup attacks, see Stallings
        if mr.probably_prime(p, acc2):
            return q, p, generator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu", "def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def init_P_PHM_GIVEN_PHI():\n global P_PHM_GIVEN_PHI\n for i in INTERFACE_LEVEL_ACTIONS: # ui\n P_PHM_GIVEN_PHI[i] = collections.OrderedDict()\n for j in INTERFACE_LEVEL_ACTIONS: # um\n if i == j:\n # try to weight the true command more for realistic purposes. Can be offset by using a high UM_GIVEN_UI_NOISE\n P_PHM_GIVEN_PHI[i][j] = 1.0\n else:\n # P_PHM_GIVEN_PHI[i][j] = np.random.random()*UM_GIVEN_UI_NOISE#IF UM_GIVEN_UI_NOISE is 0, then the p(um|ui) is a deterministic mapping\n P_PHM_GIVEN_PHI[i][j] = 0.0\n\n delta_dist = np.array(P_PHM_GIVEN_PHI[i].values())\n uniform_dist = (1.0 / len(INTERFACE_LEVEL_ACTIONS)) * np.ones(len(INTERFACE_LEVEL_ACTIONS))\n blended_dist = (1 - PHM_GIVEN_PHI_NOISE) * delta_dist + PHM_GIVEN_PHI_NOISE * uniform_dist # np.array\n for index, j in enumerate(INTERFACE_LEVEL_ACTIONS):\n P_PHM_GIVEN_PHI[i][j] = blended_dist[index]", "def generate_parameters(self):\n self.parameters = np.zeros(self.D)\n for l in range(self.D):\n if self.p_l[l] >= np.random.uniform(0,1):\n self.parameters[l] = 1", "def genKeys(p, q):\r\n n = p * q\r\n phi = (p-1)*(q-1)\r\n #e = findE(phi, p, q)\r\n e = findE(phi)\r\n \r\n d = ext_Euclid(phi, e) #Using the extended Euclidean algorithm to compute d\r\n if (d < 0):\r\n d += phi\r\n print (\"n = \"+ str(n))\r\n print (\"phi(n) = \"+ str(phi))\r\n print (\"e = \"+ str(e))\r\n print (\"d = \"+ str(d))\r\n print\r\n return n, e, d", "def G():\n Pz=[40]\n Pp=[1,2,1]\n return Pz, Pp", "def buildQ(self):\r\n\r\n print 'Building Q ...'\r\n\r\n self.y = T.matrix('y')\r\n\r\n mlp = MLP(activations=self.hyper['q_activs'],\r\n dims=self.hyper['q_dims'],\r\n weights_init=self.hyper['q_W_init'],\r\n biases_init=Constant(0))\r\n\r\n q_parameters = mlp.apply(self.y)\r\n mlp.initialize()\r\n\r\n # self.qxgy_mu.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_mu = q_parameters[:,:self.hyper['x_dim']]\r\n\r\n # self.qxgy_var.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_var = T.exp( q_parameters[:,self.hyper['x_dim']:2*self.hyper['x_dim']] )\r\n\r\n # self.qwgy_mu.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_mu = q_parameters[:,2*self.hyper['x_dim']:2*self.hyper['x_dim']+self.hyper['w_dim']]\r\n\r\n # self.qwgy_var.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_var = T.exp( q_parameters[:,2*self.hyper['x_dim']+self.hyper['w_dim']:] )\r\n\r\n\r\n #---Will be useful to compute samples from q(x|y)---#\r\n #self.eps_x.shape == (minibatch size, # of x samples , # of dimension of x)\r\n self.eps_x = self.srng.normal((self.qxgy_mu.shape[0] ,self.hyper['L_x'] ,self.hyper['x_dim']))\r\n\r\n #self.x corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.x.shape == (minibatch size, # of x samples , # of dimension of x)\r\n self.x = self.qxgy_mu.dimshuffle(0,'x',1) + T.sqrt(self.qxgy_var).dimshuffle(0,'x',1)*self.eps_x\r\n\r\n #---Will be useful to compute samples from q(w|y)---#\r\n #self.eps_w.shape == (minibatch size, # of w samples , # of dimension of w)\r\n self.eps_w = self.srng.normal((self.qwgy_mu.shape[0] ,self.hyper['L_w'] ,self.hyper['w_dim']))\r\n\r\n #self.w corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.w.shape 
== (minibatch size, # of w samples , # of dimension of w)\r\n self.w = self.qwgy_mu.dimshuffle(0,'x',1) + T.sqrt(self.qwgy_var).dimshuffle(0,'x',1)*self.eps_w\r\n\r\n\r\n #---Building the log density q(x|y)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.x - self.qxgy_mu.dimshuffle(0,'x',1))**2/(2*self.qxgy_var.dimshuffle(0,'x',1)), axis=2)\r\n norm_cst = (2*np.pi)**(-self.hyper['x_dim']/2.)*T.exp(T.sum(T.log(self.qxgy_var), axis=1))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n qxgy = norm_cst.dimshuffle(0,'x')*T.exp(inside_exp)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n self.log_qxgy = T.log(qxgy + little_num)", "def genKeys(p, q):\n n = p*q\n phi_of_n = (p-1)*(q-1)\n e = 0\n \n #find a e less than n that is coprime with phi(n)\n count=2\n while count:\n gcd = gcd_iter(phi_of_n,count)\n if gcd==1:\n e = count\n break\n count+=1\n \n # finding the mutiplicative inverse of e and phi(n)\n d = ext_Euclid(e,phi_of_n)\n\n #positive values of d\n if d<0:\n d = phi_of_n - d\n return n,e,d", "def generation(hid_pl, f_state, eps_z, eps_x, pd, fd):\n params_prior = fd['phi_prior'](hid_pl)\n z = sample(params_prior, eps_z, 'gauss')\n phi_z = fd['phi_z'](z)\n params_out = fd['phi_dec'](phi_z, hid_pl)\n x = sample(params_out, eps_x, pd['model'])\n\n phi_x = fd['phi_x'](x)\n f_in = tf.concat([phi_x, phi_z], axis=1, name='f_theta_joint_inputs')\n f_out, f_state = fd['f_theta'](f_in, f_state)\n return x, f_out, f_state", "def buildP(self):\r\n\r\n print 'Building P ...'\r\n\r\n #---Building p(y|x)---#\r\n pygx_params_mlp = MLP(activations=self.hyper['pygx_activs'],\r\n dims=self.hyper['pygx_dims'],\r\n weights_init=self.hyper['pygx_W_init'],\r\n biases_init=Constant(0))\r\n\r\n pygx_params = pygx_params_mlp.apply(self.x.reshape((self.x.shape[0]*self.x.shape[1],self.x.shape[2])))\r\n pygx_params = pygx_params.reshape((self.x.shape[0],self.x.shape[1],2*self.hyper['y_dim']))\r\n pygx_params_mlp.initialize()\r\n\r\n # self.pygx_mu.shape == (minibatch size, L_x , num of dimension of y)\r\n self.pygx_mu = pygx_params[:,:,:self.hyper['y_dim']]\r\n\r\n # self.pygx_var.shape == (minibatch size, L_x, num of dimension of y)\r\n self.pygx_var = T.exp( pygx_params[:,:,self.hyper['y_dim']:] )\r\n\r\n\r\n #---Building graph for the density of p(y|x)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.y.dimshuffle(0,'x',1) - self.pygx_mu)**2/(2*self.pygx_var), axis=2)\r\n norm_cst = (2*np.pi)**(-self.hyper['y_dim']/2.)*T.exp(T.sum(T.log(self.pygx_var), axis=2))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n pygx = norm_cst*T.exp(inside_exp)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n self.log_pygx = T.log(pygx + little_num)\r\n\r\n #---Building NN for p(x|z=j,w) for all j---#\r\n pxgzw_mus = [None]*self.hyper['num_clust']\r\n pxgzw_vars = [None]*self.hyper['num_clust']\r\n pxgzw = [None]*self.hyper['num_clust']\r\n\r\n for j in range(self.hyper['num_clust']):\r\n\r\n pxgzw_params_mlp = MLP(activations=self.hyper['pxgzw_activs'][j],\r\n dims=self.hyper['pxgzw_dims'][j],\r\n weights_init=self.hyper['pxgzw_W_init'],\r\n biases_init=Constant(0))\r\n\r\n pxgzw_params = pxgzw_params_mlp.apply(self.w.reshape((self.w.shape[0]*self.w.shape[1],self.w.shape[2])))\r\n pxgzw_params = pxgzw_params.reshape((self.w.shape[0],self.w.shape[1], 2*self.hyper['x_dim']))\r\n pxgzw_params_mlp.initialize()\r\n\r\n # pxgzw_mus[j].shape == (minibatch size, L_w , num of dimension of x)\r\n pxgzw_mus[j] = pxgzw_params[:,:,:self.hyper['x_dim']]\r\n\r\n # 
pxgzw_vars[j].shape == (minibatch size, L_w, num of dimension of x)\r\n pxgzw_vars[j] = T.exp( pxgzw_params[:,:,self.hyper['x_dim']:] )\r\n\r\n #---Building graph for the density of p(x|z=j,w)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.x.dimshuffle(0,'x',1,2) - pxgzw_mus[j].dimshuffle(0,1,'x',2))**2/(2*pxgzw_vars[j].dimshuffle(0,1,'x',2)), axis=3)\r\n norm_cst = (2*np.pi)**(-self.hyper['x_dim']/2.)*T.exp(T.sum(T.log(pxgzw_vars[j]), axis=2))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of w samples (L_w), # of x samples (L_x))\r\n pxgzw[j] = norm_cst.dimshuffle(0,1,'x')*T.exp(inside_exp)\r\n\r\n\r\n # shape is (minibatch size, L_w , # of clusters , num of dimension of x)\r\n self.pxgzw_mus = T.concatenate([mu.dimshuffle(0,1,'x',2) for mu in pxgzw_mus], axis=2)\r\n # shape is (minibatch size, L_w , # of clusters , num of dimension of x)\r\n self.pxgzw_vars = T.concatenate([var.dimshuffle(0,1,'x',2) for var in pxgzw_vars], axis=2)\r\n\r\n # self.pxgzw.shape == (minibatch size, L_w, L_x, num_clust)\r\n self.pxgzw = T.concatenate([density.dimshuffle(0,1,2,'x') for density in pxgzw], axis=3)\r\n self.log_pxgzw = T.log(self.pxgzw + little_num)\r\n\r\n #---Building the p(z=j|x,w) posterior for all j---#\r\n # self.log_pzgxw.shape == (minibatch size, L_w, L_x, num_clust)\r\n self.log_pzgxw = T.log(self.pxgzw + little_num) -T.log(T.sum(self.pxgzw + little_num, axis=3).dimshuffle(0,1,2,'x'))", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def sample_parameters_given_hyper(self, gen_seed=0):\n if type(gen_seed) is not int:\n raise TypeError(\"gen_seed should be an int\")\n\n rng = random.Random(gen_seed)\n\n hypers = self.get_hypers()\n s = hypers[b's']\n r = hypers[b'r']\n nu = hypers[b'nu']\n m = hypers[b'mu']\n\n rho = rng.gammavariate(nu/2.0, s)\n mu = rng.normalvariate(m, (r/rho)**.5)\n\n assert(rho > 0)\n\n params = {'mu': mu, 'rho': rho}\n\n return params", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n 
self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def random_parameters():\n res = dict()\n res[\"population_size\"] = random.randrange(2, 21)\n res[\"mutation_prob\"] = random.choice([0.02, 0.05, 0.10, 0.20, 0.30, 0.40, 0.50])\n res[\"crossover\"] = random.choice([True, False])\n res[\"selection\"] = random.choice([True, False])\n res[\"sigma\"] = random.choice([0.1, 0.25, 0.5, 1])\n res[\"crossover_method\"] = random.choice([\"single_swap\", \"uniform_swap\", \"arithmetic\"])\n res[\"selection_method\"] = random.choice([\"truncated\", \"fitness_based\", \"rank_based\"])\n res[\"best_rate\"] = random.choice([0.2, 0.3, 0.5])\n res[\"n_parents\"] = random.choice([2, 3, 4])\n res[\"elitism\"] = random.choice([True, False])\n return res", "def genDSGParams(self, z, evol='faber', Q=-0.866):\n params = np.zeros(8)\n phistar = 10 ** (-1.79574 + (-0.266409 * z))\n mstar = -20.44\n mstar0 = -20.310\n\n params[0] = 0.0156 #phistar1\n params[1] = -0.166 #alpha1\n params[2] = 0.00671 #phistar2\n params[3] = -1.523 #alpha2\n params[4] = -19.88 #mstar\n params[5] = 3.08e-5 #phistar3\n params[6] = -21.72 #M_hi\n params[7] = 0.484 #sigma_hi\n\n phistar_rat = phistar/params[0]\n mr_shift = mstar - mstar0\n params[0] *= phistar_rat\n params[2] *= phistar_rat\n params[5] *= phistar_rat\n params[4] += mr_shift\n params[6] += mr_shift\n\n if evol=='faber':\n params[4] += Q * (np.log10(z) + 1)\n params[6] += Q * (np.log10(z) + 1)\n elif evol=='a':\n params[4] += Q * (1. / (1 + z) - 1. / 1.1)\n params[6] += Q * (1. / (1 + z) - 1. 
/ 1.1)\n elif evol=='a0':\n params[4] += Q / (1 + z)\n params[6] += Q / (1 + z)\n\n return params", "def p(self):\n return hlp.parms(self.y(0))", "def generate_variations(seed=425, th=150):\n \n # impact parameters\n M = 1e8*u.Msun\n B0 = 19.85*u.kpc\n\n V = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n potential_perturb = 1\n \n # potential parameters (log halo)\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n Bs = 20*u.kpc\n xr = Bs + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n ienc = np.argmin(np.abs(x))\n \n farray = np.array([0.1, 0.3, 0.5, 1, 2, 3, 10])\n farray = np.array([0.3,0.5,0.8,0.9,1,1.1,1.2,2,3])\n \n for e, f in enumerate(farray):\n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, (T*f).si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n par_perturb = np.array([M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB + Bs\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, (T*f).si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': 
xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n pickle.dump(outdict, open('../data/variations/vary_th{:03d}_T_{:.1f}.pkl'.format(th, f), 'wb'))\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n for e, f in enumerate(farray):\n par_perturb = np.array([f*M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB + Bs\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, (T).si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n pickle.dump(outdict, open('../data/variations/vary_th{:03d}_M_{:.1f}.pkl'.format(th, f), 'wb'))\n \n for e, f in enumerate(farray):\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB*f + Bs\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n 
pickle.dump(outdict, open('../data/variations/vary_th{:03d}_B_{:.1f}.pkl'.format(th, f), 'wb'))\n \n theta0 = theta\n V0 = V\n \n for e, f in enumerate(farray):\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n dB = (B0 - Bs)\n B = dB + Bs\n \n vpar = Vh + np.cos(theta0.rad)*V0\n vperp = np.sin(theta0.rad)*V0\n vpar_scaled = vpar*f\n vperp_scaled = vperp*f\n \n V = np.sqrt((vpar_scaled-Vh)**2 + vperp_scaled**2)\n theta = coord.Angle(np.arctan2(vperp_scaled, vpar_scaled-Vh))\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n outdict = {'x': stream['x'], 'v': stream['v'], 'xi': xi, 'eta': eta, 'observer': observer, 'vobs': vobs, 'R': R, 'xi0': xioff, 'x0': stream0['x'], 'v0': stream0['v']}\n pickle.dump(outdict, open('../data/variations/vary_th{:03d}_V_{:.1f}.pkl'.format(th, f), 'wb'))", "def genKeys(p, q):\n # Fill in code to generate the server's public and private keys.\n # Make sure to use the Extended Euclidean algorithm...............................\n n = p * q\n phi = (p-1)*(q-1)\n #e = e_finder(n, phi)\n while True:\n e = random.randint(1, phi)\n if gcd_iter(e, phi) == 1:\n break\n d = ext_Euclid(phi, e)\n if d <0:\n d+=phi\n return n, e, d", "def Gd():\n Pz=[8]\n Pp=[1,1]\n return Pz, Pp", "def initDE(N_p,lb,ub,prob):\n\n\n\n lb = np.full(N_p,lb)\n \n ub = np.full(N_p,ub)\n \n f = np.zeros((N_p,1)) #empty vector for fitness function\n \n fu = np.zeros((N_p,1))#newly created trial vector\n\n D = len(lb) # Determining amount of decision variables\n \n U = np.zeros((N_p,D)) #Matrix for storing trial solutions \n \n #Initial random population \n P = mat.repmat(lb,N_p,1)+mat.repmat((ub-lb),N_p,1)*np.random.rand(len(ub-lb),N_p)\n \n for p in np.arange(N_p):\n f[p]=prob(P[p,])\n \n return lb,ub,f,fu,D,U,P", "def gen_parameter(self, g, ng, p):\n pass", "def initialize_parameters(n_a,n_x,n_y):\n np.random.seed(1)\n Wax=np.random.randn(n_a,n_x)*0.01 #input to hidden\n Waa=np.random.randn(n_a,n_a)*0.01 #hidden to hidden\n Wya=np.random.randn(n_y,n_a)*0.01 #hidden to output\n b=np.zeros((n_a,1)) #hidden bias\n by=np.zeros((n_y,1)) #output bias\n \n parameters={\"Wax\":Wax,\"Waa\":Waa,\"Wya\":Wya,\"b\":b,\"by\":by}\n return parameters", "def random():\n pars = dict(\n scale=10**np.random.uniform(1, 3),\n gamma=np.random.uniform(0, 6),\n q_0=10**np.random.uniform(-3, -1),\n )\n return pars", "def generate_keys(self):\n\n\t\tmin_ext = 1 << self.size_ext - 1\n\t\tmax_ext = 1 << self.size_ext\n\t\t\n\t\t\t\n\t\t# step 1 : chose random primary numbers p and q\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._p = n\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\twhile(n == self._p):\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._q = n\n\n\t\t#step 2 : compute n = pq\n\t\tself.n = self._p * 
self._q\n\n\t\t#step 3 : compute phi(n)\n\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t#step 4 : chose the exponent\n\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\textension = extension + self.D\n\t\twhile (gcd(self._phi,n) != 1):\n\t\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\tself._d = extension\n\n\t\t#step 5 : compute d (private key)\n\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def main(rand,mu,lamb,cxpb,mutpb,ngen,param):\n \n random.seed(rand)\n NGEN = ngen\n MU = mu\n LAMBDA = lamb\n CXPB = cxpb\n MUTPB = mutpb\n \n # Used for printing the results. It is the parameter that is changed one run from another\n if param==\"rand\" or param==\"optimal\":\n list_results=[rand]\n elif param==\"mu\":\n list_results=[mu]\n elif param==\"lamb\":\n list_results=[lamb]\n elif param==\"cross\":\n list_results=[cxpb]\n elif param==\"mutate\":\n list_results=[mutpb]\n elif param==\"ngen\":\n list_results=[ngen]\n elif param==\"original\":\n list_results=[0]\n \n # Initialization of the objects for the GA\n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean, axis=0)\n stats.register(\"std\", np.std, axis=0)\n stats.register(\"min\", np.min, axis=0)\n stats.register(\"max\", np.max, axis=0)\n\n # Run of the GA\n p,logbook=algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,\n halloffame=hof,verbose=0)\n with open(results_path+param+'_logbook.csv', 'a',newline='') as f:\n w = csv.DictWriter(f, logbook[0].keys())\n w.writeheader()\n for el in logbook:\n w.writerow(el)\n w.writerow({})\n \n # Takes the max fitness of the population from all of the runs\n max_fit=0\n max_gen=0\n for elt in logbook:\n if elt['max'][0]>max_fit:\n max_fit=elt['max'][0]\n max_gen=elt['gen']\n list_results.append(max_fit)\n list_results.append(max_gen)\n \n #TODO\n# for ind in hof:\n# dist = numpy.linalg.norm(a-b)\n\n print (\"{0} {1} {2} {3}\".format(round(list_results[1],3),round(list_results[2],3),round(list_results[0],3),hof[0]))\n current_out_writer.writerow([list_results[0],list_results[1],list_results[2],hof[0]])\n \n return pop, stats, hof", "def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)", "def __init__(self, p, q):\n self.p = p\n self.q = q\n # biais des unités d’entrée) -> dim (1xp)\n self.a = np.zeros((1, self.p))\n # biais des unités de sortie -> dim (1xq)\n self.b = np.zeros((1, self.q))\n # initialisés aléatoirement suivant une loi normale centrée, de variance égale à 0.01\n self.W = np.random.normal(loc=0, scale=0.1, size=(self.p, self.q))", "def get_prob_params():\n prob = Namespace()\n prob.study_name = STUDY_NAME\n if IS_DEBUG:\n prob.num_trials = 3\n prob.max_capital = 10\n else:\n prob.num_trials = NUM_TRIALS\n prob.max_capital = MAX_CAPITAL\n # Common\n prob.time_distro = TIME_DISTRO\n prob.num_workers = NUM_WORKERS\n _study_params = {\n 'branin': ('synthetic/branin/config_mf.json',\n branin_mf, cost_branin_mf, 0.1, 0, 1),\n 'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',\n hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),\n 'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',\n hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),\n 'borehole_6': ('synthetic/borehole_6/config_mf.json',\n borehole_6_mf, cost_borehole_6_mf, 1, 0, 
1),\n 'park2_4': ('synthetic/park2_4/config_mf.json',\n park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),\n 'park2_3': ('synthetic/park2_3/config_mf.json',\n park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),\n 'park1_3': ('synthetic/park1_3/config_mf.json',\n park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),\n }\n (domain_config_file_suffix, raw_func, raw_fidel_cost_func, _fc_noise_scale,\n _initial_pool_size, _) = _study_params[prob.study_name]\n domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR, domain_config_file_suffix)\n # noisy\n prob.noisy_evals = NOISY_EVALS\n if NOISY_EVALS:\n noise_type = 'gauss'\n noise_scale = _fc_noise_scale\n else:\n noise_type = 'no_noise'\n noise_scale = None\n # Create domain, function_caller and worker_manager\n config = load_config_file(domain_config_file)\n func_caller = get_multifunction_caller_from_config(raw_func, config,\n raw_fidel_cost_func=raw_fidel_cost_func, noise_type=noise_type,\n noise_scale=noise_scale)\n # Set max_capital\n if hasattr(func_caller, 'fidel_cost_func'):\n prob.max_capital = prob.max_capital * \\\n func_caller.fidel_cost_func(func_caller.fidel_to_opt)\n else:\n prob.max_capital = prob.max_capital\n # Store everything in prob\n prob.func_caller = func_caller\n prob.worker_manager = SyntheticWorkerManager(prob.num_workers,\n time_distro='caller_eval_cost')\n prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')\n prob.methods = METHODS\n prob.save_results_dir = SAVE_RESULTS_DIR\n prob.reporter = get_reporter('default')\n # evaluation options\n prob.evaluation_options = Namespace(prev_eval_points='none',\n initial_pool_size=_initial_pool_size)\n return prob", "def run(self):\n if self.pp['net']:\n space = {\n # Qlearnnet\n 'net_lr': hp.loguniform('net_lr', np.log(5e-7), np.log(1e-4)),\n 'net_lr_decay': hp.loguniform('net_lr_decay', np.log(0.90), np.log(0.99)),\n # Singh\n # 'net_lr': hp.loguniform('net_lr', np.log(1e-7), np.log(5e-4)),\n 'beta': hp.uniform('beta', 16, 30),\n # Double\n 'net_copy_iter': hp.loguniform('net_copy_iter', np.log(5), np.log(150)),\n 'net_creep_tau': hp.loguniform('net_creep_tau', np.log(0.01),\n np.log(0.7)),\n # Exp. 
replay\n 'batch_size': scope.int(hp.uniform('batch_size', 8, 16)),\n 'buffer_size': scope.int(hp.uniform('buffer_size', 2000, 10000)),\n # N-step\n 'n_step': scope.int(hp.uniform('n_step', 3, 40)),\n # Policy\n 'vf_coeff': hp.uniform('vf_coeff', 0.005, 0.5),\n 'entropy_coeff': hp.uniform('entropy_coeff', 1.0, 100.0)\n }\n else:\n space = {\n 'beta': hp.uniform('beta', 7, 23),\n 'alpha': hp.uniform('alpha', 0.0001, 0.4),\n 'alpha_decay': hp.uniform('alpha_decay', 0.9999, 0.9999999),\n 'epsilon': hp.loguniform('epsilon', np.log(0.2), np.log(0.8)),\n 'epsilon_decay': hp.uniform('epsilon_decay', 0.9995, 0.9999999),\n 'gamma': hp.uniform('gamma', 0.7, 0.90),\n 'lambda': hp.uniform('lambda', 0.0, 1.0)\n }\n # Only optimize parameters specified in args\n space = {param: space[param] for param in self.pp['hopt']}\n if self.pp['hopt_fname'].startswith('mongo:'):\n self._hopt_mongo(space)\n else:\n self._hopt_pickle(space)", "def generate_keys(self):\n\n\t\tcondition = False\n\t\t\n\t\t\t\n\t\twhile (not condition) :\n\t\t\t# step 1 : chose random primary numbers p and q\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._p = n\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\twhile(n == self._p):\n\t\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._q = n\n\n\t\t\t#step 2 : compute n = pq\n\t\t\tself.n = self._p * self._q\n\t\t\t\n\t\t\ta = find_invpow(self.n,4) // 3\n\t\t\tcondition = (self._p > self._q) and (self._p < 2 * self._q)\n\t\t\tif (not condition) :\n\t\t\t\tcontinue\n\n\t\t\tprint(\"step one OK\")\n\n\t\t\t#step 3 : compute phi(n)\n\t\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t\t#step 4 : chose the exponent\n\t\t\tn = randint(100,a)\n\t\t\twhile (gcd(self._phi,n) != 1):\n\t\t\t\tn = randint(100,self._phi)\n\t\t\tself._d = n\n\n\t\t\t#step 5 : compute d (private key)\n\t\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\t\tcondition = (self._d < a)\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(9)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_log: np.ndarray = 4 * np.random.rand(3) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(3)) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def gen_hash_params(self):\n m_bitlength = self.m.bit_length()\n hfs = []\n for _ in xrange(self.k):\n p = generateLargePrime(m_bitlength + 7)\n a = random.randint(1, p)\n b = random.randint(1, p)\n hfs.append((a, b, p))\n return hfs", "def el_ph(om,eig,q,zs,mass,eps,rG,nmodes,nqpt,nat):\n\n # Initiate\n g = np.zeros((nqpt,nmodes),dtype=complex)\n\n # Initiate q+G\n qpG = np.zeros((nqpt,3))\n\n q_c = q[:,0:3] \n q2 = np.zeros(nqpt)\n N = 5 # Initial size of G-point grid used for sum\n\n alpha = 5.0 # Convergence parameter\n\n for nn in range(-N,N+1):\n for mm in range(-N,N+1):\n for ll in range(-N,N+1):\n #\n for ic in range(3):\n qpG[:,ic] = q_c[:,ic] + nn*rG[0,ic] + mm*rG[1,ic] + 
ll*rG[2,ic]\n # IMPORTANT : Put a check here that qpG is nonzero! (break the loop if so)\n # Denominator\n q2[:] = 0.0\n for ia in range(3): \n for ib in range(3):\n q2[:] += qpG[:,ia]*eps[ia,ib]*qpG[:,ib]\n # \n inv_q2 = 1.0 / (q2 + 1e-10)\n arg = np.exp(-0.25 * np.sum(qpG**2, axis=1) / alpha) * inv_q2 # exp((q+G)^2/4a)\n \n for imod in range(nmodes):\n for ia in range(3):\n for ib in range(3):\n for iat in range(nat):\n g[:,imod] += arg[:]*qpG[:,ia]*zs[iat,ia,ib]*eig[imod,:,iat,ib] \\\n / np.sqrt(2.0*mass[iat]*np.abs(om[imod,:])+1e-10)\n\n return g", "def _core_init_params(self) :\n\t\ta_list,b_list = [],[]\n\t\tg_list,h_list = [],[]\n\t\t\n\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\th_eqn = self.initsol['h'][eqn-1]\n\t\t\tg_eqn = self.initsol['g'][eqn-1]\n\n\n\t\t\ta_list.append(self.initsol['alpha'][eqn-1])\n\t\t\tb_list.append(self.initsol['beta'][eqn-1])\n\t\t\t\n\t\t\tg_eqn = np.array([g_eqn[reg-1] for reg in reg_p])\n\t\t\th_eqn = np.array([h_eqn[reg-1] for reg in reg_d])\n\t\t\th_list.append(h_eqn)\n\t\t\tg_list.append(g_eqn)\n\t\n\t\treturn (a_list,b_list,g_list,h_list)", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def __init__(self, m1=None, m2=None):\n if m1 == None and m2 == None:\n \n TMS = TwoMechSetup()\n self.m1 = TMS.m1\n self.m2 = TMS.m2\n \n else:\n #takes this path if called from PairExpt\n self.m1 = m1\n self.m2 = m2\n \"\"\" \n self.m1.rates = m1.rates\n self.m2.rates = m2.rates\n self.m1.open_states = m1.open_states\n self.m2.open_states = m2.open_states\n self.m1.N_states = m1.N_states\n self.m2.N_states = m2.N_states\n self.m1.param = m1.param\n self.m2.param = m2.param\n \"\"\"\n \n #Initialize hi and lo Qmats here\n \n self.m1.Qlo, self.m1.P_init_lo = generate_Q(\n self.m1.N_states, {1: self.m1.param.zero_conc}, self.m1.rates, \n self.m1.param.MR_rate, self.m1.param.MR_avoid)\n \n self.m1.Qhi, self.m1.P_init_hi = generate_Q(\n self.m1.N_states, {1: self.m1.param.high_conc}, self.m1.rates, \n self.m1.param.MR_rate, self.m1.param.MR_avoid)\n \n self.m2.Qlo, self.m2.P_init_lo = generate_Q(\n self.m2.N_states, {1: self.m2.param.zero_conc}, self.m2.rates, \n self.m2.param.MR_rate, self.m2.param.MR_avoid)\n \n self.m2.Qhi, self.m2.P_init_hi = generate_Q(\n self.m2.N_states, {1: self.m2.param.high_conc}, self.m2.rates, \n self.m2.param.MR_rate, self.m2.param.MR_avoid)", "def init_params():\n p = {}\n \n # p['rootFolder'] = 'C:/Users/Umberto Gostoli/SPHSU/Social Care Model II'\n # p['rootFolder'] = 'N:/Social Care Model Paper III'\n \n p['noPolicySim'] = False\n p['multiprocessing'] = True\n p['numberProcessors'] = 9\n p['numRepeats'] = 3\n \n p['startYear'] = 1860\n p['endYear'] = 2040\n p['thePresent'] = 2012\n p['statsCollectFrom'] = 1990\n p['regressionCollectFrom'] = 1960 \n p['implementPoliciesFromYear'] = 2020\n p['yearOutcome'] = 2015\n \n p['favouriteSeed'] = 123\n p['loadFromFile'] = False\n p['verboseDebugging'] = False\n p['singleRunGraphs'] = False\n p['saveChecks'] = True\n p['getCheckVariablesAtYear'] = 2015\n # To change through command-line arguments\n\n p['numberPolicyParameters'] = 2\n 
p['valuesPerParam'] = 1\n p['numberScenarios'] = 3\n \n ############ Policy Parameters #######################\n p['incomeCareParam'] = 0.0005 #[0.00025 - 0.001]\n p['taxBreakRate'] = 0.0\n p['ageOfRetirement'] = 65\n p['socialSupportLevel'] = 5\n # p['educationCosts']\n #############################################################\n p['socialCareCreditShare'] = 0.0\n p['maxWtWChildAge'] = 5\n # The basics: starting population and year, etc.\n \n p['discountingFactor'] = 0.03\n \n \n p['initialPop'] = 600 \n \n p['minStartAge'] = 24\n p['maxStartAge'] = 45\n p['numberClasses'] = 5\n p['socialClasses'] = ['unskilled', 'skilled', 'lower', 'middle', 'upper']\n p['initialClassShares'] = [0.2, 0.25, 0.3, 0.2, 0.05]\n p['initialUnemployment'] = [0.25, 0.2, 0.15, 0.1, 0.1]\n p['unemploymentAgeBandParam'] = 0.3\n \n # doDeath function parameters\n p['mortalityBias'] = 0.85 # After 1950\n p['careNeedBias'] = 0.9\n p['unmetCareNeedBias'] = 0.5\n p['baseDieProb'] = 0.0001\n p['babyDieProb'] = 0.005\n p['maleAgeScaling'] = 14.0\n p['maleAgeDieProb'] = 0.00021\n p['femaleAgeScaling'] = 15.5\n p['femaleAgeDieProb'] = 0.00019\n \n p['orphansRelocationParam'] = 0.5\n \n # doBirths function parameters\n p['minPregnancyAge'] = 17\n p['maxPregnancyAge'] = 42\n p['growingPopBirthProb'] = 0.215\n p['fertilityCorrector'] = 1.0\n p['fertilityBias'] = 0.9\n \n # careTransitions function parameters\n p['zeroYearCare'] = 80.0\n p['childcareDecreaseRate'] = 0.25\n p['personCareProb'] = 0.0008\n p['maleAgeCareScaling'] = 18.0 # p['maleAgeCareProb'] = 0.0008\n p['femaleAgeCareScaling'] = 19.0 # p['femaleAgeCareProb'] = 0.0008\n p['baseCareProb'] = 0.0002\n p['careBias'] = 0.9\n p['careTransitionRate'] = 0.7\n\n p['unmetNeedExponent'] = 1.0 # 0.005 #[0.005 - 0.02]\n \n p['numCareLevels'] = 5\n p['careLevelNames'] = ['none','low','moderate','substantial','critical']\n p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 32.0, 80.0 ]\n p['quantumCare'] = 4.0\n \n # careSupplies getCare and probSuppliers function parameters\n \n ######## Key parameter 1 ##############\n \n \n p['weeklyHours'] = 40.0\n \n \n p['priceChildCare'] = 0.76 # 6 \n p['schoolAge'] = 5\n p['maxFormalChildcareHours'] = 48\n p['schoolHours'] = 30\n p['freeChildcareHours'] = 15\n p['workingParentsFreeChildcareHours'] = 30\n p['minAgeStartChildCareSupport'] = 3\n p['minAgeStartChildCareSupportByIncome'] = 2\n p['maxHouseholdIncomeChildCareSupport'] = 40 # 320\n \n ######## Key parameter 2 ##############\n # 5: No public supply \n \n p['retiredHours'] = [48.0, 36.0, 20.0, 10.0] # 60.0\n p['studentHours'] = [24.0, 16.0, 8.0, 4.0]\n p['teenAgersHours'] = [16.0, 0.0, 0.0, 0.0]\n p['unemployedHours'] = [32.0, 24.0, 16.0, 8.0]\n p['employedHours'] = [28.0, 20.0, 12.0, 8.0]\n p['formalCareDiscountFactor'] = 0.5\n \n p['socialNetworkDistances'] = [0.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0]\n p['networkDistanceParam'] = 2.0\n p['socialCareWeightBias'] = 1.0\n p['unmetCareNeedDiscountParam'] = 0.5\n p['shareUnmetNeedDiscountParam'] = 0.5\n # p['pastShareUnmetNeedWeight'] = 0.5\n \n \n \n p['networkSizeParam'] = 10.0 # 1.0\n \n p['careSupplyBias'] = 0.5\n p['careIncomeParam'] = 0.001\n \n # Hospitalization Costs\n p['qalyBeta'] = 0.18\n p['qalyAlpha'] = 1.5\n p['qalyDiscountRate'] = 0.035\n p['qalyIndexes'] = [1.0, 0.8, 0.6, 0.4, 0.2]\n p['unmetCareHealthParam'] = 0.1\n p['hospitalizationParam'] = 0.5\n p['needLevelParam'] = 2.0\n p['unmetSocialCareParam'] = 2.0\n p['costHospitalizationPerDay'] = 400\n \n # ageTransitions, enterWorkForce and marketWage functions parameters\n 
p['ageTeenagers'] = 12\n p['minWorkingAge'] = 16\n \n ######## Key parameter 3 ##############\n \n p['careBankingSchemeOn'] = False\n p['socialCareBankingAge'] = 65\n \n p['absoluteCreditQuantity'] = False\n p['quantityYearlyIncrease'] = 0.0\n p['socialCareCreditQuantity'] = 0\n p['kinshipNetworkCarePropension'] = 0.5\n p['volunteersCarePropensionCoefficient'] = 0.01\n p['pensionContributionRate'] = 0.05\n \n p['hillHealthLevelThreshold'] = 3\n p['seriouslyHillSupportRate'] = 0.5\n \n ### Prices ####\n p['pricePublicSocialCare'] = 20.0 # [2.55] # 20\n p['priceSocialCare'] = 17.0 # [2.29] # 18\n p['taxBrackets'] = [663, 228, 0] # [28.16, 110.23] # [221, 865]\n p['taxBandsNumber'] = 3\n p['bandsTaxationRates'] = [0.4, 0.2, 0.0] # [0.0, 0.2, 0.4]\n # Tax Break Policy\n\n \n p['pensionWage'] = [5.0, 7.0, 10.0, 13.0, 18.0] # [0.64, 0.89, 1.27, 1.66, 2.29] # \n p['incomeInitialLevels'] = [5.0, 7.0, 9.0, 11.0, 14.0] #[0.64, 0.89, 1.15, 1.40, 1.78] # \n p['incomeFinalLevels'] = [10.0, 15.0, 22.0, 33.0, 50.0] #[1.27, 1.91, 2.80, 4.21, 6.37] # \n p['educationCosts'] = [0.0, 100.0, 150.0, 200.0] #[0.0, 12.74, 19.12, 25.49] # \n \n # Priced growth #####\n p['wageGrowthRate'] = 1.0 # 1.01338 # \n\n p['incomeGrowthRate'] = [0.4, 0.35, 0.35, 0.3, 0.25]\n \n # SES inter-generational mobility parameters\n p['leaveHomeStudentsProb'] = 0.5\n \n p['eduWageSensitivity'] = 0.2 # 0.5\n p['eduRankSensitivity'] = 3.0 # 5.0\n p['costantIncomeParam'] = 80.0 # 20.0\n p['costantEduParam'] = 10.0 # 10.0\n p['careEducationParam'] = 0.005 # 0.04\n \n \n \n # p['incEduExp'] = 0.25\n p['educationLevels'] = ['GCSE', 'A-Level', 'HND', 'Degree', 'Higher Degree']\n p['workingAge'] = [16, 18, 20, 22, 24]\n \n # doDivorce function parameters\n p['basicDivorceRate'] = 0.06\n p['variableDivorce'] = 0.06\n p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n p['divorceBias'] = 1.0\n \n # doMarriages function parameters\n p['deltageProb'] = [0.0, 0.1, 0.25, 0.4, 0.2, 0.05]\n p['incomeMarriageParam'] = 0.025\n p['studentFactorParam'] = 0.5\n ######## Key parameter 4 ##############\n p['betaGeoExp'] = 2.0 #[1.0 - 4.0]\n \n p['betaSocExp'] = 2.0\n p['rankGenderBias'] = 0.5\n p['basicMaleMarriageProb'] = 0.9\n p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0, 0.0 ]\n \n # jobMarket, updateWork and unemploymentRate functions parameters\n p['unemploymentClassBias'] = 0.75\n p['unemploymentAgeBias'] = [1.0, 0.55, 0.35, 0.25, 0.2, 0.2]\n p['numberAgeBands'] = 6\n p['jobMobilitySlope'] = 0.004\n p['jobMobilityIntercept'] = 0.05\n p['ageBiasParam'] = [7.0, 3.0, 1.0, 0.5, 0.35, 0.15]\n p['deltaIncomeExp'] = 0.05\n p['unemployedCareBurdernParam'] = 0.025\n # Potential key parameter\n p['relocationCareLossExp'] = 1.0 # 40.0 # \n p['incomeSocialCostRelativeWeight'] = 0.5\n \n p['firingParam'] = 0.2\n p['wageVar'] = 0.06\n p['workDiscountingTime'] = 0.75 # 0.8\n p['sizeWeightParam'] = 0.7\n p['minClassWeightParam'] = 1.0\n p['incomeDiscountingExponent'] = 4.0\n p['discountingMultiplier'] = 2.0\n #p['incomeDiscountingParam'] = 2.0\n \n # relocationPensioners function parameters\n p['agingParentsMoveInWithKids'] = 0.1\n p['variableMoveBack'] = 0.1\n p['retiredRelocationParam'] = 0.001 # 0.005\n \n # houseMap function parameters\n p['geoDistanceSensitivityParam'] = 2.0\n p['socDistanceSensitivityParam'] = 2.0\n p['classAffinityWeight'] = 4.0\n p['distanceSensitivityParam'] = 0.5\n \n # relocationProb 
function parameters\n p['baseRelocatingProb'] = 0.05\n p['relocationParameter'] = 1.0 \n p['apprenticesRelocationProb'] = 0.5\n #p['expReloc'] = 1.0\n \n # computeRelocationCost and relocation Propensity functions parameters\n p['yearsInTownSensitivityParam'] = 0.5\n \n ######## Key parameter 5 ##############\n p['relocationCostParam'] = 0.5 # 1.0 \n \n ######## Key parameter 6 ##############\n p['propensityRelocationParam'] = 2.0 # 2.0 \n p['denRelocationWeight'] = 0.5\n \n \n ## Description of the map, towns, and houses\n p['mapGridXDimension'] = 8\n p['mapGridYDimension'] = 12 \n p['townGridDimension'] = 70\n p['cdfHouseClasses'] = [ 0.6, 0.9, 5.0 ]\n p['ukMap'] = [[ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],\n [ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],\n [ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ]]\n p['ukClassBias'] = [[ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]\n p['mapDensityModifier'] = 0.6\n # p['numHouseClasses'] = 3\n # p['houseClasses'] = ['small','medium','large']\n \n ## Graphical interface details\n p['interactiveGraphics'] = False #True\n p['delayTime'] = 0.0\n p['screenWidth'] = 1300\n p['screenHeight'] = 700\n p['bgColour'] = 'black'\n p['mainFont'] = 'Helvetica 18'\n p['fontColour'] = 'white'\n p['dateX'] = 70\n p['dateY'] = 20\n p['popX'] = 70\n p['popY'] = 50\n p['pixelsInPopPyramid'] = 2000\n p['num5YearAgeClasses'] = 28\n p['careLevelColour'] = ['blue','green','yellow','orange','red']\n p['houseSizeColour'] = ['brown','purple','yellow']\n p['pixelsPerTown'] = 56\n p['maxTextUpdateList'] = 22\n \n # p['eduEduSensitivity'] = 0.5\n # p['mortalityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['fertilityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['divorceBias'] = [2.0, 1.5, 1.0, 0.75, 0.5]\n\n ## Transitions to care statistics\n \n ## Availability of care statistics\n \n #p['childHours'] = 5.0\n # p['employedHours'] = 12.0\n #p['homeAdultHours'] = 30.0\n #p['workingAdultHours'] = 25.0\n #p['maxEmployedHours'] = 60.0\n \n #p['lowCareHandicap'] = 0.5\n #p['hourlyCostOfCare'] = 20.0\n \n ## Fertility statistics\n \n # p['steadyPopBirthProb'] = 0.13\n # p['transitionYear'] = 1965\n \n ## Class and employment statistics\n # p['numClasses'] = 5\n # p['occupationClasses'] = ['lower','intermediate','higher']\n # p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]\n\n ## Age transition statistics\n # p['ageOfAdulthood'] = 17\n \n ## Marriage function parameters\n \n # p['basicFemaleMarriageProb'] = 0.25\n # p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # 
p['femaleMarriageProb'] = [0.01, 0.15, 0.3, 0.2, 0.1, 0.1, 0.06, 0.05, 0.02, 0.01, 0.01, 0.005]\n # p['maleMarriageProb'] = [0.005, 0.08, 0.25, 0.25, 0.15, 0.1, 0.07, 0.05, 0.03, 0.02, 0.01, 0.005]\n \n ## Leaving home and moving around statistics\n # p['probApartWillMoveTogether'] = 0.3\n # p['coupleMovesToExistingHousehold'] = 0.3\n # p['basicProbAdultMoveOut'] = 0.22\n # p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbSingleMove'] = 0.05\n # p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbFamilyMove'] = 0.03\n # p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]\n\n \n return p", "def setParams(self, p = 2):\n self.p = p\n self.l = p - 1\n self.id_ntot = {}\n self.id_y = {}\n self.id_W = {}\n self.id_X = {}\n for i in self.uniids:\n tracker = (self.data['id'] == i)\n self.id_ntot.update({i: np.sum(tracker)})\n self.id_y.update({i:\n self.data['weight'][tracker].reshape(np.sum(tracker), 1)})\n self.id_W.update({i: self._designMatrix_(p, tracker)})\n self.id_X.update({i:\n self._designMatrix_(self.l+1,tracker,is_X=True)})\n self.id_Z = self.id_W.copy()", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n ['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in 
the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def main(N,N_p,T,lb,ub,prob,N_vars,F_min,F_const,P_c_min,P_c_max):\n\n lb,ub,f,fu,D,U,P = initDE(N_p,lb,ub,prob)\n if N_p < 4:\n raise Exception(\"Sorry, there must be atleast a population of 4. Reccomended 20\")\n for t in np.arange(T):\n for i in np.arange(N_p):\n V = mutation(i,N_p,t,T,P,N_vars,F_min,F_const)\n\n U=crossover(f,P_c_min,P_c_max,i,D,V,P,U)\n\n for j in np.arange(N_p): \n N,f,P = boundgreed(N,j,U,P,f,fu,ub,lb,prob)\n\t\n\t\t#if N == 500:\n\t\t\t#break\n best_of_f= min(f)\n globopt = P[f.argmin()]\n return N,best_of_f, globopt[:N_vars]", "def diffy_hellman(field, a_value, b_value, point):\n a_comb, b_comb = int(), int()\n while a_comb == b_comb:\n a_comb = randint(1, sqrt(field) // 2)\n b_comb = randint(1, sqrt(field) // 2)\n print(\"Next factors have been generated:\")\n print(\"alhpha: \", a_comb)\n print(\"beta: \", b_comb)\n try:\n a_point = multiply_point(point, a_comb, field, a_value, b_value)\n b_point = multiply_point(point, b_comb, field, a_value, b_value)\n a_secret = multiply_point(b_point, a_comb, field, a_value, b_value)\n b_secret = multiply_point(a_point, b_comb, field, a_value, b_value)\n except ValueError:\n print(\"Got a point an eternity... 
Please, repeat DF-algorythm\")\n return\n if a_secret != b_secret:\n print(\"Something has terribly gone wrong...\")\n return\n else:\n print(\"Common secret key has been succesfully generated\")\n return a_secret", "def _compute_guess_p(rho_l, u_l, p_l, c_l, rho_r, u_r, p_r, c_r):\n quser = 2.0\n p_linearized = 0.5 * (p_l + p_r) + 0.5 * (u_l - u_r) * \\\n 0.25 * (rho_l + rho_r) * (c_l + c_r)\n p_linearized = max(0.0, p_linearized)\n p_min = min(p_l, p_r)\n p_max = max(p_l, p_r)\n qmax = p_max / p_min\n if(\n qmax <= quser and (p_min <= p_linearized and\n p_linearized <= p_max)\n ):\n \"\"\"A Primitive Variable Riemann Solver (PMRS)\"\"\"\n return p_linearized\n else:\n \"\"\"A Two-Rarefaction Riemann Solver (TRRS)\"\"\"\n if p_linearized < p_min:\n p_lr = (p_l / p_r)**gm1_2g\n u_linearized = (p_lr * u_l / c_l + u_r / c_r + (2 / gm1) *\n (p_lr - 1.0)) / (p_lr / c_l + 1.0 / c_r)\n return (\n 0.5 * (p_l * (1.0 + gm1_2 * (u_l - u_linearized) /\n c_l)**(1.0 / gm1_2g) +\n p_r * (1.0 + gm1_2 * (u_linearized - u_r) / c_r) **\n (1.0 / gm1_2g))\n )\n else:\n \"\"\"A Two-Shock Riemann Solver (TSRS)\"\"\"\n gL = sqrt(((2 / gp1) / rho_l) /\n (gm1_gp1 * p_l + p_linearized))\n gR = sqrt(((2 / gp1) / rho_r) /\n (gm1_gp1 * p_r + p_linearized))\n return (gL * p_l + gR * p_r - (u_r - u_l)) / (gL + gR)", "def get_problem():\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('Hannibal_HPAdemo')\n problem.mode='analytical' #Other options: 'numerical', 'dae'\n\n #Define independent variables\n problem.independent('t', 's')\n\n #~~~~~!!!\n #problem.quantity('terrain3','(-0.3*exp(-0.5*((x-2.7)^2+1.5*(y-2.1)^2))+2.6*exp(-0.55*(0.87*(x-6.7)^2+(y-2.2)^2))+2.1*exp(-0.27*(0.2*(x-5.5)^2+(y-7.2)^2))+1.6*(cos(0.8*y))^2*(sin(0.796*x))^2)')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n\n # Define controls\n problem.control('hdg','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('(1-w)+w*V*conv*elev*terrain(x,y)', 's')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .terminal('x-x_f','m') \\\n .terminal('y-y_f','m')\n\n #Define constants\n problem.constant('w',0.9,'1') #Initial Terrain weighting factor\n problem.constant('conv',1,'s/m^2') #Integral conversion factor\n problem.constant('V',1,'m/s') #Vehicle speed\n problem.constant('elev',1,'m') #Initial Elevation\n\n #Unit scaling\n problem.scale.unit('m',1) \\\n .unit('s',1) \\\n .unit('rad',1)\n\n #Configure solver\n #problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=8)\n problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=15, verbose = True, cached = False)\n\n #Initial Guess (Classic test example [4.9,0.4])\n problem.guess.setup('auto',start=[9.0,0.5], costate_guess=[0.0,-0.1]) #City A\n #problem.guess.setup('auto',start=[4.9,0.4], costate_guess=[0.1,-0.1]) #City A\n\n #Add Continuation Steps (Classic test example [7.2,8.5]) [8, 4.5]\n problem.steps.add_step(strategy='HPA',hweight=0.9) \\\n .terminal('x', 3.0, 10) \\\n .terminal('y', 9.5, 10) \\\n # .const('w', 0.9, 2, confined=True)\n\n #problem.steps.add_step(strategy='manual').num_cases(10) \\\n # .terminal('x', 3.0) \\\n # .terminal('y', 9.5) \\\n\n #problem.steps.add_step().num_cases(30) \\\n # .const('w',0.99) #Final Terrain weighting factor\n\n\n return problem", "def gen_params(self) -> Dict:\n 
param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_name: List[str] = list(itertools.compress(gX_name, list(self.channel_bool.values())[:9]))\n gX_log: np.ndarray = 4 * np.random.rand(len(gX_name)) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(len(gX_name))) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_name: List[str] = list(itertools.compress(gR_name, list(self.channel_bool.values())[9:12]))\n gR_log: np.ndarray = 4 * np.random.rand(len(gR_name)) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(len(gR_name))) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n\n if self.channel_bool['ca']:\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n param_dict.update(tCa_dict)\n\n return param_dict", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def __init__ (self, p, q):\n self.n = p * q\n self.n_sq = self.n * self.n\n self.g = self.n + 1", "def enthalpy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n h = g - temp*g_t\n return h", "def set_params_hmm_exp1(hmm) :\n\thmm.length = 12\n\thmm.dims = [(2,3)]*hmm.length # (latent,emit) dimspace\n\thmm.emit = 
[\n\t\t[[0.6,0.2,0.2],[0.2,0.6,0.2]]\n\t]*hmm.length\n\thmm.trans = [\n\t\t[[0.7,0.3],[0.3,0.7]]\n\t]*hmm.length\n\thmm.seqmap = [{'a':0,'b':1}]*hmm.length\n\thmm.seqmap2 = [{0:'a',1:'b'}]*hmm.length\n\thmm.featmap = [{'H':0,'B':1,'L':2}]*hmm.length\n\thmm.initprob = [0.5,0.5]\n\thmm.trained = True", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def learn_params(self, measurements, true_ranges):\n z_hit,z_short,z_max,z_rand,var_hit,lambda_short= self.params\n pre_params=[z_hit,z_short,z_max,z_rand,var_hit,lambda_short]\n updated_params=[-1,-1,-1,-1,-1,-1]\n while np.max(np.abs(np.array(updated_params) - np.array(pre_params))) > 1e-6:\n\n e_hit, e_short, e_max, e_rand = [], [], [], []\n for i in range(len(measurements)):\n true_range, measurement = true_ranges[i], measurements[i]\n p_hit = self.PHit(true_range, measurement,var_hit)\n p_short = self.PShort(true_range, measurement,lambda_short)\n p_max = self.PMax(measurement)\n p_rand = self.PRand(measurement)\n normalizer = 1.0 / (p_hit + p_short + p_max + p_rand)\n e_hit.append(normalizer * p_hit)\n e_short.append(normalizer * p_short)\n e_max.append(normalizer * p_max)\n e_rand.append(normalizer * p_rand)\n e_hit, e_short, e_max, e_rand = np.array(e_hit), np.array(e_short), np.array(e_max), np.array(e_rand)\n\n # perform M step\n pre_params = [z_hit, z_short, z_max, z_rand, var_hit,lambda_short]\n z_hit = sum(e_hit) / len(measurements)\n z_short = sum(e_short) / len(measurements)\n z_max = sum(e_max)/ len(measurements)\n z_rand = sum(e_rand) / len(measurements)\n var_hit = np.sqrt(1.0 / np.sum(e_hit) * np.sum(e_hit * (np.array(measurements)-np.array(true_ranges))**2)).item()\n lambda_short = (np.sum(e_short) / np.sum(e_short * np.array(measurements))).item()\n updated_params = [z_hit, z_short, z_max, z_rand, var_hit, lambda_short]\n print('origin',self.params)\n print('updated',updated_params)\n return updated_params", "def __init__(self, population=25, initSampling='lhc', fracMutation=0.2, fracElite=0.2, fracLevy=1.0, alpha=0.5, gamma=1, n=1, scalingFactor=10.0, penalty=0.0, maxGens=20000, maxFevals=200000, convTol=1e-06, stallLimit=10000, optConvTol=0.01, **kwargs):\n ProblemParameters_multi.__init__(self, **kwargs)\n self.population = population\n self.initSampling = initSampling\n self.fracMutation = fracMutation\n assert self.fracMutation >= 0 and self.fracMutation <= 1, 'The probability of discovery must exist on (0,1]'\n self.fracElite = fracElite\n assert self.fracElite >= 0 and self.fracElite <= 1, 'The elitism fraction must exist on (0,1]'\n self.fracLevy = fracLevy\n assert self.fracLevy >= 0 and self.fracLevy <= 1, 'The probability that a Levy flight is performed must exist on (0,1]'\n self.alpha = alpha\n self.gamma = gamma\n self.n = n\n self.scalingFactor = scalingFactor\n self.penalty = penalty\n self.maxGens = maxGens\n self.maxFevals = maxFevals\n self.convTol = convTol\n self.stallLimit = stallLimit\n self.optConvTol = optConvTol", "def params(dim):\r\n m = 3\r\n s = 1\r\n q = 2 ** (m - 1)\r\n while s < dim:\r\n m += 1\r\n s = m + math.factorial(m - 1) / (2 * math.factorial(m - 3))\r\n q = 2 ** (m - 1)\r\n\r\n return (\r\n m, q, s - dim)", "def setup(self):\n n = 0\n while n.bit_length() != self.n_len:\n p = q = 0\n while p % 4 != 3:\n p = self._gen_prime(self.n_len // 2)\n while p == q or q % 4 
!= 3:\n q = self._gen_prime(self.n_len // 2)\n n = p * q\n self.p = p\n self.q = q\n self.n = n", "def create_hparams(experiment):\n hparams = {}\n\n # General parameters.\n hparams['batch_size'] = 64\n hparams['eval_batch_size'] = 64\n hparams['learning_rate_warmup_steps'] = 2000\n hparams['learning_rate_constant'] = 1\n hparams['learning_rate'] = 0.001\n hparams['train_epoches'] = 200\n hparams['steps_per_epoch'] = 30\n hparams['train_steps'] = 1000 * 1000\n hparams['eval_steps'] = 100\n hparams['caption_optimizer'] = 't2t'\n hparams['clip_norm'] = 5.0\n hparams['train_files'] = ''\n hparams['eval_files'] = ''\n hparams['train_buffer_size'] = 2000\n hparams['eval_buffer_size'] = 500\n hparams['train_pixel_encoder'] = True\n hparams['debug'] = False\n hparams['distribution_strategy'] = 'mirrored'\n\n # Embedding parameters.\n hparams['embedding_file'] = ''\n hparams['word_vocab_path'] = ''\n hparams['glove_trainable'] = True\n hparams['vocab_size'] = 10000\n\n # View hierarchy encoder parameters.\n hparams['max_pixel_pos'] = 100\n hparams['max_dom_pos'] = 500\n hparams['screen_encoder'] = 'pixel_transformer'\n hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom']\n hparams['obj_text_aggregation'] = 'max'\n hparams['synthetic_screen_noise'] = 0.\n\n # General parameters.\n hparams['num_hidden_layers'] = 2\n hparams['hidden_size'] = 2\n hparams['filter_size'] = 2\n hparams['num_heads'] = 2\n hparams['dropout'] = 0.2\n hparams['layer_prepostprocess_dropout'] = 0.2\n hparams['attention_dropout'] = 0.2\n hparams['relu_dropout'] = 0.2\n\n transformer_hparams = model_params.BASE_PARAMS\n\n # Add parameters from transformer model.\n hparams.update(transformer_hparams)\n\n # Rewrite all the parameters from command-line flags.\n config = screen2words_experiment_config.experiments[experiment]\n hparams.update(config)\n\n return hparams", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu", "def generate_data(params, N, rng=(-7, 7)):\n hp = np.array(params)\n print(\"parameters for data generated from gp are : {0}\".format(hp))\n print(\"using a ExpSquared kernel\")\n gp = george.GP(hp[0] * kernels.ExpSquaredKernel(hp[1]))\n t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))\n\n #y = model(params, t)\n y = gp.sample(t)\n yerr = 1.e-5 #1 + 0.1 * np.random.randn(N)\n y += yerr\n\n return t, y, yerr", "def global_parameter_space():\n return [list(range(7, 22)),\n list(range(12, 27)),\n list(range(25, 40)),\n list(permutations(range(1, 5)))]", "def paramshuman(chrom, outdir, alpha_inter, gamma_inter, p_a, p_b, seed, diag, filter_high):\n simulatehuman.cmd_estimate_params(chrom, outdir, alpha_inter=alpha_inter, gamma_inter=gamma_inter,\n p_a=p_a, p_b=p_b, seed=seed, diag=diag, filter_high=filter_high,\n plot=True)", "def params(timeseries_input):\n # Settings for Nelder Mead Algorithm\n global timeseries\n timeseries=timeseries_input\n\n NumIters = 1 # First Iteration\n 
MaxIters = 1e3 # Maximum number of iterations\n Tolerance = 1e-5 # Tolerance on best and worst function values\n N = 5 # Number of Heston and Nandi parameters\n r = 0.01 / 252.0 # Risk Free Rate\n\n # Heston and Nandi parameter starting values (vertices) in vector form\n\n x = [[0 for i in range(N + 1)] for j in range(N)]\n x[0][0] = 5.02e-6;\n x[0][1] = 5.12e-6;\n x[0][2] = 5.00e-6;\n x[0][3] = 4.90e-6;\n x[0][4] = 4.95e-6;\n x[0][5] = 4.99e-6 # omega\n x[1][0] = 1.32e-6;\n x[1][1] = 1.25e-6;\n x[1][2] = 1.35e-6;\n x[1][3] = 1.36e-6;\n x[1][4] = 1.30e-6;\n x[1][5] = 1.44e-6 # alpha\n x[2][0] = 0.79;\n x[2][1] = 0.80;\n x[2][2] = 0.78;\n x[2][3] = 0.77;\n x[2][4] = 0.81;\n x[2][5] = 0.82 # beta\n x[3][0] = 427.0;\n x[3][1] = 421.0;\n x[3][2] = 425.0;\n x[3][3] = 419.1;\n x[3][4] = 422.1;\n x[3][5] = 430.0 # gamma\n x[4][0] = 0.21;\n x[4][1] = 0.20;\n x[4][2] = 0.22;\n x[4][3] = 0.19;\n x[4][4] = 0.18;\n x[4][5] = 0.205 # lambda\n\n # Run Nelder Mead and output Nelder Mead results\n B = NelderMead(LogLike, N, NumIters, MaxIters, Tolerance, x, r)\n\n #\tprint(\"Nelder Mead Minimization of Log-Likelihood for Heston and Nandi parameters\")\n #\tprint(\"---------------------------------\")\n #\tprint(\"omega = \", B[0])\n #\tprint(\"alpha = \", B[1])\n #\tprint(\"beta = \", B[2])\n #\tprint(\"gamma = \", B[3])\n #\tprint(\"lambda = \", B[4])\n #\tprint(\"Value of Objective Function = \", B[N])\n #\tprint(\"Number of Iterations = \", B[N+1])\n #\tprint(\"Persistence \", B[2]+B[1]*(B[3]**2) )\n #\tprint(\"---------------------------------\")\n\n # alpha,beta,gamma,omega,lambda\n return [B[1], B[2], B[3], B[0], B[4]]", "def getParameters():\r\n\r\n #dictionnary of all parameters\r\n parameters = {}\r\n\r\n #defalt values\r\n parameters['numChannels'] = [20, 'int'] # number of channels\r\n parameters['loFreq'] = [200, 'int'] # lower bound on frequencies\r\n parameters['hiFreq'] = [5000, 'int'] # upper bound on frequencies\r\n parameters['plotChannels'] = [False, 'boolean'] # if it should plot the Gammatone channels\r\n parameters['block_time'] = [10.0, 'float'] # in ms\r\n parameters['block_shift'] = [1.0, 'float'] # in ms\r\n parameters['selectChannels'] = [6, 'int'] # number of channels to activate at a single time\r\n\r\n # print values in console\r\n print('\\nHere are the default values for the parameters:')\r\n for key, val in parameters.items():\r\n print('{} = {}'.format(key, val[0]))\r\n\r\n # ask user if he wants to change them\r\n response = ''\r\n while(response not in {'y', 'n'}):\r\n response = input('Do you want to change the parameters? (y/n) ')\r\n\r\n if(response=='n'):\r\n return parameters\r\n \r\n description = \"\"\"\r\n numChannels (int): the number of channels/frequencies/electrodes to simulate\r\n loFreq (int): the lower bound on the frequencies (in Hz)\r\n hiFreq (int): the upper bound on frequencies (in Hz)\r\n plotChannels (boolean): if the Gammatone channels should be plotted\r\n block_time (float): length of blocks of time over whch to integrate (in ms)\r\n block_shift (float): length of shift between blocks (in ms)\r\n selectChannels (int): number of channels/frequencies to activate at a single time\r\n\r\n \"\"\"\r\n\r\n #ask if he wants to read description\r\n response = ''\r\n while(response not in {'y', 'n'}):\r\n response = input('Do you want to read the description of the parameters? 
(y/n) ')\r\n\r\n if(response == 'y'):\r\n print(description)\r\n\r\n #change the parameters\r\n for key, val in parameters.items():\r\n user_input = input('{} = '.format(key))\r\n if(parameters[key][1] == 'int'):\r\n parameters[key][0] = int(user_input)\r\n elif(parameters[key][1] == 'float'):\r\n parameters[key][0] = float(user_input)\r\n elif(parameters[key][1] == 'boolean'):\r\n parameters[key][0] = (user_input == 'True')\r\n \r\n return parameters", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def __init__ (self, p, q):\n self.lamb = (p-1) * (q-1)\n self.mu = modinv(self.lamb, (p * q))", "def config_params1(parameter):\n\n p = parameter['p']\n q = parameter['q']\n d = parameter['d']\n m = parameter['m']\n pdq_m = list(itertools.product(p, d, q,m)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def lfads_params(key, lfads_hps):\n keys = random.split(key, 9)\n\n data_dim = lfads_hps['data_dim']\n ntimesteps = lfads_hps['ntimesteps']\n enc_dim = lfads_hps['enc_dim']\n con_dim = lfads_hps['con_dim']\n ii_dim = lfads_hps['ii_dim']\n gen_dim = lfads_hps['gen_dim']\n factors_dim = lfads_hps['factors_dim']\n batch_size = lfads_hps['batch_size']\n ic_dim = enc_dim # Could make a different HP via a linear layer\n ib_dim = lfads_hps['ib_dim'] # inferred bias is a static input to generator\n z_dim = ic_dim + ib_dim + ntimesteps * ii_dim\n gmm_size = lfads_hps['gmm_size']\n\n ic_enc_params = {'fwd_rnn' : gru_params(keys[0], enc_dim, data_dim),\n 'bwd_rnn' : gru_params(keys[1], enc_dim, data_dim)}\n post_ib_params = affine_params(keys[2], 2*ib_dim, 2*enc_dim) # m, v <- bi \n post_ic_params = affine_params(keys[3], 2*gen_dim, 2*enc_dim) # m, v <- bi\n \n prior_params = gmm_params(keys[4], gmm_size, z_dim)\n con_params = gru_params(keys[5], con_dim, 2*enc_dim + factors_dim + ii_dim)\n con_out_params = affine_params(keys[6], 2*ii_dim, con_dim) #m, v\n gen_params = gru_params(keys[7], gen_dim, ii_dim + ib_dim)\n factors_params = linear_params(keys[8], factors_dim, gen_dim)\n lograte_params = affine_params(keys[9], data_dim, factors_dim)\n\n return {'ic_enc' : ic_enc_params,\n 'post_ib' : post_ib_params,\n 'post_ic' : post_ic_params,\n 'con' : con_params, 'con_out' : con_out_params,\n 'gmm' : prior_params,\n 'gen' : gen_params, 'factors' : factors_params,\n 'f0' : np.zeros((lfads_hps['factors_dim'],)),\n 'ii0' : np.zeros((lfads_hps['ii_dim'],)),\n 'logrates' : lograte_params}", "def _starting_hparams():\n hparams = contrib_training.HParams()\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('learning_rate', 0.0005)\n hparams.add_hparam('lr_decay_rate', .997)\n hparams.add_hparam('lr_decay_steps', 1000)\n hparams.add_hparam('lr_warmup_steps', 3000)\n hparams.add_hparam('model_type', 'cnn')\n hparams.add_hparam('resnet_bottleneck_factor', 0.5)\n hparams.add_hparam('decision_threshold', 0.5)\n hparams.add_hparam('denominator_power', 1.0) # Standard mean-pooling.\n return hparams", "def obtain_training_parameters(para, x, y, alg = 'LR'):\n \n \n global omega\n \n # Iterate to find the optimal parameters\n if alg == 'LR': # logistic regression\n omega = np.zeros((3, 1))\n alpha = para.step_size # step size\n for i in range(para.iteration):\n grad = np.zeros((3, 1))\n for i in range(len(x[:, 0])):\n grad += np.reshape(x[i, :], (3, 1)) * (-y[i] 
+ 1 / (1 + np.exp(-np.dot(x[i, :], omega))))\n omega -= alpha * grad \n \n elif alg == 'GNB': # Gaussian Naive Bayes\n # get counts for each class\n itszero = 0\n itsone = 0\n for i in range(len(y)):\n if y[i] == 1:\n itsone += 1\n else:\n itszero += 1\n \n # probability of see y\n theta0 = itszero / len(y)\n theta1 = 1 - theta0\n \n # mean of omega\n mew00 = 0\n mew01 = 0\n mew02 = 0\n mew10 = 0\n mew11 = 0\n mew12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n mew00 += x[i, 0] / itszero\n mew01 += x[i, 1] / itszero\n mew02 += x[i, 2] / itszero\n else:\n mew10 += x[i, 0] / itsone\n mew11 += x[i, 1] / itsone\n mew12 += x[i, 2] / itsone\n \n # variance of omega \n sigma00 = 0\n sigma01 = 0\n sigma02 = 0\n sigma10 = 0\n sigma11 = 0\n sigma12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n sigma00 += (x[i, 0] - mew00)**2 / itszero\n sigma01 += (x[i, 1] - mew01)**2 / itszero\n sigma02 += (x[i, 2] - mew02)**2 / itszero\n else:\n sigma10 += (x[i, 0] - mew10)**2 / itsone\n sigma11 += (x[i, 1] - mew11)**2 / itsone\n sigma12 += (x[i, 2] - mew12)**2 / itsone\n \n # store these parameters into the name \"omage\"\n omega = [theta0, theta1, mew00, mew01, mew02, mew10, mew11, mew12,\n sigma00, sigma01, sigma02, sigma10, sigma11, sigma12] \n \n else: # Gaussian Mixture\n pass\n \n return omega", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def evolveDSGParams(self, p, Q, Pe=None, evol='faber'):\n zmeans = ( self.zbins[1:] + self.zbins[:-1] ) / 2\n\n par = np.zeros((len(zmeans), 8))\n\n for i, z in enumerate(zmeans):\n par[i,:] = copy(p)\n\n if evol=='faber':\n par[i,4] += Q * (np.log10(z) + 1)\n par[i,6] += Q * (np.log10(z) + 1)\n elif evol=='z':\n par[i,4] += Q * (z - 0.1)\n par[i,6] += Q * (z - 0.1)\n if Pe!=None:\n par[i,0] *= 1 + Pe * (z - 0.3)\n par[i,2] *= 1 + Pe * (z - 0.3)\n par[i,5] *= 1 + Pe * (z - 0.3)\n elif evol=='a':\n par[i,4] += Q * (1. / (1 + z) - 1. / 1.1)\n par[i,6] += Q * (1. / (1 + z) - 1. 
/ 1.1)\n\n return par", "def _init_params(self):\n self.W_ems = []\n self.b_ems = []\n if self.rank_n_approx:\n W_em1 = self.init_fn[0](self.n_in,\n self.rank_n_approx,\n self.sparsity[0],\n self.scale[0],\n self.rng)\n W_em2 = self.init_fn[0](self.rank_n_approx,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em1 = theano.shared(W_em1,\n name='W1_0_%s'%self.name)\n self.W_em2 = theano.shared(W_em2,\n name='W2_0_%s'%self.name)\n self.W_ems = [self.W_em1, self.W_em2]\n\n else:\n W_em = self.init_fn[0](self.n_in,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em = theano.shared(W_em,\n name='W_0_%s'%self.name)\n self.W_ems = [self.W_em]\n\n self.b_em = theano.shared(\n self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),\n name='b_0_%s'%self.name)\n self.b_ems = [self.b_em]\n\n for dx in range(1, self.n_layers):\n W_em = self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n self.rng)\n W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name))\n self.W_ems += [W_em]\n\n b_em = theano.shared(\n self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),\n name='b_%d_%s'%(dx,self.name))\n self.b_ems += [b_em]\n\n self.params = [x for x in self.W_ems]\n\n if self.learn_bias and self.learn_bias!='last':\n self.params = [x for x in self.W_ems] + [x for x in self.b_ems]\n elif self.learn_bias == 'last':\n self.params = [x for x in self.W_ems] + [x for x in\n self.b_ems][:-1]\n self.params_grad_scale = [self._grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]\n self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]\n\n self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]", "def get_parameters(self):\n d = Algorithm.get_parameters(self)\n d.update({\n 'M': d.pop('population_size', self.population_size),\n 'num_tests': self.num_tests,\n 'num_searches': self.num_searches,\n 'num_searches_best': self.num_searches_best,\n 'bonus1': self.bonus1,\n 'bonus2': self.bonus2,\n 'num_enabled': self.num_enabled,\n 'local_searches': self.local_searches\n })\n return d", "def simulate_parameters(alg=\"IID_LINEAR\"):\n param_dict = dict()\n\n param = getfullargspec(DAG.erdos_renyi)\n update_param(param, param_dict)\n\n if alg == \"EVENT\":\n param = getfullargspec(Topology.erdos_renyi)\n update_param(param, param_dict, prefix=\"Topology_\")\n\n param = getfullargspec(THPSimulation.simulate)\n update_param(param, param_dict, prefix=\"THPSimulation_simulate_\")\n\n param = getfullargspec(THPSimulation.__init__)\n else:\n param = getfullargspec(IIDSimulation.__init__)\n update_param(param, param_dict, alg, prefix=\"\")\n return param_dict", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n 
self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def sbm_pp_parameters(k, p, q, sigma=0):\n omega = (p-q)*np.eye(k) + q*np.ones([k, k])\n\n pert = sigma*np.random.randn(*omega.shape)\n pert = np.tril(pert) + np.tril(pert, -1).T\n omega += pert\n np.maximum(omega, 0, omega)\n\n return omega", "def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)", "def load_hyperparams():\n #Load halo data (encoding='latin1' for Python3)\n with open('../Data/halo_data.pkl', 'rb') as halo_input:\n halo_data = pickle.load(halo_input, encoding='latin1')\n\n #Load interpolator\n with open('../Data/interpolator.pkl', 'rb') as interp:\n vpeak_Mr_interp = pickle.load(interp, encoding='latin1')\n\n #Cosmological params\n cosmo_params = {}\n cosmo_params['omega_b'] = 0.0 \n cosmo_params['omega_m'] = 0.286\n cosmo_params['h'] = 0.7\n\n #hyperparameters\n hparams = {}\n hparams['mpeak_cut'] = 10**7\n hparams['vpeak_cut'] = 10.\n hparams['vmax_cut'] = 9.\n hparams['orphan_radii_cut'] = 300.\n hparams['chi'] = 1.\n hparams['R0'] = 10.0\n hparams['gamma_r'] = 0.0\n hparams['beta'] = 0.\n hparams['O'] = 1.\n hparams['n_realizations'] = 5\n\n #prior hyperparameters\n prior_hparams = {}\n prior_hparams['alpha'] = np.array([-2.,-1.1])\n prior_hparams['sigma_M'] = np.array([0.,2.])\n prior_hparams['M50'] = np.array([7.35,10.85])\n prior_hparams['sigma_mpeak'] = np.array([1e-5,1.])\n prior_hparams['B'] = np.array([1e-5,3.])\n prior_hparams['A'] = np.array([10.,500.])\n prior_hparams['sigma_r'] = np.array([1e-5,2.])\n prior_hparams['n'] = np.array([0.,2.])\n prior_hparams['Mhm'] = np.array([5.,9.])\n\n #Orphan hyperparameters\n orphan_params = {}\n orphan_params['eps'] = 0.01 \n orphan_params['df'] = 1\n\n #Simulation and LMC indices\n sim_indices = {}\n sim_indices['host'] = [0,1]\n sim_indices['LMC'] = [0,0]\n\n return hparams, prior_hparams, cosmo_params, orphan_params, halo_data, sim_indices, vpeak_Mr_interp", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n 
self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def construct_param_dict(params,K_RC,K_CP,m_P):\n ###scaling constants\n w=params['w']\n pd=params['pd'] # in 3D and 0.21 in 2D\n pv=params['pv']\n Er=params['Er'] ;Ek=params['Ek']\n ER=params['ER'];EC=params['EC'];EP=params['EP'];\n Eq1=params['Eq1'];Eq2=params['Eq2']\n\n\n #capture success function\n a = params['a']\n b = params['b']\n c = params['c']\n formC = params['formC']\n formPC = params['formPC']\n formPR = params['formPR']\n \n ###variables\n TR= params['TR'] ;TC= params['TC'];TP=params['TP'];D_R= params['D_R']; D_C= params['D_C']\n K_RP=K_RC*K_CP\n fmC=params['fmC'];thermyR=params['thermyR']\n thermyC=params['thermyC'];thermyP=params['thermyP']\n fmPR=params['fmPR']\n fmPC=params['fmPC']\n m_C = K_CP*m_P;m_R = K_RP*m_P\n ###normalization constants and boltzmann constant\n r0 = params['r0']\n k0 = params['k0'] # will depend on the productivity of the habitat\n a01 = a02 = params['a012'] # will depedend on the dimension of the habitat \n a03 = params['a03']\n d0= params['d0']\n q10 = params['q10'];q20 = params['q20'];\n v0R = params['v0R'];v0C =params['v0C'];v0P =params['v0P'];k = b_k\n hC0 = params['hC0'];hP0 = params['hP0'] \n \n #intrapopulation parameters\n q1=set_q1(q10,m_C,w,Eq1,TR,k)\n q2=set_q2(q20,m_P,w,Eq2,TC,k)\n K=set_K(k0,m_R,w,Ek,TR,k)\n r=set_r(r0,m_R,w,Er,TR,k)\n\n #interpopulation parameters\n a1=set_alfa(m_C,a01,K_RC,pv,pd,TR,TC,ER,EC,D_R,v0R,v0C,g,alfa,fmC,thermyR,thermyC,k,a,b,c,formC)\n a2=set_alfa(m_P,a02,K_RP,pv,pd,TR,TP,ER,EP,D_R,v0R,v0P,g,alfa,fmPR,thermyR,thermyP,k,a,b,c,formPR)\n a3=set_alfa(m_P,a03,K_CP,pv,pd,TC,TP,EC,EP,D_C,v0C,v0P,g,alfa,fmPC,thermyC,thermyP,k,a,b,c,formPC)\n\n t_hp = set_th(hP0,m_P,w,EP,k,TP)\n t_hc = set_th(hC0,m_C,w,EC,k,TC)\n param_dict={'q1':q1,'q2':q2,'K':K,'r':r,'a1':a1,'a2':a2,'a3':a3,'t_hp':t_hp,'t_hc':t_hc}\n \n return param_dict", "def default_hparams():\n\n model_hparams = hparams.ModelHparams(\n model_name='imagenet_resnet_50',\n model_init='kaiming_normal',\n batchnorm_init='uniform',\n )\n\n dataset_hparams = hparams.DatasetHparams(\n dataset_name='imagenet',\n batch_size=1024,\n )\n\n training_hparams = hparams.TrainingHparams(\n optimizer_name='sgd',\n momentum=0.9,\n milestone_steps='30ep,60ep,80ep',\n lr=0.4,\n gamma=0.1,\n weight_decay=1e-4,\n training_steps='90ep',\n warmup_steps='5ep',\n )\n\n pruning_hparams = sparse_global.PruningHparams(\n pruning_strategy='sparse_global',\n pruning_fraction=0.2\n )\n\n return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams)", "def generate_parameter_sample(self, t): \n \n theta_hat = np.zeros(self.K) \n k = np.random.choice(self.Npar, 1, p=self.w)[0] # np.random.choice outputs an array\n theta_hat = self.Particles[k] \n return theta_hat", "def MILP_1(self,xData,uData):\n print \"\\n\",\"*\"*80,\"\\n\",\"MILP 1: Parameter Estimation\\n\",\"*\"*80 \n model=Model(\"parameters\")\n outflow={}\n d={}\n bigM=500\n Q_out={}\n Q_in={}\n N=max(l[1] for l in xData.keys())\n print \"x Data size is\",N\n N=max(l[1] for 
l in uData.keys())\n print \"u Data size is\",N\n for l in self.links:\n l.d=model.addVar(lb=0,ub=200,obj=0*l.type==\"road\") \n for t in range(1,N):\n d[l,t]=model.addVar(lb=0,ub=200,obj=1)\n for k in l.outgoing:\n outflow[l,k,t]=model.addVar(lb=0,ub=200)\n self.c[l,k]=model.addVar(lb=20,ub=200)\n self.beta[l,k]=model.addVar(lb=0.2,ub=0.8)\n self.alpha[l,k]=model.addVar(lb=0,ub=1)\n self.M[l,k]=model.addVar(lb=0,ub=200)\n d[\"outflow-1\",l,k,t]=model.addVar(vtype=GRB.BINARY) \n d[\"outflow-2\",l,k,t]=model.addVar(vtype=GRB.BINARY) \n model.update()\n for t in range(1,N):\n for l in self.links:\n if True:\n Q_out[l,t]=LinExpr()\n Q_in[l,t]=LinExpr()\n Q_out[l,t].addConstant(0)\n Q_in[l,t].addConstant(0)\n for k in l.outgoing:\n model.addConstr(outflow[l,k,t]<=self.beta[l,k]*uData[l,t]*xData[l,t])\n model.addConstr(outflow[l,k,t]<=self.M[l,k])\n model.addConstr(outflow[l,k,t]<=self.c[l,k]-self.alpha[l,k]*xData[k,t])\n model.addConstr(outflow[l,k,t]>=self.beta[l,k]*uData[l,t]*xData[l,t]+bigM*d[\"outflow-1\",l,k,t]-bigM)\n model.addConstr(outflow[l,k,t]>=self.M[l,k]+bigM*d[\"outflow-2\",l,k,t]-bigM)\n model.addConstr(outflow[l,k,t]>=self.c[l,k]-self.alpha[l,k]*xData[k,t]-bigM*d[\"outflow-1\",l,k,t]-bigM*d[\"outflow-2\",l,k,t])\n Q_out[l,t].add(outflow[l,k,t])\n for k in l.incoming:\n Q_in[l,t].add(outflow[k,l,t])\n if l.type==\"road\":\n model.addConstr(xData[l,t+1]<=xData[l,t]- Q_out[l,t] + Q_in[l,t] + d[l,t] + l.lambda_arrival) \n else:\n model.addConstr(xData[l,t+1]<=xData[l,t]- uData[l,t]*xData[l,t] + Q_in[l,t] + d[l,t] + l.lambda_arrival)\n for l in self.links:\n sum=LinExpr()\n for k in l.outgoing:\n sum.add(self.beta[l,k])\n model.addConstr(sum>=0)\n \n# J=QuadExpr()\n# for l in self.links:\n# for t in range(1,N):\n# if l.type==\"road\":\n# J.add(d[l,t]*d[l,t])\n# model.setObjective(J)\n model.optimize()\n for l in self.links:\n l.d=l.d.X\n for k in l.outgoing:\n self.beta[l,k]=self.beta[l,k].X\n self.c[l,k]=self.c[l,k].X\n self.alpha[l,k]=self.alpha[l,k].X\n self.M[l,k]=self.M[l,k].X\n for l in self.links:\n for t in range(1,N):\n l.d=max(d[l,t].X,l.d)\n \n \n\n \n if True:\n for t in range(1,N):\n print \"*\"*80,\"time=\",t\n for l in self.links:\n print \"\\n\",l,\"x is\",xData[l,t],\"u is\",uData[l,t],\"x+ is\",xData[l,t+1]\n for k in l.outgoing:\n print k,\"beta:\",self.beta[l,k],\"outflow\",outflow[l,k,t].X", "def gen_fhe_instance(n, q, alpha=None, h=None, m=None, seed=None):\n if seed is not None:\n set_random_seed(seed)\n\n q = next_prime(ceil(q)-1, proof=False)\n if alpha is None:\n alpha = ZZ(8)/q\n\n n, alpha, q = preprocess_params(n, alpha, q)\n\n stddev = stddevf(alpha*q)\n\n if m is None:\n m = n\n K = GF(q, proof=False)\n A = random_matrix(K, m, n)\n\n if h is None:\n s = random_vector(ZZ, n, x=-1, y=1)\n else:\n S = [-1, 1]\n s = [S[randint(0, 1)] for i in range(h)]\n s += [0 for _ in range(n-h)]\n shuffle(s)\n s = vector(ZZ, s)\n c = A*s\n\n D = DiscreteGaussian(stddev)\n\n for i in range(m):\n c[i] += D()\n\n return A, c", "def init_P_PHI_GIVEN_A():\n global P_PHI_GIVEN_A\n for k in TRUE_TASK_ACTION_TO_INTERFACE_ACTION_MAP.keys(): # task level action\n P_PHI_GIVEN_A[k] = collections.OrderedDict()\n for u in INTERFACE_LEVEL_ACTIONS:\n if u == TRUE_TASK_ACTION_TO_INTERFACE_ACTION_MAP[k]:\n # try to weight the true command more for realistic purposes. 
Can be offset by using a high PHI_GIVEN_A_NOISE\n P_PHI_GIVEN_A[k][u] = 1.0\n else:\n # P_PHI_GIVEN_A[k][u] = np.random.random()*PHI_GIVEN_A_NOISE #IF PHI_GIVEN_A_NOISE is 0, then the p(ui|a) is a deterministic mapping\n P_PHI_GIVEN_A[k][u] = 0.0\n\n delta_dist = np.array(P_PHI_GIVEN_A[k].values())\n uniform_dist = (1.0 / len(INTERFACE_LEVEL_ACTIONS)) * np.ones(len(INTERFACE_LEVEL_ACTIONS))\n blended_dist = (1 - PHI_GIVEN_A_NOISE) * delta_dist + PHI_GIVEN_A_NOISE * uniform_dist # np.array\n for index, u in enumerate(INTERFACE_LEVEL_ACTIONS):\n P_PHI_GIVEN_A[k][u] = blended_dist[index]", "def generate_keys(self, p, q, e):\n d = EucAlg(p, q)\n for i in d:\n if i == 0:\n raise Exception(\"p and q are not relatively prime.\")\n\n n = p*q\n phi_n = (p-1)*(q-1)\n d = EucAlg(e, phi_n)\n\n self._private_key = (d[0],n)\n self.public_key = (e,n)", "def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_kvhh', 'g_cav', 'g_kca', 'g_nap']\n gX_log: np.ndarray = 4 * np.random.rand(5) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(5)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(tCa_dict)\n return param_dict", "def _build_space(self, param_grid):\n if self.verbose>9:\n 'Building param space...'\n \n _warnings.filterwarnings('ignore')\n \n param_grid = param_grid.copy()\n space = {}\n for key in param_grid.keys():\n params = param_grid[key]\n \n if self.verbose>9:\n print('\\tinput:',key, params)\n \n type_str = str(type(params[0]))\n\n if 'float' in type_str or 'int' in type_str:\n \n min_ = min(params)\n max_ = max(params)\n log10_min_ = _np.log10(min_)\n log10_max_ = _np.log10(max_)\n\n if round(log10_max_)-round(log10_min_)>1 and round(log10_max_)-round(log10_min_)!=_np.inf: # use uniform distribution on log spacing \n \n space['log10.'+key] = _hyperopt.hp.uniform(key, log10_min_, log10_max_)\n \n if self.verbose>9:\n print('\\toutput:','log10.'+key, 'uniform', log10_min_, log10_max_)\n \n else:\n if 'int' in type_str:\n space[key] = _hyperopt.hp.quniform(key, min_, max_, 1)\n \n if self.verbose>9:\n print('\\toutput:',key, 'quniform', min_, max_)\n \n elif 'float' in type_str:\n space[key] = _hyperopt.hp.uniform(key, min_, max_)\n \n if self.verbose>9:\n print('\\toutput:',key, 'uniform', min_, max_)\n \n \n elif 'str' in type_str:\n space[key] = _hyperopt.hp.choice(key, [i for i in range(len(params))])\n \n if self.verbose>9:\n print('\\toutput:',key, 'choice', [i for i in range(len(params))])\n\n else:\n raise Exception('type(params[0]) is '+type_str+'. 
This type of hyperparameter is not yet supported.')\n\n assert(len(space.keys())==len(param_grid.keys())), 'len(space.keys())='+str(len(space.keys()))+', which is not equal to len(param_grid.keys())='+str(len(param_grid.keys()))\n \n if self.verbose>9:\n print('...finished building space')\n \n _warnings.filterwarnings('default')\n\n return space", "def __init__(self, num_hermgauss=20):\r\n super().__init__()\r\n\r\n gh_x, gh_w = np.polynomial.hermite.hermgauss(num_hermgauss)\r\n self.gh_x = torch.nn.Parameter(\r\n torch.from_numpy(gh_x[:, None, None].astype(NUMPY_DTYPE)),\r\n requires_grad=False)\r\n self.gh_w = torch.nn.Parameter(\r\n torch.from_numpy(gh_w[:, None, None].astype(NUMPY_DTYPE)),\r\n requires_grad=False)", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def __prepare_dh_params(self):\n self.alpha = symbols('alpha0:' + str(self.joint_count))\n self.a = symbols('a0:' + str(self.joint_count))\n self.q = symbols('q1:' + str(self.joint_count + 1))\n self.d = symbols('d1:' + str(self.joint_count + 1))", "def sim(p=0.5, ph=0.7, pl=0.8, n=20,sd=70):\n random.seed(sd)\n te=np.array(random.sample(range(1000),n-1))/1000#random draw\n random.seed(sd)\n rea=random.choices('HL',weights=(p,1-p),k=1) #initial realization at t0\n if rea[0]=='H':\n jp=p\n else:\n jp=round((1-p),2) \n for i in range(n-1): #compute realization from t1 to tn\n temp=te[i]\n if rea[-1]=='L': #using probability of low to low to be compared with draw\n if temp<=pl:\n rea.append('L')\n jp=jp*pl\n else:\n rea.append('H')\n jp=jp*round((1-pl), 2)\n else: #using probability of high to high to be compared with draw\n if temp<=ph:\n rea.append('H')\n jp=jp*ph\n else:\n rea.append('L')\n jp=jp*round((1-ph), 2)\n return (rea, jp)", "def ph(self,k,z=0):\n return self.p(k*self.h,z)*self.h**3", "def Get_params(numparams, dt, D):\n # bounds from table 1 Kowalek et al 2020\n Nmin, Nmax = 30, 600\n Bmin, Bmax = 1, 6\n Rmin, Rmax = 1, 17\n alphamin, alphamax = 0.3, 0.7\n Qmin, Qmax = 1, 9\n\n # Gen parameters\n Q = np.random.uniform(Qmin, Qmax, size=numparams)\n Q1, Q2 = Q, Q\n\n NsND = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsAD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsCD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsDM = np.random.randint(Nmin, Nmax + 1, size=numparams)\n TDM = NsDM * dt\n\n B = np.random.uniform(Bmin, Bmax, size=numparams)\n r_c = np.sqrt(D * NsCD * dt / B) # solving for r_c in eq. 
8 Kowalek\n\n R = np.random.uniform(Rmin, Rmax, size=numparams)\n v = np.sqrt(R * 4 * D / TDM) # solving for v in eq. 7 Kowalek\n\n alpha = np.random.uniform(alphamin, alphamax, size=numparams)\n\n # Compute sigma for ND, AD, CD from eq. 12 Kowalek\n sigmaND = np.sqrt(D * dt) / Q1\n sigmaAD = np.sqrt(D * dt) / Q1\n sigmaCD = np.sqrt(D * dt) / Q1\n\n # Compute sigma for DM from eq. 12 Kowalek\n sigmaDM = np.sqrt(D * dt + v ** 2 * dt ** 2) / Q2\n\n return np.array(\n [\n NsND,\n NsAD,\n NsCD,\n NsDM,\n D * np.ones(numparams),\n dt * np.ones(numparams),\n r_c,\n v,\n alpha,\n sigmaND,\n sigmaAD,\n sigmaCD,\n sigmaDM,\n ]\n ).T", "def random_configuration(self):\n q = {}\n for key in self.structure:\n if key != \"p0\":\n q[key] = [\n -pi + 2 * pi * np.random.rand(),\n np.abs(\n wraptopi(\n self.lb[key]\n + (self.ub[key] - self.lb[key]) * np.random.rand()\n )\n ),\n ]\n return q", "def demonstrate():\n\tprint('The following is a demonstration of the Diffie-Hellman-Encrypter.')\n\tprint('Requires two private values and max input for possibilites; returns a secret key')\n\n\tperson1 = input('Person 1: Please enter your name: ')\n\tperson2\t= input('Person 2: Please enter your name: ')\n\n\tpossible = input(\"Enter a maximum number of possibilites (Example: 10000) for your encrypter key: \")\n\tpublicMod = next_prime(int(possible))\n\t# publicMod = get_prime(20, 99999)\n\tprint(\"The public Modulus is\", str(publicMod) + '.')\n\tprint(\"Calculating random possible generators for\", str(publicMod) +':')\n\tpublicGen = get_generator(publicMod, 3, 999)\n\tprint(\"The public generator is\", str(publicGen) + '.\\n')\n\n\tglobal MAX\n\tMAX = int(input(\"Choose an upper limit for your secret numbers.\"\n\t\t\"\\nCaution: Smaller values increase potential for hacking. \"))\n\n\tperson1 = pEncryptor(publicMod, publicGen, person1)\n\tperson2 = pEncryptor(publicMod, publicGen, person2)\n\n\tpause(1)\n\n\twhile person1.secret > MAX or person1.secret <= 0:\n\t\tperson1.secret = int(input(person1.name + \", what's your secret number between\" \n\t\t\t\t+ \" 1 and \" + str(MAX) + \"? \"))\n\twhile person2.secret > MAX or person2.secret <= 0:\n\t\tperson2.secret = int(input(person2.name + \", what's your secret number between\" \n\t\t\t\t+ \" 1 and \" + str(MAX) + \"? 
\"))\n\n\tpause(2)\n\n\tperson1_pass = person1.encrypt()\n\tencryption_process(person1.name, person1.modulus, person1.generator, person1.secret, person1_pass)\n\tpause(1)\n\tprint('\\n' + person2.name, 'performs the same calculation...')\n\tpause(1)\n\tperson2_pass = person2.encrypt()\n\tencryption_process(person2.name, person2.modulus, person2.generator, person2.secret, person2_pass)\n\n\tpause(5)\n\n\tprint('\\nThen, they pass their calculated key values to one another: ')\n\tprint(person1.name + \" passes the number \" + str(person1_pass) \n\t\t+ \" to \" + person2.name + \".\")\n\tprint(person2.name + \" passes the number \" + str(person2_pass) \n\t\t+ \" to \" + person1.name + \".\")\n\n\tpause(2)\n\n\tprint(\"\\nThen, they both calculate the secret shared key with their own personal private value\" \n\t\t\"that is unlikely to be found by anyone attempting to find the secret shared key.\")\n\n\tperson1Message = person1.decrypt(person2_pass)\n\tdecryption_process(person1.name, person1.modulus, person2_pass, person1.secret, person1.key)\n\tpause(1)\n\tprint('\\n'+ person2.name, 'then does the same...')\n\tpause(1)\n\tperson2Message = person2.decrypt(person1_pass)\n\tdecryption_process(person2.name, person2.modulus, person1_pass, person2.secret, person2.key)\n\n\tpause(2)\n\n\tif person1Message == person2Message:\n\t\tprint(\"Success! The shared private message is:\", str(person1Message))\n\t\tpause(1)\n\t\tprint(\"This number is a secret key for secure communication\")\n\telse:\n\t\tprint(\"Uh Oh. Something went wrong :( Messages obtained were:\", str(person1Message), \n\t\t\t'and', str(person2Message) +'.\\n')\n\n\tprint(\"For more info, go to https://en.wikipedia.org/wiki/Diffie–Hellman_key_exchange\\\n\tto learn more about the Diffie–Hellman Key Exchange\\n\")", "def init_params(options):\n params = OrderedDict()\n\n # event embedding, shape = (n_events, dim_proj)\n randn = np.random.randn(options['n_events'],\n options['dim_proj'])\n params['Eemb'] = (0.1 * randn).astype(config.floatX)\n\n # shape = dim_proj * dim_proj\n gru_Wz = ortho_weight(options['dim_proj'])\n params['gru_Wz'] = gru_Wz\n gru_Wh = ortho_weight(options['dim_proj'])\n params['gru_Wh'] = gru_Wh\n gru_Wr = ortho_weight(options['dim_proj'])\n params['gru_Wr'] = gru_Wr\n\n # shape = dim_proj * dim_proj\n gru_Uz = ortho_weight(options['dim_proj'])\n params['gru_Uz'] = gru_Uz\n gru_Uh = ortho_weight(options['dim_proj'])\n params['gru_Uh'] = gru_Uh\n gru_Ur = ortho_weight(options['dim_proj'])\n params['gru_Ur'] = gru_Ur\n\n # shape = dim_proj\n gru_bz = np.random.rand(options['dim_proj']).astype(config.floatX)-0.5\n params['gru_bz'] = gru_bz\n gru_bh = np.random.rand(options['dim_proj']).astype(config.floatX)-0.5\n params['gru_bh'] = gru_bh\n gru_br = np.random.rand(options['dim_proj']).astype(config.floatX)-0.5\n params['gru_br'] = gru_br\n\n # for attention\n attp_q = init_params_weight(options['dim_proj'], options['dim_att'])\n params['attp_q'] = attp_q\n attp_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['attp_b'] = attp_b\n attp_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['attp_eta'] = attp_eta\n\n atts_q = init_params_weight(options['dim_proj'], options['dim_att'])\n params['atts_q'] = atts_q\n atts_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atts_b'] = atts_b\n atts_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atts_eta'] = atts_eta\n\n atti_q = 
init_params_weight(options['dim_proj'], options['dim_att'])\n params['atti_q'] = atti_q\n atti_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atti_b'] = atti_b\n atti_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atti_eta'] = atti_eta\n\n atta_q = init_params_weight(options['dim_proj'], options['dim_att'])\n params['atta_q'] = atta_q\n atta_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atta_b'] = atta_b\n atta_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atta_eta'] = atta_eta\n\n # decoding matrix for external influences\n W_ext = init_params_weight(options['dim_proj'],\n options['n_events'])\n params['W_ext'] = W_ext\n dec_b = np.random.rand(options['n_events']).astype(config.floatX)-0.5\n params['b_ext'] = dec_b.astype(config.floatX)\n\n return params", "def malthusiens(nb_init, t0, tf, eps, methode, gamma ) :\n\n f=lambda y, t : gamma*y\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def generate_params(self, randomize=True):\n pass", "def __init__(self, p=0.5):\n assert 0. <= p <= 1.\n self.p = p\n self.rng = T.shared_randomstreams.RandomStreams(seed=123456)\n self.params = []" ]
[ "0.6224027", "0.6129461", "0.61040443", "0.60493153", "0.5921859", "0.5847553", "0.58098495", "0.5763295", "0.5746084", "0.5744892", "0.57363373", "0.56992453", "0.5693567", "0.562936", "0.55725604", "0.5562822", "0.5549936", "0.55414885", "0.5523866", "0.5520235", "0.55157614", "0.54955804", "0.5480857", "0.54685396", "0.5468053", "0.5464597", "0.5463361", "0.5447293", "0.5446729", "0.5440649", "0.5440593", "0.5438626", "0.54358375", "0.5430809", "0.5425621", "0.54246676", "0.54200464", "0.5409459", "0.54071635", "0.54006886", "0.5389794", "0.53775126", "0.5375744", "0.53576255", "0.5356781", "0.53565896", "0.5356455", "0.5349311", "0.5344976", "0.534296", "0.5335909", "0.5331927", "0.53290445", "0.53288573", "0.5327026", "0.53262603", "0.5325767", "0.532453", "0.53213376", "0.53183514", "0.53158104", "0.5308843", "0.53057593", "0.5303607", "0.5297031", "0.52946836", "0.52917975", "0.5287118", "0.528371", "0.5283607", "0.5280432", "0.5275297", "0.5260079", "0.525596", "0.5250894", "0.52471864", "0.523551", "0.523053", "0.5225684", "0.52250516", "0.5218767", "0.52178514", "0.5210329", "0.5209784", "0.52067274", "0.5197665", "0.5189315", "0.51758623", "0.51736075", "0.5172909", "0.5171546", "0.51714927", "0.5170913", "0.51696646", "0.51653814", "0.51631606", "0.51628935", "0.5153231", "0.5152619", "0.5152146" ]
0.5730508
11
Returns the number of bits requires to represent n.
def numbits(n): return int(math.ceil(math.log(n, 2)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_bits_for_int(n: int):\n n = abs(int(n))\n bits = 0\n while n > 0:\n n >>= 1\n bits += 1\n return bits", "def count_bits(n):\n return sum(1 for x in bin(n) if x == \"1\")", "def bits(n):\n\n # Create a list of the first 1,000 binary numbers\n binary_list = reverse_binary_list()\n\n # Start by calculating number of 1's for n\n n_ones = num_of_ones(n, binary_list)\n\n # Calculate number of 1's for next value\n next_ones = 0\n while n_ones != next_ones:\n n = n + 1\n next_ones = num_of_ones(n, binary_list)\n\n return(n)", "def count_bits(dqxx, n):\n return sum([check_bit(x, n) for x in dqxx])", "def n_bits(self):\n return self._n_bits", "def _bit_storing_size(n):\n return -((-n) // 8)", "def countBits(n):\n binary = bin(n)[2:]\n counter = 0\n \n for i in binary:\n if i == '1':\n counter += 1\n \n return counter", "def number_of_bits(self):\n return self.numbits", "def _get_nr_of_bits(self):\n return sum(self._size_var)", "def calBitLen(n, p):\n m = int(-(n*math.log(p))/BloomFilter.ln2p2)\n # round up to 32 bits\n if m%32: m += (32-m%32)\n return m", "def number_of_bits(self) -> int:\n raise NotImplementedError('To be Overidden by the derived class')", "def get_set_bits_count(number: int) -> int:\n if number < 0:\n raise ValueError(\"the value of input must be positive\")\n result = 0\n while number:\n if number % 2 == 1:\n result += 1\n number = number >> 1\n return result", "def count(bits: int) -> int:\n return len(to_list(bits)) # I'm lazy", "def __len__(self):\r\n return numBits(self.n)", "def NumBits(self):\n num_bits = 8*len(self.output)\n if self.out_boff % 8:\n num_bits -= 8\n num_bits += self.out_boff\n if num_bits < 0:\n print \"What the...\"\n return num_bits", "def _generate_bitmask(n: int = 2, n_bits: int = 8) -> int:\n all_ones = 2 ** n_bits - 1\n cancel_bits = 2 ** n - 1\n return all_ones - cancel_bits", "def count_bits(x: int) -> int:\n num_bit: int = 0\n while x:\n # if odd, right most bit is 1\n num_bit += x & 1\n # shift to the right 1 bit\n x >>= 1\n\n return num_bit", "def bitlen(number):\n assert(isinstance(number, int))\n if number == 0:\n return 1\n else:\n return floor(log2(number)) + 1", "def count_ones(n):\n s = 0\n mask = 1\n for i in xrange(16):\n if (mask << i) & n:\n s += 1\n return s", "def compute_n_codes(n_bits, half_bit=None):\n n_bits, half_bit = parse_bits(n_bits, half_bit)\n if half_bit:\n return 2**(n_bits + 1) - 1\n else:\n return 2**n_bits", "def test_count_bits(n, result):\n from count_bits import count_bits\n assert count_bits(n) == result", "def num_bin(N, places=8):\n return [(N >> k) & 0x1 for k in range(places)]", "def shitty_count_set_bits(num:int) -> int:\n count = 0\n while num != 0:\n count += num & 1\n num >>= 1 # heh\n return count", "def getNbrOfBit(self):\n return DPxGetDinNumBits()", "def easy_count_set_bits(num):\n print('Counted {} set bits'.format(bin(num).count('1')))", "def _varint_cost(n: int):\n result = 1\n while n >= 128:\n result += 1\n n >>= 7\n return result", "def get_least_significant_bits(x, n):\n\n return x & ones(n)", "def countBits(x):\n # from https://stackoverflow.com/questions/10874012/how-does-this-bit-manipulation-work-in-java/10874449#10874449\n # Used because of the O(log(n)) complexity\n\n x = x - ((x >> 1) & 0x55555555)\n x = (x & 0x33333333) + ((x >> 2) & 0x33333333)\n x = (x + (x >> 4)) & 0x0F0F0F0F\n x = x + (x >> 8)\n x = x + (x >> 16)\n return x & 0x0000003F", "def ndigits(n):\n return len(str(abs(n)))", "def nextPowerOf2(n):\n count = 0; \n \n # First n in the below \n # condition is for the \n 
# case where n is 0 \n if (n and not(n & (n - 1))): \n return n \n \n while( n != 0): \n n >>= 1\n count += 1\n \n return 1 << count;", "def zernike_num_coeff(n):\n \n\tif not (n>=0):\n\t\tprint('Input parameter must be >= 0')\n\t\traise AssertionError() \n \n\treturn sum(xrange(n+1)) + n+1", "def orderByIncreasingBitCount(n):\n res = [0] # freebie\n biggest = 2**n - 1\n for i in range(1, n):\n for j in range(1, biggest):\n if hamming_weight(j) == i:\n res.append(j)\n res.append(biggest) # another freebie\n return res", "def count_significant_bits(input_x: int) -> int:\n x = input_x\n for i in range(x.bit_length()):\n if x & (1 << i) > 0:\n return x.bit_length() - i\n return 0", "def countBits(self, num: int) -> List[int]:\n x = [0] * (num + 1)\n for i in range(1, num + 1):\n x[i] = x[i & (i - 1)] + 1\n return x", "def solution(n: int) -> int:\n binary_gap = 0\n count = 0\n # skip the lowest zeros\n while n and (n & 1) == 0:\n n = n >> 1\n while n:\n while n & 1:\n n = n >> 1\n while n and (n & 1) == 0:\n count += 1\n n = n >> 1\n if n & 1 and binary_gap < count:\n binary_gap = count\n count = 0\n return binary_gap", "def pkcs_ilen(n):\n i = 0\n while n > 0:\n n >>= 8\n i += 1\n return i", "def example_count_set_bits(value):\n n = 0\n while value:\n n += 1\n value &= value-1\n return n", "def get_n_digit(num):\n cnt = 0\n while num & 1 != 1:\n num >>= 1\n cnt += 1\n # print(cnt)\n return cnt", "def number_bits_in_cardinality(self,card):\n return 32 - self.count_lead_zs(card)", "def twos_power_max(number):\n\n bits = bits_list(number)\n return len(bits) - 1", "def countBits(self, num: int) -> List[int]:\n x = [0] * (num + 1)\n for i in range(1, num + 1):\n x[i] = x[i >> 1] + (i & 1)\n return x", "def count_diff_bits(n1, n2):\n diff = n1 ^ n2\n bit_diff = make_bitstring(diff)\n return len([b for b in bit_diff if b == \"1\"])", "def hit_bin(self, n):\n # TODO: fix this monkey code!\n\n if n < 4:\n return n\n elif n << 3 == 0:\n return 4\n elif n << 4 == 0:\n return 5\n elif n << 5 == 0:\n return 6\n elif n >= 32 and n <= 127:\n return 7\n else:\n return 8", "def bitmask(n: int) -> int:\n if n >= 0:\n return (1 << n) - 1\n else:\n return -1 << -n", "def count_bits(num: int) -> list:\r\n # Type check arguments: raise Error\r\n counts = []\r\n for i in range(num+1):\r\n count = 0\r\n for bit in bin(i)[2:]:\r\n if bit == '1':\r\n count += 1\r\n counts.append(count) # rather than return a list, yield each count\r\n return counts", "def count_ways(n):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n else:\n total = 0\n for i in range(1, min(n, 3) + 1):\n total += count_ways(n - i)\n return total", "def solution2(n):\n ones = 0\n while n > 0:\n if n & 1:\n ones += 1\n n = n >> 1\n\n return 0 if ones % 2 == 0 else 1", "def minOperations(n):\n count = 0\n min_val = 2\n if n < 2:\n return 0\n while min_val <= n:\n if (n % min_val == 0):\n count = count + min_val\n n = n / min_val\n else:\n min_val = min_val + 1\n return (count)", "def getrandbits(k: int) -> int:\n ...", "def num_of_ones(n, binary_list):\n # Start with no 1's\n num_ones = 0 \n while n > 0: \n for num in binary_list:\n # Start with the largest binary number\n # 1 occurs when num is equal to n or n can be subtracted positively \n if (num - n) <= 0:\n # Add 1 to tally and subtract binary num\n num_ones += 1\n n = n - num\n # Continue with new n\n return num_ones", "def bits(self):\n return self._q.bit_length()", "def bits_to_bytes(n: int) -> int:\n return _round_bits(n, 8)", "def count_digits(n):\n return len(str(n))", "def 
size_as_number_of_bits(size):\n\n if size == 0:\n return 0\n else:\n return len('{:b}'.format(size))", "def test_get_n_bits_combinations():\n # Check n=1 - Pass\n assert layer_util.get_n_bits_combinations(1) == [[0], [1]]\n # Check n=2 - Pass\n assert layer_util.get_n_bits_combinations(2) == [[0, 0], [0, 1], [1, 0], [1, 1]]\n\n # Check n=3 - Pass\n assert layer_util.get_n_bits_combinations(3) == [\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 1],\n ]", "def compute_n_caps(n_bits, n_refs, half_bit=None):\n n_bits, half_bit = parse_bits(n_bits, half_bit)\n n_codes = compute_n_codes(n_bits, half_bit)\n\n assert n_refs >= 2, \"Need at least 2 references.\"\n assert (n_codes - 1) % (n_refs - 1) == 0, (\"Cannot match {} \"\n \"refs with {} codes.\".format(n_refs, n_codes))\n\n return ((n_codes - 1) // (n_refs - 1)) + 1", "def randbelow_from_randbits(self, n):\n k = int(n-1).bit_length()\n r = self.getrandbits(k) # 0 <= r < 2**k\n while int(r) >= n:\n r = self.getrandbits(k)\n return int(r)", "def get_1s_count(number: int) -> int:\n if not isinstance(number, int) or number < 0:\n raise ValueError(\"Input must be a non-negative integer\")\n\n count = 0\n while number:\n # This way we arrive at next set bit (next 1) instead of looping\n # through each bit and checking for 1s hence the\n # loop won't run 32 times it will only run the number of `1` times\n number &= number - 1\n count += 1\n return count", "def next_po2(n) -> int:\n if not n:\n return 1\n if is_po2(n):\n # n is a power of 2\n return n\n return 1 << (n - 1).bit_length()", "def get_min_run(n):\n r = 0\n while n >= 64:\n r |= n & 1\n n >>= 1\n return n + r", "def potential_witnesses(n):\n if n < 1373653: return [2, 3] # n < 1,373,653\n if n < 9080191: return [31, 73] # n < 9,080,191\n if n < 4759123141: return [2, 7, 61] # n < 4,759,123,141\n if n < 2152302898747: return [2, 3, 5, 7, 11] # n < 2,152,302,898,747\n if n < 3474749660383: return [2, 3, 5, 7, 11, 13] # n < 3,474,749,660,383\n if n < 341550071728321: return [2, 3, 5, 7, 11, 13, 17] # n < 341,550,071,728,321\n return [random.randint(1,n-1) for _ in xrange(0,20)] # 99.999999999909051% (1 - .25**20) accuracy for n >= 341,550,071,728,321", "def bit_length(self, ???):", "def num_squares(n):\n nums = _squares(n)\n\n dp = [0] * (n + 1)\n dp[0] = 0\n\n for i in range(1, n + 1):\n can = [j for j in nums if j <= i]\n dp[i] = 1 + min([dp[i - c] for c in can])\n\n return dp[n]", "def numSquares(self, n):\n # Generate perfect square numbers smaller than n.\n perfect_square_numbers = []\n i = 1\n square_i = i * i\n while square_i <= n:\n perfect_square_numbers.append(square_i)\n i += 1\n square_i = i * i\n\n cur_level = [n]\n count = 0\n while cur_level:\n count += 1\n tmp = []\n for num in cur_level:\n for val in perfect_square_numbers:\n if num == val:\n return count\n if val < num:\n tmp.append(num - val)\n if val > num:\n break\n cur_level = tmp\n return count", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def num_squares(n):\n squares = _squares(n)\n cnt = 0\n remains = {n}\n while remains:\n cnt += 1\n tmp = set()\n for remain in remains:\n for sq in [sqq for sqq in squares if sqq <= remain]:\n if remain == sq:\n return cnt\n else:\n tmp.add(remain - sq)\n remains = tmp", "def bitSizeOf() -> int:\n\n return 1", "def get_witnesses(n):\n assert (n > 2) and (n % 2 == 1)\n if n < 2047:\n # References: [1], [2], [4]\n witnesses = (2,)\n elif n < 1373653: # ~1.3 
million\n # References: [1], [2], [3], [4]\n witnesses = (2, 3)\n elif n < 9080191: # ~9.0 million\n # References: [3], [4]\n witnesses = (31, 73)\n elif n < 25326001: # ~25.3 million\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5)\n elif n < 3215031751: # ~3.2 billion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7)\n elif n < 4759123141: # ~4.7 billion\n # References: [3], [4]\n witnesses = (2, 7, 61)\n elif n < 2152302898747: # ~2.1 trillion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7, 11)\n elif n < 3474749660383: # ~3.4 trillion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7, 11, 13)\n elif n < 341550071728321: # ~341.5 trillion\n # References: [1], [2], [3], [4]\n witnesses = (2, 3, 5, 7, 11, 13, 17)\n elif n < 3825123056546413051: # ~3.8 million trillion\n # References: [1], [4]\n witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23)\n elif n <= 2**64:\n witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)\n else:\n witnesses = None\n \n return witnesses", "def get_total_complexity(n):\n total = 0\n for i in range(1, n + 1):\n total += get_complexity(i)\n return total", "def number_of_ways(n):\r\n return number_of_ways_helper([1, 5, 10, 25], n)", "def n_matrices_per_weight(weight, n_bits):\n return int(_sp.special.binom(n_bits, weight))", "def find_minrun(n: int) -> int:\n r = 0 # Becomes 1 if any bits are shifted off\n assert n >= 0\n while n >= 64:\n # The target of this while-loop:\n # If n is an exact power of 2, return 32;\n # otherwise, return int k in [32,64] such that n/k is close to, but strictly \n # less than, an exact power of 2 that is larger than 2^1=2.\n \n # | is `OR by bits`, & is `AND by bits`. ie r = r|(n&1).\n # The next two lines of code work as follows:\n # 1. If n is an exact power of 2, then for all loops, n&1=0, r=r|0=0|0=0, \n # and n is halved, until n=64 and is halved to 32, with r=0, so returns 32.\n # 2. Otherwise, then there must be at least one `1` among the second to the \n # last digits of n's binary form, eg.10010000. We scan from the rightmost digit # to the left, and whenever a 1 is met, r is 1. n will decrease to the n//2^k \n # that is closest to but less than 64. 
The target is met.\n #\n # In essence, this procedure is simply taking the first 6 bits of n, and add \n # 1 if any of the remaining bits is 1 (we call a bit that is 1 a \"set bit\").\n\n r |= n & 1\n n >>= 1 # move n's binary form all 1 digit to the right, ie n = n // 2\n # If n < 64, just return n, since it is too small to bother with fancy stuff\n return n + r", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def binbits(x, n):\n bits = bin(x).split('b')[1]\n if len(bits) < n:\n ans = '0' * (n - len(bits)) + bits\n else:\n ans = bits\n\n return ans", "def len_score(n):\n return len(n)", "def bitget(x, n):\n return (x >> n) & 1", "def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i", "def _next_power_of_2(x):\n return 1 if x == 0 else 2**(x - 1).bit_length()", "def bitSizeOf() -> int:\n\n return 16", "def bitSizeOf() -> int:\n\n return 64", "def num_clbits(self):\n return 0", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def cardinality(self):\n from sage.arith.all import binomial\n n = self._size\n if n == 0:\n return Integer(1)\n return (2 * binomial(4 * n + 1, n - 1)) // (n * (n + 1))\n # return Integer(2 * factorial(4*n+1)/(factorial(n+1)*factorial(3*n+2)))" ]
[ "0.81563795", "0.8084728", "0.7868972", "0.7752117", "0.75737745", "0.7487806", "0.74711466", "0.73270065", "0.72633255", "0.7196523", "0.7144834", "0.71082604", "0.7071569", "0.70644087", "0.7052807", "0.7040102", "0.7017897", "0.6989122", "0.68654424", "0.68466884", "0.68210346", "0.67855626", "0.6776359", "0.677261", "0.6758699", "0.67407715", "0.66907567", "0.6669358", "0.6648137", "0.66145134", "0.6603638", "0.6597021", "0.65930533", "0.657327", "0.6552098", "0.65057206", "0.65042067", "0.6477648", "0.64751744", "0.6452642", "0.6446708", "0.64217824", "0.6413652", "0.63765854", "0.6334397", "0.63334364", "0.63187635", "0.6316951", "0.63085014", "0.63056225", "0.6303329", "0.62875235", "0.6282949", "0.6282751", "0.62804615", "0.62773675", "0.62462", "0.6244804", "0.6236899", "0.6224728", "0.61993515", "0.6187163", "0.61803114", "0.6172239", "0.61443675", "0.61443675", "0.6138095", "0.61346513", "0.6124209", "0.61113745", "0.6110327", "0.6077622", "0.6062708", "0.6049273", "0.6049273", "0.6048355", "0.6045706", "0.6015401", "0.60100436", "0.5996054", "0.59893477", "0.59879345", "0.59876174", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.59855014", "0.5983029" ]
0.79052716
2
A dictionary to map required slots to an extracted entity
def slot_mappings(self): # type: () -> Dict[Text: Union[Dict, List[Dict]]] return {"use_case": self.from_text(intent="inform")}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"product\": [\n self.from_entity(entity=\"product\", intent=[\"inform\"]),\n ],\n \"applicant_name\": [\n self.from_entity(entity=\"applicant_name\", intent=[\"inform\"]),\n ],\n \"applicant_dob\": [\n self.from_entity(entity=\"applicant_dob\", intent=[\"inform\"]),\n ],\n \"applicant_phoneno\": [\n self.from_entity(entity=\"applicant_phoneno\", intent=[\"inform\"]),\n ],\n \"applicant_address\": [\n self.from_entity(entity=\"applicant_address\", intent=[\"inform\"]),\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"bug\":[self.from_entity(\n entity=\"bug\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"beverage\": [self.from_entity(\n entity=\"beverage\", \n intent=\"inform\"), \n self.from_text(\n intent=\"inform\")],\n \"second_person_plural\": [self.from_entity(\n entity=\"second_person_plural\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"cot_caught\": [self.from_entity(\n entity=\"cot_caught\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rain_sun\": [self.from_entity(\n entity=\"rain_sun\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"crawfish\": [self.from_entity(\n entity=\"crawfish\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"halloween\": [self.from_entity(\n entity=\"halloween\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"sandwich\": [self.from_entity(\n entity=\"sandwich\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"side_road\": [self.from_entity(\n entity=\"side_road\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"shoes\": [self.from_entity(\n entity=\"shoes\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"highway\": [self.from_entity(\n entity=\"highway\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"yard_sale\": [self.from_entity(\n entity=\"yard_sale\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rubbernecking\": [self.from_entity(\n entity=\"rubbernecking\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"frosting\": [self.from_entity(\n entity=\"frosting\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"lawyer\": [self.from_entity(\n entity=\"lawyer\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"kitty_corner\": [self.from_entity(\n entity=\"kitty_corner\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"firefly\": [self.from_entity(\n entity=\"firefly\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"verge\": [self.from_entity(\n entity=\"verge\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"brew_thru\": [self.from_entity(\n entity=\"brew_thru\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"water_fountain\": [self.from_entity(\n entity=\"water_fountain\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")]\n }", "def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"ingredient\": self.from_entity(entity=\"ingredient\",\n not_intent=\"greet\")}", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"search_type\": [\n self.from_trigger_intent(\n intent=\"search_transactions\", value=\"spend\"\n ),\n self.from_trigger_intent(\n intent=\"check_earnings\", 
value=\"deposit\"\n ),\n ],\n \"time\": [\n self.from_entity(entity=\"time\"),\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"amount_of_money\": [\n self.from_entity(entity=\"amount-of-money\"),\n self.from_entity(entity=\"number\"),\n ],\n \"confirm\": [\n self.from_intent(value=True, intent=\"affirm\"),\n self.from_intent(value=False, intent=\"deny\"),\n ],\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n # return { \"faq_choice\": self.from_entity(\"faq_choice\"),\"faq_question\": self.from_entity(\"faq_question\"), \"faq_text\": [self.from_text()]}\n\n return {\"faq_choice\": [self.from_entity(\"faq_choice\"), self.from_text()], \"faq_text\": [self.from_text(), self.from_entity(entity=\"navigation\")]}", "def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"name\": [self.from_entity(entity=\"name\"),\n self.from_text()],\n \"roomcount\": [self.from_entity(entity=\"roomcount\"),\n self.from_text()],\n \"roomtype\": [self.from_entity(entity=\"roomtype\"),\n self.from_text()]}", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"payment_amount\": [\n self.from_entity(entity=\"payment_amount\"),\n self.from_entity(entity=\"amount-of-money\"),\n self.from_entity(entity=\"number\"),\n ],\n \"confirm\": [\n self.from_intent(value=True, intent=\"affirm\"),\n self.from_intent(value=False, intent=\"deny\"),\n ],\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict[Text, Any]]]]:\n return {\"use_case\": self.from_text(intent=\"inform\")}", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"fecha_hora\": [\n self.from_entity(entity=\"time\"),\n\n ]\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"fecha_hora\": [\n self.from_entity(entity=\"time\"),\n\n ]\n }", "def slots(self):\n highSlots = self._getAttribute(Attribute.highSlots)\n medSlots = self._getAttribute(Attribute.medSlots)\n lowSlots = self._getAttribute(Attribute.lowSlots)\n\n if None in [highSlots, medSlots, lowSlots]:\n # This is a T3 ship.\n highSlots = medSlots = lowSlots = 0\n\n # Get rigs and subs.\n rigSlots = self._getAttribute(Attribute.rigSlots, 0)\n subSlots = self._getAttribute(Attribute.subSlots, 0)\n\n # Get missile and turret slots.\n missileSlots = self._getAttribute(Attribute.missileSlots, 0)\n turretSlots = self._getAttribute(Attribute.turretSlots, 0)\n\n return {\n \"highSlots\": int(highSlots),\n \"medSlots\": int(medSlots),\n \"lowSlots\": int(lowSlots),\n \"rigSlots\": int(rigSlots),\n \"subSlots\": int(subSlots),\n \"turretSlots\": int(turretSlots),\n \"missileSlots\": int(missileSlots)\n }", "def convert_slots(slots: typing.List[Slot]) -> {}:\n resolved = {}\n\n for slot in slots:\n slot_name = slot.slot_name\n slot_value = slot.value\n\n slot_entity = slot.entity\n if slot_entity.startswith('snips/'):\n resolved[slot_name] = slot.value\n resolved[slot_name + '_raw'] = slot.raw_value\n else:\n # 
assuming Rasa NLU slot\n slot_extractor = slot_value['extractor']\n if not slot_extractor:\n slot_extractor = 'Unknown'\n else:\n del slot_value['extractor']\n\n if slot_name not in resolved:\n resolved[slot_name] = {}\n if slot_extractor not in resolved[slot_name]:\n resolved[slot_name][slot_extractor] = []\n\n # take the text entity extractor as the raw value\n if slot_extractor == 'CRFEntityExtractor':\n resolved[slot_name + '_raw'] = slot.raw_value\n\n resolved[slot_name][slot_extractor].append(slot_value)\n\n return resolved", "def initSlotObjectDict(cls):\n restslotattributedict.update(dict({extension_tunnel: \"name\"}))\n restslotattributedict.update(dict({extension_circuit: \"name\"}))\n restslotattributedict.update(dict({extension_ip_interface: \"name\"}))\n restslotattributedict.update(dict({extension_ip_route: \"name\"}))\n restslotattributedict.update(dict({gigabitethernet: \"name\"}))\n restslotattributedict.update(dict({blade: \"slot_number\"}))", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"ingredient\"]", "def slot_info(self) -> Dict[int, Tuple[int, str, int]]:\n return self._slot_info", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"product\", \"applicant_name\", \"applicant_dob\", \"applicant_phoneno\", \"applicant_address\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n \"tipo_lavado\"\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"numero_prendas\",\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"tipo_prenda\",\n \"tipo_compostura\"\n ]", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"PERSON\", \"amount_of_money\", \"confirm\"]", "def get_slots_query(scene: GameScene, entity: int):\n\n def query():\n\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = paper_doll.get_equipment()\n\n return [\n (k, scene.cm.get_one(Entity, v))\n for k, v in equipment.items()\n ]\n return query", "def required_slots(self,tracker) -> List[Text]:", "def get_assessment_part_mdata():\n return {\n 'assessment_part': {\n 'element_label': {\n 'text': 'assessment part',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'accepts an osid.id.Id object',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_id_values': [''],\n 'syntax': 'ID',\n 'id_set': [],\n },\n 'assessment': {\n 'element_label': {\n 'text': 'assessment',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'accepts an osid.id.Id object',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_id_values': [''],\n 'syntax': 'ID',\n 'id_set': [],\n },\n 'weight': {\n 'element_label': {\n 'text': 'weight',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'enter a cardinal value',\n 'languageTypeId': 
str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_cardinal_values': [None],\n 'syntax': 'CARDINAL',\n 'minimum_cardinal': None,\n 'maximum_cardinal': None,\n 'cardinal_set': []\n },\n 'allocated_time': {\n 'element_label': {\n 'text': 'allocated time',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'instructions': {\n 'text': 'enter a valid duration object.',\n 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),\n 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),\n 'formatTypeId': str(DEFAULT_FORMAT_TYPE),\n },\n 'required': False,\n 'read_only': False,\n 'linked': False,\n 'array': False,\n 'default_duration_values': [None],\n 'syntax': 'DURATION',\n 'date_time_set': [],\n },\n }", "def required_slots(tracker):\n print(tracker.get_slot('order_number'))\n return [\"order_number\"]", "def _packaged_dict_for_entity(rt):\n entity = rt.entity\n return {u'entity_id': entity.id,\\\n u'name': entity.aggregation_paths['_geo'][-1]}", "def to_dict(self):\n print(\"\\n\\nSTARTING...\")\n ea = db.session.query(entity_assets).filter(entity_assets.c.entity_id == self.id).all()\n print(\"\\n\\nmade it\", ea)\n em = db.session.query(entity_meters).filter(entity_meters.c.entity_id == self.id).all()\n est = db.session.query(entity_statuses).filter(entity_statuses.c.entity_id == self.id).all()\n \n return {\n \"id\": self.id,\n \"user_id\": self.user_id,\n \"creator\": self.user.username,\n \"type\": self.type,\n \"category\": self.category,\n \"title\": self.title,\n \"description\": self.description,\n \"color\": self.color,\n \"icon\": self.icon,\n \"image\": self.image,\n \"created_at\": self.created_at,\n\n \"location_id\": self.location_id,\n \"generated_id\": self.generated_id,\n \n \"assets\": [(a.asset_id, a.quantity) for a in ea],\n \"statuses\": [(s.status_id, s.expiry) for s in est],\n \"meters\": [(m.meter_id, m.points) for m in em],\n \"slots\": [(slot.slot_id, slot.filler_id) for slot in self.entity_slots],\n }", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"search_type\", \"time\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"domicilio\",\n \"fecha_hora\"\n ]", "def getSlotMap(self):\n slotMap = dict()\n for entry in self.slots:\n slotMap[entry] = self.__getattribute__(\"on_\" + entry)\n return slotMap", "def required_slots(tracker: Tracker) -> List[Text]:\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\",\"roomcount\",\"roomtype\"]", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"sucursal\",\n \"fecha_hora\"\n ]", "def _map_part_base(entry):\n dmt_entry = {}\n\n # derive DMT fields from QM data\n description = combined_description(entry[config.desc1], entry[config.desc2])\n\n classkey = entry[config.classkey]\n if classkey != 'PARTS':\n class_id = config.class_mapping.get(classkey, '!{0}!'.format(classkey))\n else:\n class_id = config.class_mapping_parts[entry[config.asbl_flag]]\n\n # default part type is 'P' (raw material), since there are so many\n part_type = config.part_type_mapping.get(entry[config.classkey], 'P')\n uom = config.uom_mapping[part_type]\n\n prefix = 'FSC' if entry[config.desc2][:3].upper() == 'FSC' else 'NCA'\n suffix = 'ASBL' if entry[config.asbl_flag] == '1' else 'COMP'\n prod_code = prefix + '-' + suffix if part_type == 'M' else 'PURCHASE'\n\n # 
create a dict from the constants, data from QM, and data mappings\n dmt_entry['Company'] = config.company\n dmt_entry['PartNum'] = entry[config.partnum]\n dmt_entry['SearchWord'] = description[:8].strip()\n dmt_entry['PartDescription'] = description\n dmt_entry['ClassID'] = class_id\n dmt_entry['IUM'] = uom\n dmt_entry['PUM'] = uom\n dmt_entry['TypeCode'] = part_type\n dmt_entry['NonStock'] = True\n dmt_entry['PricePerCode'] = config.price_per_code\n dmt_entry['ProdCode'] = prod_code\n dmt_entry['CostMethod'] = config.cost_method[part_type]\n dmt_entry['TrackLots'] = True if part_type == 'P' else False\n dmt_entry['SalesUM'] = uom\n dmt_entry['UsePartRev'] = (part_type == 'M')\n dmt_entry['SNFormat'] = config.sn_format if part_type == 'M' else ''\n dmt_entry['SNBaseDataType'] = config.sn_base_data_type if part_type == 'M' else ''\n dmt_entry['SNMask'] = config.sn_mask if part_type == 'M' else ''\n dmt_entry['SNMaskExample'] = config.sn_mask_example if part_type == 'M' else ''\n dmt_entry['UOMClassID'] = config.uom_class_id\n dmt_entry['NetWeightUOM'] = config.net_weight_uom if part_type == 'M' else ''\n\n return dmt_entry", "def slot_key_db() -> Dict[str, List]:\n\n return {'q50': 'second_person_plural',\n 'q28': 'cot_caught',\n 'q80': 'rain_sun',\n 'q66': 'crawfish',\n 'q110': 'halloween',\n 'q64': 'sandwich',\n 'q90': 'side_road',\n 'q105': 'beverage',\n 'q73': 'shoes',\n 'q79': 'highway',\n 'q58': 'yard_sale',\n 'q107': 'rubbernecking',\n 'q94': 'frosting',\n 'q14': 'lawyer',\n 'q76': 'kitty_corner',\n 'q65': 'firefly',\n 'q60': 'verge',\n 'q118': 'brew_thru',\n 'q103': 'water_fountain'}", "def _partition_pairs_by_slot(\n self, mapping: Mapping[AnyKeyT, EncodableT]\n ) -> Dict[int, List[EncodableT]]:\n\n slots_to_pairs = {}\n for pair in mapping.items():\n slot = key_slot(self.encoder.encode(pair[0]))\n slots_to_pairs.setdefault(slot, []).extend(pair)\n\n return slots_to_pairs", "def entity(self) -> dict: \n image = {}\n image['PartitionKey'] = self.getPartitionKey()\n image['RowKey'] = self.getRowKey()\n for key, value in vars(self).items():\n if not key.startswith('_') and key not in ['','PartitionKey','RowKey']:\n if type(value) in [str, int, bool, datetime.date, datetime.datetime]:\n image[key] = value \n return image", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n return {\n \"topic\": [\n self.from_text(),\n ],\n }", "def merge_slots(self, pred_slots, item_slots):\n for slot in pred_slots:\n if slot in item_slots:\n raise DuplicateSlotError('Slot %s already has the value %s.' 
% (\n slot, item_slots[slot]))\n slots = {}\n for slot in pred_slots:\n slots[slot] = pred_slots[slot]\n for slot in item_slots:\n slots[slot] = item_slots[slot]\n return slots", "def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def get_slots(self) -> int:", "def __init__(self):\r\n self.adjacent = {}\r\n self.capacity = {}\r\n self.flow = {}", "def _bids_entities():\n return {\n \"raw\": [\n \"sub\",\n \"ses\",\n \"task\",\n \"acq\",\n \"ce\",\n \"rec\",\n \"dir\",\n \"run\",\n \"echo\",\n \"part\",\n ],\n \"derivatives\": [\"hemi\", \"space\", \"res\", \"den\", \"desc\"],\n }", "def map(self, entity):\n return ([], [])", "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "def nodes_mapped(instance):\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n\n node_dict_mapped = {}\n\n for old_label, new_label in mapping.items():\n for node, ammentity in node_dict.items():\n if old_label == node:\n node_dict_mapped[new_label] = ammentity\n\n return node_dict_mapped", "def smp_dict():\n out = base_dict()\n out['mro']['current'] = ['Sample']\n out['name']['current'] = 'Sample'\n ao(out, 'idx', 'Integer', attr=['Hidden'])\n ao(out, 'ii', 'Integer', attr=['Hidden'])\n ao(out, 'initialDimension', 'Float', 0., name='Initial Dimension')\n return out", "def __init__(self):\n self.map = {} #key: val, val, idx\n self.slot = []", "def remap_inventory_dicts(self) -> None:\n if \"product\" in self.remapping_dicts:\n self.demand = {\n self.remapping_dicts[\"product\"][k]: v for k, v in self.demand.items()\n }\n\n for label in (\"activity\", \"product\", \"biosphere\"):\n if label in self.remapping_dicts:\n getattr(self.dicts, label).remap(self.remapping_dicts[label])", "def _species(self, hdr):\n # Called PolyAtomic in OpenMIMS source\n d = {}\n\n d['numeric flag'], d['numeric value'], d['elements'], \\\n d['charges'], d['charge label'], d['label'] = \\\n unpack(self._bo + '4i c 64s', hdr.read(81))\n\n d['label'] = self._cleanup_string(d['label'])\n d['charge label'] = self._cleanup_string(d['charge label'])\n\n # OpenMIMS says 3 bytes AFTER el.table are unused; this is wrong,\n # 3 bytes BEFORE el.table (b 81-84) are unused. n_elements (here:\n # atomic number) is element number in periodic table rather than\n # number of elements. n_isotopes (here: isotope number) is offset from\n # main atomic Z number. 
Also: collapse ElementTable (Tabelts) into\n # main dict, too many layers.\n hdr.seek(3, 1)\n atoms = unpack(self._bo + '15i', hdr.read(60))\n d['atomic number'] = tuple(n for n in atoms[::3])\n d['isotope number'] = tuple(n for n in atoms[1::3])\n d['stoich number'] = tuple(n for n in atoms[2::3])\n return d", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"credit_card\", \"payment_amount\", \"time\", \"confirm\"]", "def _get_goal_info(self, last_info):\n start_ID = 4\n end_ID = start_ID + self.num_parts\n places = {}\n for ID in range(start_ID, end_ID):\n assert ID in last_info, f'something went wrong with ID={ID}'\n position, _, _ = last_info[ID]\n places[ID] = (position, (0, 0, 0, 1.))\n return places", "def _get_groupby_required_pieces(self) -> Dict:\n requiredPieces = frozenset([\"keys\", \"permutation\", \"unique_keys\", \"segments\"])\n\n return {piece_name: getattr(self, piece_name) for piece_name in requiredPieces}", "def _map_part_plnt(entry):\n dmt_entry = {}\n part_type = config.part_type_mapping.get(entry[config.classkey], 'P')\n\n dmt_entry['Plant'] = config.plant\n dmt_entry['PrimWhse'] = config.prim_whse\n dmt_entry['SourceType'] = part_type\n\n return dmt_entry", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def _get_challenge_parts(self):\n query_result = db.session.query(ChallengePart). \\\n filter(ChallengePart.season_id == self.SEASON.id). \\\n order_by(ChallengePart.order.asc()). \\\n all()\n\n result = {}\n dist = 0\n for item in query_result:\n dist += item.distance\n result[dist] = item.target\n\n return result", "def generate_slot(slot_name, slot_description, slot_raw_filename):\n slot = {\n 'enumerationValues': [],\n \"name\": slot_name,\n \"description\": slot_description\n }\n slot_raw_vals = read_raw_vals(slot_raw_filename)\n for slot_val in slot_raw_vals:\n slot['enumerationValues'].append({'value': slot_val})\n\n return slot", "def get_resource_information():\n\n\n # the resources we are allowed to use is easy. We just copy this...\n resource_limit_dict = _resources_allowed_dict.copy()\n\n \n # from the other dict, we only take the resource information. (this omits\n # locks and timing information that isn't needed)\n\n # first, let's do the easy thing, the quantity resources. 
These are just \n # floats\n resource_use_dict = {}\n for resourcename in resource_constants.quantity_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename]\n\n # for the fungible resources (files opened, etc,), we only need a count...\n for resourcename in resource_constants.fungible_item_resources:\n resource_use_dict[resourcename] = len(_resources_consumed_dict[resourcename])\n\n # for the individual item resources (ports, etc,), we copy the set...\n for resourcename in resource_constants.individual_item_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename].copy()\n\n # and that's it!\n return (resource_limit_dict, resource_use_dict)", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def generate_empty_slots():\n return {\n 'colors': ['', '', '', '', ''],\n 'pets': ['', '', '', '', ''],\n 'beverages': ['', '', '', '', ''],\n 'cigarettes': ['', '', '', '', ''],\n 'nationality': ['', '', '', '', ''],\n 'numbers': ['', '', '', '', ''],\n }", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"bug\", \"beverage\", \"second_person_plural\", \n \"cot_caught\", \"rain_sun\", \"crawfish\", \"halloween\",\n \"sandwich\", \"side_road\", \"shoes\", \"highway\", \"yard_sale\",\n \"rubbernecking\", \"frosting\", \"lawyer\", \"kitty_corner\",\n \"firefly\", \"verge\", \"brew_thru\", \"water_fountain\"]", "def observation_space(self) -> Dict[str, Any]:", "def _odl_inventory(self):\n return {\n \"id\": self._id,\n \"hard-timeout\": self._hard_timeout,\n \"idle-timeout\": self._idle_timeout,\n \"table_id\": self._table_id,\n \"priority\": self._priority,\n \"instructions\": {\n \"instruction\": [self._instructions[i].odl_inventory(i) for i in range(len(self._instructions))]\n },\n \"match\": self._match.odl_inventory()\n }", "def _compute_offers_to_send(self) -> Dict[Tuple[float, float], float]:\n partial_asgt = self._neighbors_values.copy()\n offers = dict()\n\n for limited_asgt in generate_assignment_as_dict([self.variable, self._partner]):\n partial_asgt.update(limited_asgt)\n cost = self._compute_cost(**partial_asgt)\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\n f\"looking for offer : {partial_asgt} - cost {cost}\"\n f\" current {self.current_cost} {self._mode}\"\n )\n\n if (self.current_cost > cost and self._mode == \"min\") or (\n self.current_cost < cost and self._mode == \"max\"\n ):\n offers[(limited_asgt[self.name], limited_asgt[self._partner.name])] = (\n self.current_cost - cost\n )\n return offers", "def test_parse_AS_STRUCTURE_dict(self):\n \n true_tool = pybedtools.BedTool(\"chr10\\t127496045\\t127555714\\tENSMUSG00000040054\\t0\\t+\\n\", from_string=True)\n \n result, result_bed = parse_AS_STRUCTURE_dict(\"test\", clipper.test_dir())\n print str(result_bed)\n self.assertEqual(len(true_tool.intersect(result_bed)), 1)\n test_result = result[\"ENSMUSG00000040054\"]\n \n true_exons = {0:'127496045-127496082', \n 1:'127528690-127528832',\n 2:'127533494-127533579', \n 3:'127545949-127546087', \n 4:'127547810-127548404', \n 5:'127549637-127549823', \n 
6:'127550518-127550737', \n 7:'127551389-127551839', \n 8:'127552080-127552141', \n 9:'127553116-127553225', \n 10:'127553361-127553463', \n 11:'127553602-127553813',\n 12:'127555610-127555714'}\n \n \n self.assertDictEqual(test_result[\"exons\"], true_exons)\n \n self.assertDictEqual(test_result[\"introns\"], {0 :'127496083-127528689', \n 1 :'127528833-127533493', \n 2 :'127533580-127545948', \n 3 :'127546088-127547809', \n 4 : '127548405-127549636',\n 5 :'127549824-127550517', \n 6 :'127550738-127551388', \n 7 :'127551840-127552079', \n 8 : '127552142-127553115', \n 9 : '127553226-127553360', \n 10 : '127553464-127553601', \n 11 :'127553814-127555609'}, \n \"introns not equal\")\n\n self.assertDictEqual(test_result[\"types\"], {0 : \"CE:\", \n 1 : \"CE:\", \n 2 : \"CE:\", \n 3 : \"CE:\",\n 4 : \"CE:\", \n 5 : \"CE:\", \n 6 : \"CE:\", \n 7 : \"CE:\", \n 8 : \"CE:\", \n 9 : \"CE:\", \n 10 : \"CE:\", \n 11 : \"CE:\", \n 12 : \"CE:\" }, \n \"types not equal\")\n\n self.assertEqual(test_result[\"tx_stop\"], 127555714)\n self.assertEqual(test_result[\"tx_start\"], 127496045)\n self.assertEqual(test_result[\"premRNA_length\"], 59670)\n self.assertEqual(test_result[\"mRNA_length\"], 2451)", "def _pack_items(self):\n identifiers = tuple(self.identify_items(self))\n cache_keys = self.make_cache_keys(identifiers)\n cache_items = dict(izip(cache_keys, self))\n self.cache.set_many(cache_items, self.cache_timeout)\n return identifiers", "def prepare_optimization(items,schedule,df_pred):\n itemblocks_to_produce = schedule[itemnames()].sum(0).to_dict()\n blocks_available = schedule.blockid.unique()\n block_order = pd.unique(schedule.blockid)\n forecasted_block_prices = df_pred['forecasted_price'].to_dict()\n actual_block_prices = df_pred['price'].to_dict()\n item_consumptions = items.set_index('item').consumption.to_dict()\n return(itemblocks_to_produce,blocks_available,forecasted_block_prices,\n actual_block_prices,item_consumptions,block_order)", "def get_perfect_information(self):\n state = {}\n state[\"chips\"] = [self.game.players[i].in_chips for i in range(self.player_num)]\n state[\"public_card\"] = (\n self.game.public_card.get_index() if self.game.public_card else None\n )\n state[\"hand_cards\"] = [\n self.game.players[i].hand.get_index() for i in range(self.player_num)\n ]\n state[\"current_round\"] = self.game.round_counter\n state[\"current_player\"] = self.game.game_pointer\n state[\"legal_actions\"] = self.game.get_legal_actions()\n return state", "def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):\n\n return {\n \"sessionAttributes\": session_attributes,\n \"dialogAction\": {\n \"type\": \"ElicitSlot\",\n \"intentName\": intent_name,\n \"slots\": slots,\n \"slotToElicit\": slot_to_elicit,\n \"message\": message,\n },\n }", "def map_part(data):\n dmt_data = {}\n for entry in data:\n working_part = Part(_map_part_base(entry))\n working_part.update(_map_part_plnt(entry))\n working_part.update(_map_part_revn(entry))\n\n dmt_data[working_part.PartNum] = working_part\n\n return dmt_data", "def infon_dict(self, node):\n raise NotImplementedError", "def required_slots(tracker):\n # type: () -> List[Text]\n\n return [\"suggestion\"]", "def avail_table_to_dict(avail_data):\n avail_target = avail_data[\"TARGETID\"]\n avail_loc = avail_data[\"LOCATION\"]\n avail = dict()\n for lid, tgid in zip(avail_loc, avail_target):\n if lid in avail:\n avail[lid].append(tgid)\n else:\n avail[lid] = list([tgid])\n avail = {f: np.array(av) for f, av in avail.items()}\n return avail", 
"def _process_resource(cls, resource):\n urn = resource['component_id']\n hrn, type = urn_to_hrn(resource['component_id'])\n\n resource['urn'] = urn\n resource['hrn'] = hrn\n\n resource['network_hrn'] = Xrn(resource['component_id']).authority[0] # network ? XXX\n\n # We also add 'facility' and 'testbed' fields\n resource['facility_name'] = cls.get_resource_facility_name(urn)\n resource['testbed_name'] = cls.get_resource_testbed_name(urn)\n\n if 'exclusive' not in resource:\n resource['exclusive'] = 'true'\n elif resource['exclusive'] is None:\n resource['exclusive'] = 'true'\n else:\n Log.warning(\"EXCLUSIVE = \",resource['exclusive'])\n\n #if 'location' in node:\n # if node['location']:\n # node['latitude'] = node['location']['latitude']\n # node['longitude'] = node['location']['longitude']\n # del node['location']\n #else:\n # if the location is not provided, aproximate it from the city\n t_urn = resource['urn'].split('+')\n city = t_urn[3].split('.')[1]\n if city == 'iii':\n city = 'Institute for Information Industry, Taïwan 106'\n resource['country'] = 'Taiwan'\n else:\n resource['country'] = 'France'\n location = cls.get_location(city)\n if location is not None:\n resource['latitude'] = str(location.latitude)\n resource['longitude'] = str(location.longitude)\n\n return resource", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def observation_spec(self) -> Dict[str, Any]:", "def gen_task_item(self) -> Dict[str, Any]:\n return {}", "def _electron_multiplier(self, hdr):\n d = {}\n d['em yield'], d['em background'], d['em deadtime'] = \\\n unpack(self._bo + 'd 2i', hdr.read(16))\n return d", "def get_slots(intent_request):\n return intent_request[\"currentIntent\"][\"slots\"]", "def get_student_item_dict(self):\n # pylint: disable=no-member\n return {\n \"student_id\": \"TURMAS_PROFESSORES2\",\n \"course_id\": self.block_course_id(),\n \"item_id\": \"TURMAS_PROFESSORES\",\n \"item_type\": \"TURMAS_PROFESSORES\",\n }", "def cluster_slots() -> Dict[str, Any]:\n # TODO: refactor tests to not use cli singleton auth.\n certs.cli_cert = certs.default_load(conf.make_master_url())\n authentication.cli_auth = authentication.Authentication(conf.make_master_url())\n r = api.get(conf.make_master_url(), \"api/v1/agents\")\n assert r.status_code == requests.codes.ok, r.text\n jvals = r.json() # type: Dict[str, Any]\n return {agent[\"id\"]: agent[\"slots\"].values() for agent in jvals[\"agents\"]}", "def _create_resource_consumption_dict():\n\n returned_resource_dict = {}\n\n # things that are quantities should start at 0.0\n for resource in resource_constants.quantity_resources:\n returned_resource_dict[resource] = 0.0\n\n for resource in resource_constants.item_resources:\n # double check there is no overlap...\n if resource in resource_constants.quantity_resources:\n raise InternalRepyError(\"Resource '\"+resource+\"' cannot be both quantity and item based!\")\n\n returned_resource_dict[resource] = set()\n\n # I need locks to protect races in accesses to some items...\n returned_resource_dict['fungible_locks'] = {}\n for init_resource in resource_constants.fungible_item_resources:\n returned_resource_dict['fungible_locks'][init_resource] = threading.Lock()\n\n 
returned_resource_dict['renewable_locks'] = {}\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_locks'][init_resource] = threading.Lock()\n\n\n # I also need to track when the last update of a renewable resource occurred\n returned_resource_dict['renewable_update_time'] = {}\n\n # (Aside) JAC: I've thought about this and looked through the commit history.\n # I don't see any reason to initialize the renewable resources with the\n # current time (as was done before).\n for init_resource in resource_constants.renewable_resources:\n returned_resource_dict['renewable_update_time'][init_resource] = 0.0\n\n\n return returned_resource_dict", "def extract(net, parameters=None):\r\n if parameters is None:\r\n parameters = {}\r\n\r\n smap = {}\r\n\r\n for t in net.transitions:\r\n smap[t] = t.properties[STOCHASTIC_DISTRIBUTION]\r\n\r\n return smap", "def __getitem__(self, item: int) -> Dict[str, torch.Tensor]:\n\n return {\n \"inputs\": self.inputs[item],\n \"targets\": self.targets[item],\n }", "def __init__(self, entities):\n self._shape_to_ent = dict()\n self._ent_to_shapes = dict()\n for entity in entities:\n shapes = entity.shapes\n self._ent_to_shapes[entity] = shapes\n for shape in shapes:\n assert shape not in self._shape_to_ent, \\\n f\"shape {shape} appears in {entity} and \" \\\n f\"{self._shape_to_ent[shape]}\"\n self._shape_to_ent[shape] = entity", "def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}", "def generate_inventory(baremetal_info, server_info):\n\n hosts = defaultdict(list)\n hosts_meta = {}\n\n for node in baremetal_info:\n if node['Provisioning State'].lower() == 'active':\n role = re.findall('.*profile:(compute|control)', node['Properties']['capabilities'])[0]\n for server in server_info:\n if server['ID'] == node['Instance UUID']:\n node_ip = re.findall('.+=(\\d+.\\d+.\\d+.\\d+)$', server['Networks'])[0]\n hosts[role].append(node_ip)\n # To match ssh.cfg.j2 template\n hosts_meta[node_ip] = {'ansible_ssh_host': node_ip,\n 'ansible_user': 'heat-admin'}\n\n for host in hosts:\n hosts[host].sort()\n\n return {'hosts': hosts, 'hosts_meta': hosts_meta}", "def __getitem__(self, index):\n item_info = {\n \"ID\": self.ID[index], \n \"turn_id\": self.turn_id[index], \n \"turn_belief\": self.turn_belief[index], \n \"gating_label\": self.gating_label[index], \n \"context_plain\":self.dialog_history[index].split(), \n \"turn_uttr_plain\": self.turn_uttr[index], \n \"turn_domain\": self.turn_domain[index], \n \"generate_y\": [v.split() for v in self.generate_y[index]],\n \"slot_temp\": self.slot_temp\n }\n return item_info", "def get_range(self):\n classes = concrete_descendents(self.class_)\n d=dict([(name,class_) for name,class_ in classes.items()])\n if self.allow_None:\n d['None']=None\n return d", "def _dynamic_dict(example, src_field, tgt_field):\n\n src = src_field.tokenize(example[\"src\"])\n # make a small vocab containing just the tokens in the source sequence\n unk = src_field.unk_token\n pad = src_field.pad_token\n src_ex_vocab = Vocab(Counter(src), specials=[unk, pad])\n unk_idx = src_ex_vocab.stoi[unk]\n # Map source tokens to indices in the dynamic dict.\n src_map = torch.LongTensor([src_ex_vocab.stoi[w] for w in src])\n example[\"src_map\"] = src_map\n example[\"src_ex_vocab\"] = src_ex_vocab\n\n if \"tgt\" in example:\n tgt = tgt_field.tokenize(example[\"tgt\"])\n 
mask = torch.LongTensor(\n [unk_idx] + [src_ex_vocab.stoi[w] for w in tgt] + [unk_idx])\n example[\"alignment\"] = mask\n return src_ex_vocab, example", "def get_card_values(self, name: str, expansion: str) -> Dict:", "def get_material_mapping(self):\n return {name: self.get_material(name) for name in self.parts.keys()}", "def make_entity_dict(class_reference, template, partial_dict): \n _data = class_reference.properties()\n for _key in _data:\n _data[_key] = partial_dict.get(_key, template.get(_key, '')) \n return _data", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = mtrx.create_distance_matrix(mtrx.create_data()) \r\n data['demands'] = clean.demands\r\n # Each location has a demand corresponding to the quantity—for example, \r\n # weight or volume—of the item to be picked up.\r\n data['vehicle_capacities'] = capacity\r\n # Each vehicle has a capacity: the maximum quantity that the vehicle can hold. \r\n # As a vehicle travels along its route, the total quantity of the items it is carrying \r\n # can never exceed its capacity.\r\n data['num_vehicles'] = number\r\n data['depot'] = 0\r\n return data", "def armor(self):\n capacity = self._getAttribute(Attribute.armorCapacity)\n em = self._getAttribute(Attribute.armorEM)\n explosive = self._getAttribute(Attribute.armorExplosive)\n kinetic = self._getAttribute(Attribute.armorKinetic)\n thermal = self._getAttribute(Attribute.armorThermal)\n\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }", "def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out", "def _process_entity_map(entity_type, entity_map, normalizer):\n item_map = {}\n syn_map = {}\n seen_ids = []\n for item in entity_map.get(\"entities\"):\n cname = item[\"cname\"]\n item_id = item.get(\"id\")\n if cname in item_map:\n msg = \"Canonical name %s specified in %s entity map multiple times\"\n logger.debug(msg, cname, entity_type)\n if item_id:\n if item_id in seen_ids:\n msg = \"Item id {!r} specified in {!r} entity map multiple times\"\n raise ValueError(msg.format(item_id, entity_type))\n seen_ids.append(item_id)\n\n aliases = [cname] + item.pop(\"whitelist\", [])\n items_for_cname = item_map.get(cname, [])\n items_for_cname.append(item)\n item_map[cname] = items_for_cname\n for alias in aliases:\n norm_alias = normalizer(alias)\n if norm_alias in syn_map:\n msg = \"Synonym %s specified in %s entity map multiple times\"\n logger.debug(msg, cname, entity_type)\n cnames_for_syn = syn_map.get(norm_alias, [])\n cnames_for_syn.append(cname)\n syn_map[norm_alias] = list(set(cnames_for_syn))\n\n return {\"items\": item_map, \"synonyms\": syn_map}", "def __init__ (self):\n\n self.ships = {}\n self.fleetSize = 0", "def calculate_statistics(self) -> Dict[str, Tuple[str, float]]:\n tempDict = {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }\n\n\n\n return {\n 'max_start': ('', -1),\n 'max_end': ('', -1),\n 'max_time_low_availability': ('', -1),\n 'max_time_low_unoccupied': ('', -1)\n }" ]
[ "0.6995648", "0.6862953", "0.6558813", "0.64900076", "0.6489124", "0.64611995", "0.64086974", "0.63684666", "0.63684666", "0.6338538", "0.62612367", "0.6197755", "0.6197755", "0.5999761", "0.59434724", "0.59309", "0.588155", "0.58714616", "0.5779117", "0.5742074", "0.5714348", "0.57009083", "0.565323", "0.5623322", "0.5564382", "0.5521141", "0.5488658", "0.54579973", "0.5399381", "0.53929", "0.53667504", "0.5355532", "0.5304656", "0.5296335", "0.5275866", "0.52521706", "0.5203124", "0.51995194", "0.51745117", "0.51614696", "0.5150585", "0.5145178", "0.51265025", "0.5121717", "0.51176065", "0.5110914", "0.50926167", "0.50856227", "0.50844485", "0.50770617", "0.5073823", "0.5071795", "0.50508535", "0.50394213", "0.5035931", "0.50346774", "0.5034604", "0.50333554", "0.5033074", "0.50086623", "0.4999225", "0.49918318", "0.4984425", "0.4940035", "0.4927214", "0.4924667", "0.49210873", "0.49117404", "0.49105486", "0.48949784", "0.48920038", "0.48833847", "0.48722354", "0.4866376", "0.4866045", "0.48550022", "0.48437238", "0.4841362", "0.48407605", "0.48329893", "0.48189488", "0.48185408", "0.48120442", "0.48069444", "0.48055375", "0.47997728", "0.47965473", "0.47826117", "0.47798523", "0.47735214", "0.47716472", "0.47690657", "0.47686067", "0.47594267", "0.4751814", "0.47488973", "0.47458515", "0.47446135", "0.47430328", "0.4739148" ]
0.62104833
11
This function checks whether a year is a leap year or not.
def is_leap_year(year_):
    if year_ % 4 == 0:
        if year_ % 100 == 0:
            if year_ % 400 == 0:
                return True
            else:
                return False
        else:
            return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_leap_year():", "def leapyear(year):\n\n # Return the answer\n return bool(calendar.isleap(year))", "def leapyear(year):\n\n\n if not year % 4 == 0:\n return False\n elif not year % 100 == 0:\n return False\n elif not year % 400 == 0:\n return False\n else:\n return True", "def is_leap_year(year):\n mod = divider(year)\n return mod(4) and not mod(100) or mod(400)", "def is_leap_year(year):\n if (year % 4) != 0:\n return False\n elif (year % 100) != 0:\n return True\n elif (year % 400) != 0:\n return False\n else:\n return True", "def is_leap_year(self):\n\n yr = self.year\n if not yr%4 == 0:\n return False\n elif not yr%100 == 0: #if divisible by 4 and not divisible by 100\n return True\n elif not yr%400 == 0: #if divisible by 4, divisible by 100 and not divisible 400\n return False\n else:\n return True", "def is_leap_year(year):\n\n # Standard Check\n if( year % 4 == 0):\n # Make sure it doesn't fit the exception \n if( (year % 100 == 0) and not (year % 400 == 0)):\n return False\n return True\n\n return False", "def _check_leap(year):\n\n return ((year % 4) == 0) and (not(((year % 100) == 0) and ((year % 400) != 0)))", "def is_leap_year(year):\n return ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0)", "def is_leap_year(year):\n\n if year % 400 == 0:\n return True\n\n if year % 100 == 0:\n return False\n\n if year % 4 == 0:\n return True", "def is_leap(year):\n\n\n\n\n\n\n\n\n\n\n\treturn year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def isleap(year):\n if year%4 == 0:\n return True\n elif year%100 == 0:\n return False\n elif year%400 == 0:\n return True\n else:\n return False", "def is_leap_year(year: int) -> bool:\n return (year % 4 == 0) and ((year % 100 != 0) or (year % 400 == 0))", "def is_leap_year(year):\n return year % 400 == 0 or (year % 4 == 0 and not year % 100 == 0)", "def is_leap_year(year):\n if ((year % 4 == 0) and (year % 100 != 0)) or year % 400 == 0:\n return True\n else:\n return False", "def is_leap_year(a_year):\n if a_year % 4 == 0: \n if a_year % 100 == 0:\n if a_year % 400 == 0:\n return True\n else:\n return False\n else:\n return True\n else:\n return False", "def is_leap_year(year):\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def leap_year(year:int) -> bool:\n if(year % 4 == 0 and year % 100 == 0) or year % 400 == 0:\n return True\n else:\n return False", "def is_leap_year(year: int) -> bool:\n if year % 400 == 0 or (year % 100 != 0 and year % 4 == 0):\n return True\n return False", "def _is_leap(year):\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def is_leap_year(year):\n # Please note, this is bad.\n # The previous iteration is much better, I just wanted to see how\n # bad this would look if I put it in one line.\n return False if year % 4 != 0 else True if year % 100 != 0 else year % 400 == 0", "def leap_year(self):\n\n if self.time_stamp.year % 4 != 0:\n return False\n if self.time_stamp.year % 100 != 0:\n return True\n if self.time_stamp.year % 400 != 0:\n return False\n return True", "def is_leap_year(year):\n assert year >= 1583\n return ((year % 4 == 0) and year != 100) or (year % 400 == 0)", "def leap_year(year):\n if (year % 4) != 0: # not devisible by 4 \n return False\n\n if (year % 400) != 0 and (year % 100) == 0: # century but not divisible by 400\n return False\n \n return True", "def is_leap_year(self):\n if (self.year / 4).is_integer():\n return True\n return False", "def is_leap(year):\n\treturn year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def 
is_leap_year(self):\n if self.year % 400 == 0:\n return True\n elif self.year % 100 == 0:\n return False\n elif self.year % 4 == 0:\n return True\n return False", "def is_leap_year(self):\n if self.year % 400 == 0:\n return True\n elif self.year % 100 == 0:\n return False\n elif self.year % 4 == 0:\n return True\n return False", "def leap_year(self, year):\n\n\t\tif (year % 4 == 0):\n\t\t\tif (year % 100 == 0):\n\t\t\t\tif (year % 400 == 0):\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn True\n\n\t\treturn False", "def leapYear(year):\n leap = False\n if (year % 400 == 0 or year % 100 != 0 and year % 4 == 0):\n leap = True\n else:\n leap = False\n return leap", "def isleap(year):\n return year % 4 == 0", "def is_leap(year):\n if year % 4 == 0:\n if year % 100 == 0:\n if year % 400 == 0:\n return True\n else:\n return False\n else:\n return True\n else:\n return False", "def leapyear_checker(year):\n if calendar.isleap(year):\n print(\"{} is a leap year.\".format(year))\n return True\n else:\n print(\"{} is not a leap year.\".format(year))\n return False", "def leap_year(year: int) -> bool:\n return year % 4 == 0 and \\\n (year % 100 != 0 or year % 400 == 0)", "def is_leap(year):\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def isleapyear(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. JSS\n\n if yr % 400 == 0: return True\n if yr % 100 == 0: return False\n if yr % 4 == 0: return True\n return False", "def isLeapYear( year):\n if (year % 400 == 0) :\n return True\n elif (year % 100 == 0) :\n return False\n elif (year % 4 == 0):\n return True\n else:\n return False", "def is_leap(year):\n if (year % 4 == 0):\n if (year % 100 == 0):\n if (year % 400 == 0):\n return True\n return False\n return True\n\n return False", "def isLeapYear(self):\n if self.year % 400 == 0: return True\n elif self.year % 100 == 0: return False\n elif self.year % 4 == 0: return True\n return False", "def is_leap(year):\n\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def is_leap(year):\n\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def is_leap(year):\n\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def is_leap(year):\n\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def is_leap_year(year_number: int) -> bool:\n if year_number % 4 == 0:\n if year_number % 100 == 0:\n if year_number % 400 == 0:\n return True\n else:\n return False\n return True\n else:\n return False", "def is_leap(year):\n try:\n year = int(year)\n if (year % 400 == 0) or (year % 4 == 0 and not year % 100 == 0):\n return True\n else:\n return False\n except TypeError:\n return ValueError(\"You did not enter a year as a number\")", "def is_leap_year(cls, year):\n return mod(year * cls.ARYA_SOLAR_YEAR - cls.ARYA_SOLAR_MONTH, cls.ARYA_LUNAR_MONTH) >= 23902504679/1282400064", "def is_leap(\n year # type: int\n ):\n # type: (...) 
-> bool\n\n if isinstance(year, int):\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)\n\n raise ValueError('Year must be integer.')", "def LeapYear(year):\n if year % 4:\t\t\t# doesn't divide by 4\n return False\n elif year % 100:\t\t# doesn't divide by 100\n return True\n elif year % 400:\t\t# doesn't divide by 400\n return False\n else:\n return True", "def is_leap(year):\n\tleap = False\n\n\tif (year % 4) == 0:\n\t\tif (year % 100) == 0:\n\t\t\tif (year % 400) == 0:\n\t\t\t\tleap = True\n\t\telse:\n\t\t\tleap = True\n\n\treturn leap", "def is_leap_year(year):\n if year % 4 == 0:\n if (year % 100) == 0:\n if (year % 400) == 0:\n # print(\"{0} is a leap year\".format(year))\n return True\n else:\n # print(\"{0} is not a leap year\".format(year))\n return False\n else:\n return True\n # print(\"{0} is a leap year\".format(year))\n else:\n return False\n # print(\"{0} is not a leap year\".format(year))", "def is_leap(year):\n if year % 4 == 0:\n if year % 100 == 0:\n if year % 900 in [200, 600]:\n return 3\n return 2\n return 1\n return 0", "def isLeapYear(year):\n return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)", "def leap_year(year):\n try:\n d = date(year, 2, 29)\n except ValueError:\n return f\"{year} is not a leap year.\"\n return f\"{d.year} is a leap year.\"", "def calculate_leap_year(year):\n if year % 400 == 0:\n print(year, \"is a leap year\")\n elif year % 4 == 0 and year % 100 != 0:\n print(year, \"is a leap year\")\n else:\n print(year, \"is not a leap year\")", "def is_leap_year(year):\n try:\n pattern = re.compile('([\\d]{4})')\n if pattern.match(str(year)):\n if not year % 4 and year % 100 or not year % 400:\n return True\n else:\n return False\n except ValueError:\n print('Four-digit integer not supplied.')", "def findingLeapYear (year):\n leapYear = False\n if year % 4 != 0:\n leapYear = False\n elif year % 100 != 0:\n leapYear = True\n elif year % 400 != 0:\n leapYear = False\n else:\n leapYear = True\n return leapYear", "def isLeapYear(year):\n if (year % 4 == 0):\n if (str(year)[-2:] == \"00\" and year % 400 != 0):\n return False\n return True\n else:\n return False", "def is_leap_year(n):\n if n % 400 == 0:\n return True\n elif n % 100 == 0:\n return False\n elif n % 4 == 0:\n return True\n else:\n return False", "def isLeapYear(year):\n\n year = int(float(year))\n chk = year % 4 #---- evry 4 yrs leap year\n chk2 = year % 100 #---- except every 100 years (e.g. 2100, 2200)\n chk3 = year % 400 #---- excpet every 400 years (e.g. 
2000, 2400)\n\n val = 0\n if chk == 0:\n val = 1\n if chk2 == 0:\n val = 0\n if chk3 == 0:\n val = 1\n\n return val", "def LeapYear(self):\n if self.year is None:\n raise DateTimeError(\n \"Insufficient precision for leap year calculation\")\n if self.year % 4:\t\t\t# doesn't divide by 4\n return False\n elif self.year:\t\t\t# doesn't divide by 100\n return True\n elif self.century % 4: # doesn't divide by 400\n return False\n else:\n return True", "def isLeapYear(self):\n return (self._year % 4 == 0 and\n (self._year % 100 != 0 or self._year % 400 == 0))", "def is_leap_year(year: str) -> bool:\n assert len(year) == 4, \"year must be a string of length 4\"\n assert len(re.findall(\"\\d\", year)) == 4, \"year must be composed of digits\"\n return (int(year) % 4 == 0) and year[:-2] != \"00\"", "def test_leap_years_1899to1904_and_1999to2004():\n\tyears = [1899,1900,1901,1902,1903,1904,1999,2000,2001,2002,2003,2004]\n\tleaps = date_functions.is_leap( years )\n\tassert not leaps[0] # 1899 was not a leap year\n\tassert not leaps[1] # 1900 was not a leap year\n\tassert not leaps[2] # 1901 was not a leap year\n\tassert not leaps[3] # 1902 was not a leap year\n\tassert not leaps[4] # 1903 was not a leap year\n\tassert leaps[5] # 1904 was a leap year\n\tassert not leaps[6] # 1999 was not a leap year\n\tassert leaps[7] # 2000 was a leap year\n\tassert not leaps[8] # 2001 was not a leap year\n\tassert not leaps[9] # 2002 was not a leap year\n\tassert not leaps[10] # 2003 was not a leap year\n\tassert leaps[11] # 2004 was a leap year", "def is_valid_year(year):\n return 1750 <= year <= 2019", "def is_leap_cli(year: int) -> None:\n LOGGER.info(\"is_leap_cli({}) is {}\".format(year, is_leap(year)))", "def test_negative():\n assert is_leap_year(2010) is False", "def test_positive():\n assert is_leap_year(2016) is True", "def test_centenary_negative():\n assert is_leap_year(2100) is False", "def is_leap_year(self) -> Index:\n return Index(self.to_series().dt.is_leap_year)", "def check_leap_year():\n while True:\n # Taking user input for year and converting to int\n year_input = int(input(\"Please input a year: \"))\n\n # Checking the condition for Leap year\n leap_check = ((year_input % 4 == 0) and (year_input % 100 != 0)) or (year_input % 400 == 0)\n\n # Manipulating a string based on computation\n leap = \" NOT\"\n if leap_check:\n leap = \"\"\n leap_result = ' is' + str(leap) + ' a leap'\n\n # Printing output of the check\n print(\"The given year \" + str(year_input) + str(leap_result) + \" year\")\n print('--------------------')", "def leap(x: int):\n if x < 1582:\n return(\"The gregorian calendar wasn'n invented yet\")\n else:\n if x%100 == 0 and x%400 != 0:\n return false\n elif x%4 == 0:\n return True\n else:\n return False", "def _days_before_year(year):\n y = year - 1\n return y * 365 + y // 4 - y // 100 + y // 400", "def is_valid_birth_year(birth_year: int) -> bool:\n return birth_year.isnumeric() and 1920 <= int(birth_year) <= 2002", "def is_valid_year(year_range):\n\n if not year_range:\n return False\n\n if len(str(year_range)) != 8:\n return False\n\n year1 = year_range[:4]\n year2 = year_range[4:]\n\n try:\n if int(year2) - int(year1) == 1:\n if int(year1) <= int(get_current_hockey_year_start()):\n return True\n return False\n\n except Exception as e:\n print (\"inalid year passed\")\n print (str(e))\n print (traceback.print_exc())\n return False", "def is_valid_year(year_number):\n\n if (type(year_number) == int) and (START_YEAR <= year_number <= FINAL_YEAR):\n return True\n\n return 
False", "def test_centenary_positive():\n assert is_leap_year(2400) is True", "def valid_year(cls, new_year):\n if cls.MIN_YEAR <= new_year <= cls.MAX_YEAR:\n return True\n else:\n return False", "def validate_birth_year(birth_year: str) -> None:\n if not 1920 <= int(birth_year) <= 2002:\n raise ValueError(\"Birth year is outside permissible range\")", "def validate_exp_year(passport: map) -> bool:\n if passport.get('eyr'):\n if int(passport['eyr']) >= 2020 and int(passport['eyr']) <= 2030:\n return True\n\n return False", "def is_valid_issue_year(issue_year: int) -> bool:\n return issue_year.isnumeric() and 2010 <= int(issue_year) <= 2020", "def validate_birth_year(passport: map) -> bool:\n if passport.get('byr'):\n if int(passport['byr']) >= 1920 and int(passport['byr']) <= 2002:\n return True\n\n return False", "def is_valid_year_number(year_number: int) -> bool:\n if 0 <= int(year_number) < 100:\n return True\n return False", "def is_leap_valid(self, final_x, final_y) -> bool:\n return True", "def validate_issue_year(passport: map) -> bool:\n if passport.get('iyr'):\n if int(passport['iyr']) >= 2010 and int(passport['iyr']) <= 2020:\n return True\n\n return False", "def doy_leap(date=None):\n\n doy = date.timetuple().tm_yday\n year = date.timetuple().tm_year\n\n if year % 4 == 0:\n if doy > 60:\n doy -= 1\n\n return doy", "def is_valid_year_number(year_number: int) -> bool:\n if year_number in range(100):\n return True\n else:\n return False", "def test_invalid_beginning_of_year(self):\n year, month, day, error = clean_year_month_day(2014, 12, 31, 1)\n self.assertEqual(year, 2015)\n self.assertEqual(month, 1)\n self.assertEqual(day, 1)\n self.assertEqual(error, False)", "def date_is_valid(year, month, day):\r\n \r\n if (datetime.date(year, month <= 12, day <= 31)):\r\n return True\r\n\r\n else:\r\n return False", "def new_year(dacycle):\n\n this_year = dacycle['time.start'].year\n prev_year = (dacycle['time.start']-dacycle['cyclelength']).year\n\n return (this_year != prev_year)", "def is_valid_expiration_year(expiration_year: int) -> bool:\n return expiration_year.isnumeric() and 2020 <= int(expiration_year) <= 2030", "def calculate_day_of_year(date):\n day_of_year = date.timetuple().tm_yday\n if (is_leap_year(date.year) and \\\n date > datetime.datetime(date.year, 2, 28)):\n day_of_year -= 1\n \n return day_of_year", "def leapCheck(n):\n \n if n % 400:\n return True\n elif n % 100:\n return False\n elif n % 4:\n return True", "def test_convert_date_to_year(self):\n # TODO there might be a more robust way to write this with try except statements.", "def closeyear(year):\n\n # Return the specific year\n return int(year % 4)", "def validate_issue_year(issue_year: str) -> None:\n if not 2010 <= int(issue_year) <= 2020:\n raise ValueError(\"Issue year is outside permissible range\")", "def test_leap_years(self):\n\n dates1 = (\n datetime.date(2000, 1, 29),\n datetime.date(2004, 1, 29),\n datetime.date(2008, 1, 29),\n datetime.date(2012, 1, 29),\n datetime.date(2016, 1, 29),\n datetime.date(2020, 1, 29),\n datetime.date(2024, 1, 29),\n )\n\n dates2 = (\n datetime.date(2000, 2, 29),\n datetime.date(2004, 2, 29),\n datetime.date(2008, 2, 29),\n datetime.date(2012, 2, 29),\n datetime.date(2016, 2, 29),\n datetime.date(2020, 2, 29),\n datetime.date(2024, 2, 29),\n )\n\n for date1, date2 in zip(dates1, dates2):\n self.assertTrue(self.expander.is_same_date_month_ahead(date1, date2))", "def calendar_days(year: int | float | np.ndarray) -> np.ndarray:\n # Rules in the Gregorian calendar for a year 
to be a leap year:\n # divisible by 4, but not by 100 unless divisible by 400\n # True length of the year is about 365.2422 days\n # Adding a leap day every four years ==> average 365.25\n # Subtracting a leap year every 100 years ==> average 365.24\n # Adding a leap year back every 400 years ==> average 365.2425\n # Subtracting a leap year every 4000 years ==> average 365.24225\n m4 = (year % 4)\n m100 = (year % 100)\n m400 = (year % 400)\n m4000 = (year % 4000)\n # find indices for standard years and leap years using criteria\n if ((m4 == 0) & (m100 != 0) | (m400 == 0) & (m4000 != 0)):\n return np.array(_dpm_leap, dtype=np.float64)\n elif ((m4 != 0) | (m100 == 0) & (m400 != 0) | (m4000 == 0)):\n return np.array(_dpm_stnd, dtype=np.float64)", "def calendar_days(year: int | float | np.ndarray) -> np.ndarray:\n # Rules in the Gregorian calendar for a year to be a leap year:\n # divisible by 4, but not by 100 unless divisible by 400\n # True length of the year is about 365.2422 days\n # Adding a leap day every four years ==> average 365.25\n # Subtracting a leap year every 100 years ==> average 365.24\n # Adding a leap year back every 400 years ==> average 365.2425\n # Subtracting a leap year every 4000 years ==> average 365.24225\n m4 = (year % 4)\n m100 = (year % 100)\n m400 = (year % 400)\n m4000 = (year % 4000)\n # find indices for standard years and leap years using criteria\n if ((m4 == 0) & (m100 != 0) | (m400 == 0) & (m4000 != 0)):\n return np.array(_dpm_leap, dtype=np.float64)\n elif ((m4 != 0) | (m100 == 0) & (m400 != 0) | (m4000 == 0)):\n return np.array(_dpm_stnd, dtype=np.float64)", "def _set_year(self, year) -> bool:\n if self.set_start_year(year) is False:\n return False\n return self.set_finish_year(year)", "def isCurrentYear(self):\n t = time()\n return safegmtime(t + _tzoffset(self._tz, t))[0] == self._year" ]
[ "0.9214601", "0.8924297", "0.8878337", "0.8866551", "0.8856028", "0.8848545", "0.88234866", "0.88232195", "0.88118976", "0.88081586", "0.8797112", "0.87938344", "0.8793488", "0.8789037", "0.878295", "0.87800264", "0.8750441", "0.87445366", "0.87398547", "0.8735187", "0.8731056", "0.87252426", "0.8723542", "0.8699367", "0.8696404", "0.8694758", "0.86869866", "0.86869866", "0.86678797", "0.8666432", "0.8649143", "0.8648455", "0.864813", "0.86409557", "0.8620166", "0.8616595", "0.8599985", "0.8593351", "0.85789305", "0.8572128", "0.8572128", "0.8572128", "0.8572128", "0.8570611", "0.85490495", "0.85165995", "0.850113", "0.85003245", "0.8495811", "0.84706587", "0.8463127", "0.84451056", "0.8415532", "0.84151185", "0.8361463", "0.83351064", "0.83260024", "0.830984", "0.830281", "0.82988775", "0.8280508", "0.82364404", "0.7654753", "0.75668854", "0.7451703", "0.74086976", "0.7374138", "0.7278526", "0.7267829", "0.7229253", "0.72024983", "0.7192399", "0.7121591", "0.7081099", "0.7063076", "0.7039521", "0.6855405", "0.6853541", "0.68411565", "0.68196774", "0.6767818", "0.67583454", "0.6750795", "0.6738274", "0.6671084", "0.665204", "0.6637234", "0.66161084", "0.6581426", "0.6515881", "0.6511217", "0.6496202", "0.6418584", "0.64124215", "0.6407418", "0.6405395", "0.63685864", "0.63685864", "0.6348741", "0.6272444" ]
0.8835978
6
Make RPC call to 'Envoy Controller' to add target URLs to envoy datapath.
def activate_he_urls_for_ue(
    ip: IPAddress,
    rule_id: str,
    urls: List[str],
    imsi: str,
    msisdn: str,
) -> bool:
    try:
        chan = ServiceRegistry.get_rpc_channel(
            SERVICE_NAME,
            ServiceRegistry.LOCAL,
        )
    except grpc.RpcError:
        logging.error('Cant get RPC channel to %s', SERVICE_NAME)
        return False
    client = EnvoyControllerStub(chan)
    try:
        headers = [Header(name=IMSI_HDR, value=imsi)]
        if msisdn:
            headers.append(Header(name=MSISDN_HDR, value=msisdn))
        he_info = AddUEHeaderEnrichmentRequest(
            ue_ip=ip,
            rule_id=rule_id,
            websites=urls,
            headers=headers,
        )
        ret = client.AddUEHeaderEnrichment(he_info, timeout=TIMEOUT_SEC)
        return ret.result == AddUEHeaderEnrichmentResult.SUCCESS
    except grpc.RpcError as err:
        logging.error(
            "Activate HE proxy error[%s] %s",
            err.code(),
            err.details(),
        )
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_url(path, controller_ip=DNAC):\n print(\"3\")\n return \"https://%s:%s/api/v1/%s\" % (controller_ip, DNAC_PORT, path)", "def register():\n\n print(\"Request: \", request)\n print(\"foo: \", request.app.ep_mapping)\n print(json.load(request.body))\n endpoint_details = json.load(request.body)\n print(endpoint_details)\n\n # Here we want to start an executor client.\n # Make sure to not put anything into the client, until after an interchange has\n # connected to avoid clogging up the pipe. Submits will block if the client has\n # no endpoint connected.\n endpoint_id = str(uuid.uuid4())\n fw = spawn_forwarder(request.app.address, endpoint_id=endpoint_id)\n connection_info = fw.connection_info\n ret_package = {'endpoint_id': endpoint_id}\n ret_package.update(connection_info)\n print(\"Ret_package : \", ret_package)\n\n print(\"Ep_id: \", endpoint_id)\n request.app.ep_mapping[endpoint_id] = ret_package\n return ret_package", "def serve(config, port, address, secret, update_org, update_older_than,\n periodic_interval, payload_log):\n replicategithub.webhook.serve(\n config.get_manager(),\n secret=secret,\n listen=(address, port),\n periodic_interval=periodic_interval,\n update_orgs=update_org,\n update_older_than=update_older_than,\n payload_log=payload_log)", "def paths_allow_service(ctx, network, destination, source, port):\n source_service = get_service_for_cli(ctx, network, source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.add(source_service, destination_service, port)\n click.echo('Added path from %s to %s in network %s for port %s' % (source, destination,\n network, port))", "def mutate_urls(self, mutator):", "def request(flow):\n flow.count = context.count\n context.count += 1\n context.hosts_list.add(flow.request.host)\n context.locusts.add(flow.request.host, flow)", "def register_traffic(self, intent):\n servers = collections.defaultdict(list)\n clients = collections.defaultdict(list)\n _trules = []\n for rule in intent:\n srchost = self.get_ep_host(rule['src'])\n dsthost = self.get_ep_host(rule['dst'])\n\n if not srchost:\n log.error(\"No host found for running traffic from IP : %s\",\n rule['src'])\n continue\n elif not dsthost:\n log.error(\"No host found for running traffic from IP : %s\",\n rule['dst'])\n continue\n\n servers[dsthost].append(rule)\n clients[srchost].append(rule)\n\n trule = self.create_traffic_rule(rule)\n _trules.append(trule)\n\n # Register at endpoint and create local representation.\n if config.get_param('TRAFFIC_START_SERVERS_FIRST'):\n # Start Servers first and then Clients.\n host_rules_map = [servers, clients]\n else:\n # Start Servers / Clients in single call.\n # May result in some cool off time required before the\n # traffic settles.\n for host, rules in clients.items():\n servers[host].extend(rules)\n host_rules_map = [servers]\n\n def _register_traffic_rules(host, rules):\n with LydianClient(host) as dclient:\n dclient.controller.register_traffic(rules)\n\n # Start Server before the client.\n for host_rules in host_rules_map:\n collection = [(host, (host, rules), {})\n for host, rules in host_rules.items()]\n ThreadPool(_register_traffic_rules, collection)\n\n self.rules_app.add_rules(_trules) # Persist rules to local db", "def translate_to_rpc(self, rpcActuatorControlTarget):\n\n \n \n \n rpcActuatorControlTarget.group = self.group\n \n \n \n \n \n for elem in self.controls:\n rpcActuatorControlTarget.controls.append(elem)", "def addTargets(self, **kwargs):\n 
self.targets.update(kwargs)\n for key, val in kwargs.items():\n self.namespace[key] = val", "def add(self, urls):\n path = \"authSettings/exemptedUrls?action=ADD_TO_LIST\"\n return self._session.post(path, urls)", "def to_requests(self):\n pass", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def remotes():", "def index():\n endpoints = []\n for api_endpoint in app.url_map.iter_rules():\n if api_endpoint.rule.startswith('/api'):\n url = api_endpoint.rule\n methods = api_endpoint.methods\n endpoints.append((url, str(methods)))\n return jsonify(endpoints)", "def to_service_server(allowed_paths):\n\n if allowed_paths is not None:\n # 308 - Permanent Redirect, but preserve the content of your request\n return redirect(\"http://\"+ ip + \":5007/service/%s\" % allowed_paths, code=308)\n # 301 - Moved Permanently \n return redirect(\"http://\"+ ip + \":5007/service/\", code=301)", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))", "def _serve(self) -> None:\n for instrument in self._config[\"instruments\"]:\n uri = self._daemon.register(instrument, objectId=str(instrument))\n self._services[instrument.id] = str(uri)\n logger.success(f\"Registered {instrument} at {uri}\")\n self.uri = self._daemon.register(self, objectId=self.servername)\n logger.success(f\"Registered self at {self.uri}\")", "def setup(hass, config):\n hass.http.register_view(APIAIWebhookView)\n return True", "def add_target(self, target):\n\n # pass specified target parameters to the PED-RPC server.\n target.pedrpc_connect()\n target.set_fuzz_data_logger(fuzz_data_logger=self._fuzz_data_logger)\n\n # add target to internal list.\n self.targets.append(target)", "def submit_urls(args):\n params = {\n 'api_key': API_KEY,\n 'url': args.get('url')\n }\n markdown = ''\n r = req('POST', SUB_API + 'samples', params=params)\n res = r.json()['data']\n markdown += tableToMarkdown('Threat Grid - URL Submission', res)\n results = CommandResults(\n readable_output=markdown,\n outputs_prefix='Threatgrid.SearchResult',\n outputs_key_field='Info',\n outputs=res\n )\n return results", "def add_target(self, target):\n # type: (LoadBalancerTarget) -> List[BoundAction]\n return self._client.add_target(self, target)", "def endpoint(self, url):\n new_script = {\"url\": url}\n return self.connection.post(self.service + \"/url\", json.dumps(new_script))", "def add_domain_routes(app):\n\n @app.route(\"/v1/list_agencies/\", methods=[\"GET\"])\n @get_dabs_sub_tier_agencies\n def list_agencies(cgac_sub_tiers, frec_sub_tiers):\n \"\"\" Get all agencies the current user has DABS access to.\n Args:\n cgac_sub_tiers - List of all CGAC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list 
only sub_tier_agencies that user has DABS permissions for\n frec_sub_tiers - List of all FREC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has DABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, get_accessible_agencies(cgac_sub_tiers, frec_sub_tiers))\n\n @app.route(\"/v1/list_all_agencies/\", methods=[\"GET\"])\n def list_all_agencies():\n \"\"\" List all CGAC and FREC Agencies \"\"\"\n return JsonResponse.create(StatusCode.OK, get_all_agencies())\n\n @app.route(\"/v1/list_sub_tier_agencies/\", methods=[\"GET\"])\n @get_fabs_sub_tier_agencies\n def list_sub_tier_agencies(sub_tier_agencies):\n \"\"\" List all Sub-Tier Agencies user has FABS permissions for\n Args:\n sub_tier_agencies - List of all SubTierAgencies generated by the get_fabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has FABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, organize_sub_tier_agencies(sub_tier_agencies))", "def http_ftp_activity(self, list_pc, t_min = 2, t_max = 5, path_url = \"./config_files/client/\"):\n for pc in list_pc:\n container = pc[\"properties\"][\"container_id\"]\n self.dm.copy_to_docker(\"./config_files/client/requests_urls.sh\", container)\n self.dm.copy_to_docker(\"./config_files/client/kill_urls.sh\", container)\n self.dm.copy_to_docker(path_url+\"url.txt\", container)\n self.dm.exec_to_docker(container, \"ash requests_urls.sh \"+str(t_min)+\" \"+str(t_max)+\" url.txt\",isdetach=True)\n pass", "def addEndpoints(self, endpoints):\n self.endpoints.extend(endpoints)\n self._connectOrBind(endpoints)", "def getURLs():", "def trigger(self, _result, pod, routes, *_args, **_kwargs):\n routes.add('/_grow/routes', router.RouteInfo('console', {\n 'handler': RoutesDevHandlerHook.serve_routes,\n }))", "def _send_to_endpoint(self, events):\n raise NotImplementedError('Please implement _send_to_endpoint().')", "def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = '{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': 
self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))", "def OnAdd(self, controller):\n pass", "def api_apply(self, name, upstream_url, hosts=None, uris=None,\n methods=None, strip_uri=False, preserve_host=False):\n\n if hosts is None and uris is None and methods is None:\n raise ValueError('Need at least one of hosts, uris or methods.')\n\n if name is None:\n raise ValueError('name needs to be specified.')\n\n if upstream_url is None:\n raise ValueError('upstream_url needs to be specified.')\n\n data = {\n 'name': name,\n 'upstream_url': upstream_url,\n 'strip_uri': strip_uri,\n 'preserve_host': preserve_host,\n 'hosts': hosts\n }\n\n if hosts is not None:\n # Kong API expects comma-separated values\n if isinstance(hosts, list):\n hosts = ','.join(hosts)\n\n data['hosts'] = hosts\n\n if uris is not None:\n # Kong API expects comma-separated values\n if isinstance(uris, list):\n uris = ','.join(uris)\n\n data['uris'] = uris\n\n if methods is not None:\n # Kong API expects comma-separated values\n if isinstance(methods, list):\n methods = ','.join(methods)\n\n data['methods'] = methods\n\n # check if the API is already defined in Kong\n if self.api_get(name):\n # patch the resource at /apis/{name}\n r = self._patch(['apis', name], data=data)\n else:\n # post new API to the root of /apis\n r = self._post('apis', data)\n\n return r", "def main():\n\n addon_url = sys.argv[0]\n addon_handle = int(sys.argv[1])\n addon_args = urlparse.parse_qs(sys.argv[2][1:])\n\n # Route request to action.\n Plugin(addon_url, addon_handle, addon_args).route()", "def fusion_api_add_proxy_server(self, body, api=None, headers=None):\n return self.proxyserver.add(body, api, headers)", "def post(self):\n return getServiceListMethod(self)", "def register(self, wsgi_app):\n wsgi_app.add_url_rule(\n rule=self.path,\n view_func=self.controller,\n methods=self.methods)", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def execute(self, targets):", "def urls(self, urls):\n\n 
self._urls = urls", "def nremote(self):", "def test_bulk_observable_refang_add(self):\n observables = [\"hxxp://{}{}.com\".format(_random_domain(), i) for i in range(20)]\n info = self.api.observable_bulk_add(observables, ['bulk'])\n self.assertEqual(len(info), 20)\n for url in info:\n self.assertIn('http://', url['value'])", "def refresh_urls(environ, start_response):\n store = environ['tiddlyweb.store']\n config = environ['tiddlyweb.config']\n \n register_urls(store, config)\n \n start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])\n return 'All URLs have been updated'", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_update_action(self):\n pass", "def main():\n\n context = yield from Context.create_client_context()\n\n yield from asyncio.sleep(2)\n\n payload = b\"0\"\n request = Message(code=PUT, payload=payload)\n request.opt.uri_host = '192.168.3.2'\n request.opt.uri_path = (\"nodes\", \"48102\", \"humidity\")\n\n response = yield from context.request(request).response\n\n print('Result: %s\\n%r'%(response.code, response.payload))", "def post_list(self, request, **kwargs):\n response = super(BaseCorsResource, self).post_list(request, **kwargs)\n response['Access-Control-Allow-Origin'] = '*'\n response['Access-Control-Expose-Headers'] = 'Location'\n return response", "def set_remote_locations(locations: list[RemoteLocation]) -> dict:\n return {\n \"method\": \"Target.setRemoteLocations\",\n \"params\": {\"locations\": [l.to_json() for l in locations]},\n }", "def register(self, target, hostname, listener_type, expire=-1):", "def test_add_url_rule():\n\n application_services.add_url_rule('/tests/application/rule', view_func=mock_view_function,\n methods=HTTPMethodEnum.GET)", "def index(self, *args):\n # Get BioScript Server url (usually from config file)\n bs_server_url = tg.config.get('main.proxy') + '/'\n\n # Fetch the list of operations\n # (if you have a defined service, add your key in the request body to fetch\n # only operations you defined)\n bs_url = bs_server_url + 'plugins?ordered=true'\n operation_list = urllib2.urlopen(bs_url).read()\n\n # define other parameters\n # bs_server url\n validation_url = url('/devs/validation')\n get_url = url('/devs/fetch')\n return {'oplist': operation_list, 'bs_serv_url': bs_server_url,\n 'validation_url': validation_url, 'gurl': get_url}", "def Links(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def build_call(*args):\n call = 'http://atlas.media.mit.edu/'\n for val in args:\n call += str(val) + '/'\n return call", "def __init__(self, service_index_url = DEFAULT_SERVICE_INDEX_URL): \n self.__fetch_base_urls(service_index_url)", "def _construct_endpoints(self):\n # Functions\n async def get_function_list_data(request: web.Request):\n entrypoints = [elm.to_dict() for elm in self._function_manager.definitions.values()]\n return web.json_response(entrypoints)\n\n async def get_function_list_text(request: web.Request):\n rows = []\n for definition in self._function_manager.definitions.values():\n rows.append(definition.function_name)\n rows.append(' URL:')\n rows.append(f' async api: /{definition.function_name}')\n rows.append(f' block api: /{definition.function_name}/keep-connection')\n rows.append(f' Max Concurrency: {definition.max_concurrency}')\n rows.append(' Description:')\n rows.append(f' {definition.description}')\n if len(definition.arg_definitions) == 0:\n 
rows.append(' No Args')\n else:\n rows.append(' Args')\n for arg in definition.arg_definitions:\n rows.append(f' {arg.name} {arg.type.name} {\"Requiered\" if arg.is_required else \"NOT-Required\"}')\n if arg.description != '':\n rows.append(f' {arg.description}')\n rows.append(f' Timeout: {definition.timeout} sec')\n rows.append('\\n')\n\n return web.Response(text='\\n'.join(rows))\n\n # function\n async def get_function_definition(request: web.Request):\n function_name = request.match_info['function_name']\n\n if function_name not in self._function_manager.definitions:\n raise web.HTTPNotFound()\n\n return web.json_response(self._function_manager.definitions[function_name].to_dict())\n\n async def get_function_running_count(request: web.Request):\n function_name = request.match_info['function_name']\n\n ret = self._function_manager.get_current_number_of_execution(function_name)\n if ret is None:\n raise web.HTTPNotFound()\n\n return web.json_response(ret)\n\n # Tasks\n async def get_task_info(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.to_dict())\n\n async def get_task_done(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n\n return web.json_response(task_info.is_done())\n\n async def get_task_result(request: web.Request):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n\n task_info = self._function_manager.get_task_info(task_id)\n if task_info is None:\n raise web.HTTPNotFound()\n return web.json_response(task_info.result)\n\n async def get_task_list(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n tasks = self._function_manager.list_task_info(function_name)\n if tasks is None:\n raise web.HTTPNotFound()\n\n return web.json_response([elm.to_dict() for elm in tasks])\n\n # Termination\n async def post_terminate_function(request: web.Request):\n if 'function_name' not in request.match_info:\n raise web.HTTPBadRequest()\n\n function_name = request.match_info['function_name']\n\n self._function_manager.terminate_function(function_name)\n return web.json_response({})\n\n async def post_terminate_task(request: web.Request, task_id: str):\n if 'task_id' not in request.match_info:\n raise web.HTTPBadRequest()\n\n task_id = request.match_info['task_id']\n self._function_manager.terminate_task(task_id)\n\n return web.json_response({})\n\n api_list = [\n web.get('/function/list/data', get_function_list_data),\n web.get('/function/list/text', get_function_list_text),\n web.get(r'/function/definition/{function_name}', get_function_definition),\n web.get(r'/function/running-count/{function_name}', get_function_running_count),\n web.get(r'/task/info/{task_id}', get_task_info),\n web.get(r'/task/done/{task_id}', get_task_done),\n web.get(r'/task/result/{task_id}', get_task_result),\n web.get(r'/task/list/{function_name}', get_task_list),\n web.post(r'/terminate/function/{function_name}', post_terminate_function),\n web.post(r'/terminate/task/{task_id}', post_terminate_task),\n ]\n\n async 
def index(request: web.Request):\n return web.Response(text='\\n'.join([elm.path for elm in api_list])+'\\n')\n\n self._app.add_routes([*api_list, web.get('/', index)])", "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "def cli(loop, aiohttp_client, known_domain_data):\n app = web.Application()\n\n async def get_handler(request):\n return web.json_response(known_domain_data)\n\n async def bad_get_handler(request):\n return web.json_response(\n {'errors': [{'code': '50004', 'detail': 'URL is not found.'}]},\n status=500\n )\n\n async def post_handler(request):\n json_data = await request.json()\n response_dict = known_domain_data\n response_dict.update(json_data)\n if request.query:\n response_dict['query_args'] = dict(request.query)\n return web.json_response(response_dict)\n\n async def put_handler(request):\n json_data = await request.json()\n response_dict = known_domain_data\n response_dict.update(json_data)\n if request.query:\n response_dict['query_args'] = dict(request.query)\n return web.json_response(response_dict)\n\n app.router.add_get(path='/cli-test', handler=get_handler)\n app.router.add_post(path='/cli-test', handler=post_handler)\n app.router.add_put(path='/cli-test', handler=put_handler)\n app.router.add_get(path='/cli-test-bad', handler=bad_get_handler)\n\n return loop.run_until_complete(aiohttp_client(app))", "def _RemoteSend(self, request, response, method, request_id=None):\n tag = self.project_id\n self._maybeSetDefaultAuthDomain() \n user = users.GetCurrentUser()\n if user != None:\n tag += \":\" + user.email()\n tag += \":\" + user.nickname()\n tag += \":\" + user.auth_domain()\n api_request = remote_api_pb.Request()\n api_request.set_method(method)\n api_request.set_service_name(\"datastore_v3\")\n api_request.set_request(request.Encode())\n if request_id is not None:\n api_request.set_request_id(request_id)\n\n if POOL_CONNECTIONS:\n api_response = self._request_with_pool(api_request, tag)\n else:\n api_response = self._request_from_sandbox(api_request, tag)\n\n if api_response.has_application_error():\n error_pb = api_response.application_error()\n logging.error(error_pb.detail())\n raise apiproxy_errors.ApplicationError(error_pb.code(),\n error_pb.detail())\n\n if api_response.has_exception():\n raise api_response.exception()\n\n response.ParseFromString(api_response.response())", "def get_urls(self):\n info = self.model._meta.app_label, self.model._meta.model_name\n\n return super().get_urls() + [\n path(\"ajax\", self.callback, name=\"%s_%s_ajax\" % info),\n path(\"layer\", self.get_layer, name=\"%s_%s_layer\" % info)\n ]", "def _create_controller(main_controller, action_controller_list):\n controller = server.wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--tg_name', required=True,\n help='specify target group name', type=str)\n parser.add_argument('--gwlb_name', required=True,\n help='specify gateway load balancer name', type=str)\n parser.add_argument('--vpc_id', required=True,\n help='specify vpc id', type=str)\n parser.add_argument('--subnet_ids', nargs='+', required=True,\n help='specify subnet ids')\n parser.add_argument('--target_ids', nargs='+', required=True,\n help='specify target ids')\n\n args = parser.parse_args()\n ############################\n # Define script variables:\n 
############################\n tg_name = args.tg_name\n gwlb_name = args.gwlb_name\n vpc_id = args.vpc_id\n subnet_ids = args.subnet_ids\n target_ids = args.target_ids\n\n tg1_args = {\n 'name': tg_name,\n 'protocol': 'GENEVE',\n 'port': 6081,\n 'healthchkproto': 'HTTP',\n 'healthchkport': '80',\n 'healthchkpath': '/',\n 'vpc_id': vpc_id,\n 'type': 'instance'\n }\n #############################\n # Target Group:\n tg1 = create_tg(**tg1_args)\n print(f\"TG ARN: {tg1[1]}\")\n # GWLB:\n gwlb1 = create_gwlb(gwlb_name, subnet_ids)\n print(f\"GWLB ARN: {gwlb1[1]}\")\n # Listener:\n listener1 = create_fwd_listener(gwlb1[1], tg1[1])\n print(f\"LISTENER ARN: {listener1[1]}\")\n # Register Targets:\n register_targets(tg1[1], target_ids[0])", "def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n time.sleep(2)\n\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))", "def _upload_outputs(outputs: Dict[str, str]) -> None:\n for name, endpoint in outputs.items():\n output_file = _BASE_PATH / name / 'data.json'\n target_url = '{}/data.json'.format(endpoint)\n with output_file.open('rb') as f:\n requests.put(target_url, data=f.read())", "def put_list(self, request, **kwargs):\r\n response = super(BaseCorsResource, self).put_list(request, **kwargs)\r\n return self.add_cors_headers(response, True)", "def send_accept_component_request(self, method, target_service_obj, \\\n comp_list):\n headers = {}\n create_connection = False\n conn_timeout = float(self.conf.get('conn_timeout', 10))\n node_timeout = int(self.conf.get('node_timeout', 10))\n self.logger.debug(\"Entering connect_target_node method\")\n filesystem = 'export'\n directory = 'OSP_01'\n path = '/recovery_process/'\n #Parameters are fixed, could be changes according to requirement.\n headers['Expect'] = '100-continue'\n headers['X-Timestamp'] = time.time()\n headers['Content-Type'] = 'text/plain'\n try:\n #header_content_key = (self.node['ip'], self.node['port'])\n Content_length = len(str(comp_list))\n headers['Content-Length'] = Content_length \n self.logger.info(\"Header Sent:%s, ip:%s, port:%s, filesystem:%s,\"\n \" directory:%s, path:%s method:%s\" %(headers, \\\n target_service_obj.get_ip(), target_service_obj.get_port(), \\\n filesystem, directory, path, method))\n\n #Creating http connection to target node\n with ConnectionTimeout(conn_timeout):\n conn = http_connect(\n target_service_obj.get_ip(), target_service_obj.get_port(),\\\n filesystem, directory, method, path, headers)\n with Timeout(node_timeout):\n resp = conn.getexpect()\n\n if resp.status == HTTP_CONTINUE:\n conn.resp = None\n self.logger.info(\"HTTP continue %s\" % resp.status)\n create_connection = True\n elif is_success(resp.status):\n conn.resp = resp\n self.logger.info(\"Successfull status:%s\" % resp.status)\n create_connection = True\n elif resp.status == HTTP_INSUFFICIENT_STORAGE:\n self.logger.error('ERROR Insufficient Storage' \\\n 'ip:%s, port:%s' %(target_service_obj.get_ip(), \\\n target_service_obj.get_port()))\n create_connection = False\n self.check_transfer_component_map[target_service_obj] = \"Failed\"\n except (Exception, Timeout) as err:\n self.logger.exception(\n 
\"Exception occured: %s during http connect id :%s, \\\n Expected: 100-continue\" % (err, target_service_obj.get_id()))\n create_connection = False\n self.check_transfer_component_map[target_service_obj] = \"Failed\"\n\n # sending component list to target node over http connection\n if create_connection:\n conn.reader = str(comp_list)\n self.logger.info(\"Sending component List: %s\" % \\\n conn.reader)\n try:\n with ChunkWriteTimeout(node_timeout):\n conn.send(conn.reader)\n conn.send_data = True\n except (Exception, ChunkWriteTimeout) as err:\n self.logger.error('Exception occured : %s at id : %s \\\n info: send file failed' %(target_service_obj.get_id(), err))\n conn.send_data = False\n self.check_transfer_component_map[target_service_obj] = \\\n \"Failed\"\n\n self.logger.info(\"Sending component list:%s completed ip:%s port:%s\"\n %(comp_list, target_service_obj.get_ip(), \\\n target_service_obj.get_port()))\n\n def get_conn_response(conn, comp_list, node_timeout):\n \"\"\"\n Getting connection response\n \"\"\"\n try:\n with Timeout(node_timeout):\n if conn.resp:\n self.logger.debug(\"conn.resp returned\")\n return conn.resp\n else:\n self.logger.debug(\"conn.getexpect()\")\n return conn.getexpect()\n except (Exception, Timeout):\n self.check_transfer_component_map[target_service_obj] = \\\n \"Failed\"\n self.logger.exception('get_conn_response: Trying to get \\\n final status for id:%s' %(target_service_obj.get_id()))\n\n retry_count = 3\n counter = 0\n if conn.send_data:\n response = get_conn_response(conn, comp_list, node_timeout)\n if response and is_success(response.status):\n # send intermediate status to GL\n self.logger.info(\"Sending intermediate state to GL for \\\n target service : %s\" % target_service_obj.get_id())\n transferred_comp_list = []\n for comp in comp_list:\n transferred_comp_list.append((comp, target_service_obj))\n while counter < retry_count:\n counter += 1\n gl_info_obj = self._request_handler.get_gl_info()\n conn_obj = self._request_handler.connector(\\\n IoType.EVENTIO, gl_info_obj)\n if conn_obj != None:\n ret = self._request_handler.comp_transfer_info(\\\n self.service_id, transferred_comp_list, \\\n conn_obj)\n if ret.status.get_status_code() == Resp.SUCCESS: \n self.logger.info(\"Sent intermediate response \" \\\n \":%s to GL\" %transferred_comp_list)\n self.update_final_transfer_status(comp_list)\n self.check_transfer_component_map[\\\n target_service_obj] = True \n break\n else:\n self.logger.warning(\"Sending intermediate\" \\\n \"response to GL failed, Retrying\")\n conn_obj.close()\n if ret.status.get_status_code() != Resp.SUCCESS:\n self.check_transfer_component_map[target_service_obj] =\\\n \"Failed\"\n else:\n self.logger.error('get_response failed id:%s' \\\n %(target_service_obj.get_id()))\n self.check_transfer_component_map[target_service_obj] = \\\n \"Failed\"", "def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n 
self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(4)\n\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def _addMethod(self, effect, verb, resource, conditions):\n if verb != \"*\" and not hasattr(HttpVerb, verb):\n raise NameError(\"Invalid HTTP verb \" + verb +\n \". Allowed verbs in HttpVerb class\")\n resourcePattern = re.compile(self.pathRegex)\n if not resourcePattern.match(resource):\n raise NameError(\"Invalid resource path: \" + resource +\n \". Path should match \" + self.pathRegex)\n\n if resource[:1] == \"/\":\n resource = resource[1:]\n\n resourceArn = (\"arn:aws:execute-api:\" +\n self.region + \":\" +\n self.awsAccountId + \":\" +\n self.restApiId + \"/\" +\n self.stage + \"/\" +\n verb + \"/\" +\n resource)\n\n if effect.lower() == \"allow\":\n self.allowMethods.append({\n 'resourceArn': resourceArn,\n 'conditions': conditions\n })\n elif effect.lower() == \"deny\":\n self.denyMethods.append({\n 'resourceArn': resourceArn,\n 'conditions': conditions\n })", "def add_url_rules(self):\n self.app.add_url_rule(\"/\", \"root\",\n lambda: self.file_renderer(\"index.html\"),\n methods=[\"GET\", \"POST\", \"PUT\", \"DELETE\"])\n self.app.add_url_rule(\"/<path:path>\", \"all_files\", lambda path:\n self.file_renderer(path), methods=[\"GET\", \"POST\",\n \"PUT\",\n \"DELETE\"])", "def __call__(self, method, url, *args, **kwargs):\n url = urllib.parse.urljoin('/networks/{}/'.format(self.network_id), url)\n return super(NetworksMixin, self).__call__(method, url, *args, **kwargs)", "def lookup(self, *urls):\n path = 'urlLookup'\n return self._session.post(path, urls)", "def handle(self, *args, **options):\n self.create_indices()\n self.bulk()", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))", "def append(\n self,\n ids: Iterable[str],\n preds: Iterable[str],\n targets: Iterable[str],\n ) -> None:\n self._ids += list(ids)\n self._preds += list(preds)\n self._targets += list(targets)", "def add_new_urls(self, urls):\n if urls is None or len(urls) == 0:\n return \n for url in urls:\n self.add_new_url(url)", "def _route(self, args, request=None):\n return self._openstack_heat_proxy, []", "def autofixTargets(self, local_ctx):\n pass", "def 
add_req (self, src, dst):\n raise NotImplementedError", "def setup_ovh_ingress_link(release):\n ovh_ingress_path = os.path.join(ABSOLUTE_HERE, 'config', 'ovh', 'ovh_mybinder_org_ingress.yaml')\n stdout = subprocess.check_output([\n 'kubectl',\n 'apply',\n '-f',\n ovh_ingress_path,\n '-n',\n release\n ])\n print(stdout.decode('utf8'))", "def PostCollectors(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def add(self, **kwargs):\n\n return self._doAdd(\n self.getResourceManager().getSdk().hosts,\n **kwargs\n )", "def add_request(self, method, relative_url, headers, body, source_ip,\n server_name=None, version=None, instance_id=None):\n try:\n header_dict = wsgiref.headers.Headers(headers)\n connection_host = header_dict.get('host')\n connection = httplib.HTTPConnection(connection_host)\n\n\n connection.putrequest(\n method, relative_url,\n skip_host='host' in header_dict,\n skip_accept_encoding='accept-encoding' in header_dict)\n\n for header_key, header_value in headers:\n connection.putheader(header_key, header_value)\n connection.endheaders()\n connection.send(body)\n\n response = connection.getresponse()\n response.read()\n response.close()\n\n return request_info.ResponseTuple(\n '%d %s' % (response.status, response.reason), [], '')\n except (httplib.HTTPException, socket.error):\n logging.exception(\n 'An error occured while sending a %s request to \"%s%s\"',\n method, connection_host, relative_url)\n return request_info.ResponseTuple('0', [], '')", "def seed(context, data):\n for key in (\"url\", \"urls\"):\n for url in ensure_list(context.params.get(key)):\n url = url % data\n context.emit(data={\"url\": url})", "def run_server_by_urls(getter_url, handler_url):\n get_method = find_getter(getter_url)\n logger.debug('start serving')\n get_method(getter_url, handler_url)", "def get_urls(self, **kwargs):\n pass # pragma: no cover", "def urlfor( request, *args, **kwargs ):", "def index():\n return make_json_response(ENDPOINT_LIST)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_patch_action(self):\n pass", "def add_view(config):\n config.add_route('ogcproxy', '/ogcproxy')\n config.add_view('papyrus_ogcproxy.views:ogcproxy', route_name='ogcproxy')", "def get_hosts(self, target, listener_type):", "def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_create_action(self):\n pass", "def CreateRequests(self, args):\n\n health_check_ref = self.HTTP_HEALTH_CHECKS_ARG.ResolveAsResource(\n args, self.resources)\n\n request = self.messages.ComputeHttpHealthChecksInsertRequest(\n httpHealthCheck=self.messages.HttpHealthCheck(\n name=health_check_ref.Name(),\n host=args.host,\n port=args.port,\n description=args.description,\n requestPath=args.request_path,\n checkIntervalSec=args.check_interval,\n timeoutSec=args.timeout,\n healthyThreshold=args.healthy_threshold,\n unhealthyThreshold=args.unhealthy_threshold,\n ),\n project=self.project)\n\n return [request]", "def get_api_urls(user, repo, 
endpoint, start, limit=50):\n req_urls = []\n queries = {}\n queries['start'] = start\n count = 0\n stop = start/limit\n while count <= stop:\n new_url = make_req_url(user, repo, endpoint, limit, queries)\n req_urls.append(new_url)\n queries['start'] -= limit\n count += 1\n return req_urls", "def __call__(self, environ, start_response):\r\n\r\n rpc_method = self._get_rpc_handler(environ)\r\n request_method = environ[\"REQUEST_METHOD\"]\r\n\r\n if rpc_method:\r\n if request_method == \"POST\":\r\n return self._do_grpc_request(rpc_method, environ, start_response)\r\n elif request_method == \"OPTIONS\":\r\n return self._do_cors_preflight(environ, start_response)\r\n else:\r\n start_response(\"400 Bad Request\", [])\r\n return []\r\n\r\n if self._application:\r\n return self._application(environ, start_response)\r\n else:\r\n start_response(\"404 Not Found\", [])\r\n return []", "def add_func(self, transmute_func, transmute_context):\n swagger_path = transmute_func.get_swagger_path(transmute_context)\n for p in transmute_func.paths:\n self.add_path(p, swagger_path)", "def proc_inc_coms():\n content = request.json \n host_str = content['hosts']\n coms = content['commands']\n hosts = host_str.split('|')\n for h in hosts:\n com_file = \"/tmp/cc/hosts/\" + h # write the commands the file named <ip>\n if os.path.isfile(com_file):\n with open(com_file, 'a') as f:\n f.write(coms)\n else:\n with open(com_file, 'w') as f:\n f.write(coms)\n return \"\"", "def _add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):\n if callable(rule):\n rule = rule(blueprint_setup.url_prefix)\n elif blueprint_setup.url_prefix:\n rule = blueprint_setup.url_prefix + rule\n options.setdefault('subdomain', blueprint_setup.subdomain)\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func)\n defaults = blueprint_setup.url_defaults\n if 'defaults' in options:\n defaults = dict(defaults, **options.pop('defaults'))\n blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),\n view_func, defaults=defaults, **options)", "def main():\n app.run(host='127.0.0.1', port=443, debug=True)\n CORS(app)", "def _add(self, arn, targets):\n\n # TODO: In the future, add support for the optional Port and\n # AvailabilityZone parameters. For now, keeping this dead simple.\n targets = [{'Id': t} for t in targets]\n\n try:\n yield self.api_call(\n self.elbv2_conn.register_targets,\n TargetGroupArn=arn,\n Targets=targets)\n except botocore.exceptions.ClientError as e:\n raise exceptions.UnrecoverableActorFailure(str(e))", "def send_actions(self, actions):\n pass", "def webAdd( self, web ):\n web.addOutput( self )", "def _addMethod(self, effect, verb, resource, conditions):\n if verb != '*' and not hasattr(HttpVerb, verb):\n raise NameError('Invalid HTTP verb ' + verb + '. Allowed verbs in HttpVerb class')\n resourcePattern = re.compile(self.pathRegex)\n if not resourcePattern.match(resource):\n raise NameError('Invalid resource path: ' + resource + '. 
Path should match ' + self.pathRegex)\n\n if resource[:1] == '/':\n resource = resource[1:]\n\n resourceArn = 'arn:aws:execute-api:{}:{}:{}/{}/{}/{}'.format(self.region, self.awsAccountId, self.restApiId, self.stage, verb, resource)\n\n if effect.lower() == 'allow':\n self.allowMethods.append({\n 'resourceArn': resourceArn,\n 'conditions': conditions\n })\n elif effect.lower() == 'deny':\n self.denyMethods.append({\n 'resourceArn': resourceArn,\n 'conditions': conditions\n })", "def collectTargets(self, output):\n pass", "def add_routes(self):\n pass" ]
[ "0.49094117", "0.48981446", "0.4891663", "0.4882995", "0.48656306", "0.48633826", "0.4838656", "0.48215118", "0.48095515", "0.4799887", "0.47985315", "0.47854328", "0.47777006", "0.47676295", "0.47660863", "0.47645792", "0.47565734", "0.47402588", "0.4731982", "0.47042406", "0.46917313", "0.4677963", "0.46611992", "0.4660296", "0.46583098", "0.4655292", "0.46538755", "0.4645811", "0.4635391", "0.4624777", "0.46246454", "0.46234906", "0.4622868", "0.46213168", "0.46153843", "0.45966357", "0.45935422", "0.45921826", "0.4581423", "0.45661163", "0.45609432", "0.45606628", "0.45565405", "0.45561412", "0.455346", "0.4545531", "0.45302036", "0.452844", "0.45217115", "0.45211202", "0.45134947", "0.4504693", "0.44954753", "0.44918156", "0.44827804", "0.44826072", "0.4482449", "0.44814745", "0.44744757", "0.4474242", "0.44719636", "0.44497618", "0.44479278", "0.44407913", "0.44357705", "0.44343787", "0.44300088", "0.4429333", "0.44290417", "0.4428678", "0.44282013", "0.44271725", "0.44223347", "0.44191334", "0.44191244", "0.44157916", "0.4413873", "0.44091374", "0.4406827", "0.4401162", "0.44004694", "0.44004616", "0.439769", "0.43974036", "0.43958732", "0.4393578", "0.43934232", "0.43903062", "0.43845206", "0.43814522", "0.43760535", "0.43722188", "0.43705976", "0.43696454", "0.43689534", "0.43676597", "0.43670717", "0.4365668", "0.43625537", "0.435592", "0.43536326" ]
0.0
-1
Make RPC call to 'Envoy Controller' to remove the proxy rule for the UE.
def deactivate_he_urls_for_ue(ip: IPAddress, rule_id: str) -> bool:
    try:
        chan = ServiceRegistry.get_rpc_channel(
            SERVICE_NAME, ServiceRegistry.LOCAL,
        )
    except grpc.RpcError:
        logging.error('Cant get RPC channel to %s', SERVICE_NAME)
        return False

    client = EnvoyControllerStub(chan)
    try:
        he_info = DeactivateUEHeaderEnrichmentRequest(ue_ip=ip, rule_id=rule_id)
        ret = client.DeactivateUEHeaderEnrichment(he_info, timeout=TIMEOUT_SEC)
        return ret.result == DeactivateUEHeaderEnrichmentResult.SUCCESS
    except grpc.RpcError as err:
        logging.error(
            "Deactivate HE proxy error[%s] %s",
            err.code(),
            err.details(),
        )
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_proxy(proxy):\n r = requests.get(\"http://127.0.0.1:5010/delete/?proxy={}\".format(proxy))", "def client(api_client, proxy):\n\n proxy.deploy()\n\n assert api_client().get(\"/echo/anything\").status_code == 200\n\n proxy.mapping_rules.delete(proxy.mapping_rules.list()[0][\"id\"])\n proxy.deploy()\n\n return api_client(disable_retry_status_list={404})", "def delete_nat_rule(**kwargs):\n proxy = kwargs['proxy']\n sessiontoken = kwargs['sessiontoken']\n nat_id = kwargs['objectname']\n tier1_id = kwargs['tier1_id']\n\n result = remove_sddc_nat_json(proxy, sessiontoken, nat_id, tier1_id)\n if result is not None:\n print(\"\\n\")\n params = {'proxy':proxy, 'sessiontoken':sessiontoken, 'objectname':nat_id, 'tier1_id':tier1_id}\n get_nat_rules(**params)\n else:\n print('Something went wrong. Please check your syntax and try again.')\n sys.exit(1)", "def fusion_api_delete_proxy_server(self, api=None, headers=None):\n return self.proxyserver.delete(api, headers)", "def test_remove_proxy(matrix):\n matrix.remove_proxy_config()\n assert matrix.external_port == 8008", "def remove_redirect(self, expr, node_host, node_port, openflow_host, openflow_port):\n pusher = self.StaticFlowEntryPusher(openflow_host, openflow_port)\n pusher.remove({\"name\":\"request_hands_off-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.remove({\"name\":\"request_out-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.remove({\"name\":\"request_in-\" + node_host + \"-\" + node_port + \"-\" + expr})", "def remove_robots(): #py:remove_robots\n RUR._remove_robots_()", "def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)", "def removeResource(self, rouri, resuri):\n # Find proxy for resource\n (proxyuri, manifest) = self.getROResourceProxy(resuri, rouri)\n if proxyuri == None:\n return (404, \"Resource proxy not found in manifest\")\n assert isinstance(proxyuri, rdflib.URIRef)\n # Delete proxy\n (status, reason, headers, uri, data) = self.doRequestFollowRedirect(proxyuri,\n method=\"DELETE\")\n return (status, reason)", "def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")", "def delete_match_api_rule(self, handle_id, table_id):\n pass", "def stop_proxy(args, address):\n logging.info(\"stopping transparent proxy for {0}\".format(address))\n\n # Delete DNS route\n rule = [\"PREROUTING\", \"-s\", address, \"-p\", \"udp\", \"--dport\", \"53\",\n \"-j\", \"REDIRECT\", \"--to-ports\", str(args.dnsport)]\n\n if args.interface:\n rule += [\"-i\", args.interface]\n\n delete_rule(rule, table=\"nat\")\n\n # Delete TCP route\n rule = [\"PREROUTING\", \"-s\", address, \"-p\", \"tcp\", \"--syn\", \"!\", \"--dport\", str(args.resultport),\n \"-j\", \"REDIRECT\", \"--to-ports\", str(args.proxyport)]\n\n if args.interface:\n rule += [\"-i\", args.interface]\n\n delete_rule(rule, table=\"nat\")", "def test_esg_firewall_rule_uninstall(self):\n self._common_uninstall_delete(\n 'esg_id|id', esg_firewall.delete,\n {'rule': {\n 'esg_id': 'esg_id'\n }},\n ['firewallRule'], {\n 'uri_parameters': {'edgeId': 'esg_id', 'ruleId': 'id'}\n },\n additional_params=['rule_id']\n )", "def DestroyProxies(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DestroyProxies\", params, headers=headers)\n response = json.loads(body)\n model = models.DestroyProxiesResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if 
isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def test_delete_proxy():\n result = False\n\n proxy = {\n 'name': 'proxy',\n 'address': 'proxy2.ntnxlab.local',\n 'port': '8080',\n 'http': True,\n 'https': True,\n 'socks': False,\n 'username': '',\n 'password': '',\n }\n\n cluster_obj = prism.Cluster(api_client=_api())\n config_obj = prism.Config(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n for each_uuid in clusters:\n config_obj.remove_proxy(name=proxy['name'], clusteruuid=each_uuid)\n cluster_proxy = config_obj.get_proxy(clusteruuid=each_uuid)\n\n if not cluster_proxy:\n result = True\n\n assert result", "def test_delete_hyperflex_proxy_setting_policy(self):\n pass", "def _delete_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def DELETE(self, req):\r\n req.headers['X-Remove-Container-Meta-Access-Control-Allow-Origin'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Allow-Methods'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Allow-Headers'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Expose-Headers'] = 'x'\r\n req.headers['X-Remove-Container-Meta-Access-Control-Max-Age'] = 'x'\r\n\r\n resp = req.get_response(self.app, method='POST', headers=req.headers)\r\n\r\n return resp", "def _stop(self):\r\n self._client.prerouting.delete_rule(self._remoteRule)\r\n self._client.output.delete_rule(self._localRule)\r\n self._client.prerouting.delete_rule(self._rosremoteRule)\r\n self._client.output.delete_rule(self._roslocalRule)\r\n\r\n return self._container.stop(self._name)", "def unset(cls, client, resource, args) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tunsetresource = rewriteaction()\n\t\t\t\tif type(resource) != type(unsetresource):\n\t\t\t\t\tunsetresource.name = resource\n\t\t\t\telse :\n\t\t\t\t\tunsetresource.name = resource.name\n\t\t\t\treturn unsetresource.unset_resource(client, args)\n\t\t\telse :\n\t\t\t\tif type(resource[0]) != cls :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ rewriteaction() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].name = resource[i]\n\t\t\t\telse :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ rewriteaction() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].name = resource[i].name\n\t\t\t\tresult = cls.unset_bulk_request(client, unsetresources, args)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "def fusion_api_delete_rack_manager(self, uri, name=None, param='', api=None, headers=None):\n return self.rackmanager.delete(uri=uri, name=name, param=param, api=api, headers=headers)", "def revoke(self):\r\n return http.Request('DELETE', self.get_url()), parsers.parse_empty", "def remove_subscriber_he_flows(\n self, ue_addr: IPAddress, rule_id: str = \"\",\n rule_num: int = -1,\n ):\n ue_ip_str = ipv4_address_to_str(ue_addr)\n\n if self._ue_rule_counter.get(ue_ip_str) == 0:\n return\n logging.info(\n \"Del HE rule: ue-ip: %s rule_id: %s rule %d\",\n ue_addr, rule_id, rule_num,\n )\n\n if rule_num == -1:\n ip_match_in = get_ue_ip_match_args(ue_addr, Direction.IN)\n match_in = MagmaMatch(\n eth_type=get_eth_type(ue_addr),\n **ip_match_in,\n )\n flows.delete_flow(self._datapath, self.tbl_num, match_in)\n\n ip_match_out = get_ue_ip_match_args(ue_addr, 
Direction.OUT)\n match_out = MagmaMatch(\n eth_type=get_eth_type(ue_addr),\n **ip_match_out,\n )\n flows.delete_flow(self._datapath, self.tbl_num, match_out)\n else:\n match = MagmaMatch()\n flows.delete_flow(\n self._datapath, self.tbl_num, match,\n cookie=rule_num, cookie_mask=flows.OVS_COOKIE_MATCH_ALL,\n )\n\n success = deactivate_he_urls_for_ue(ue_addr, rule_id)\n logging.debug(\"Del HE proxy: %s\", success)\n if success:\n if rule_num == -1:\n self._ue_rule_counter.delete(ue_ip_str)\n else:\n self._ue_rule_counter.dec(ue_ip_str)", "def Run(self, args):\n identifiers = args.CONCEPTS.revision.Parse().AsDict()\n if identifiers[\"revisionsId\"] == \"auto\":\n del identifiers[\"revisionsId\"]\n defaults.FallBackToDeployedProxyRevision(identifiers)\n\n return apigee.APIsClient.Undeploy(identifiers)", "def removeProxyManagerConnection(address=None):\n global __mgr_cache__\n #: :type: ProxyManager\n if hasattr(__mgr_cache__[address], 'shutdown'):\n __mgr_cache__[address].shutdown()\n del __mgr_cache__[address]", "def remove_proxy_config(self):\n self.external_port = 8008", "def remove_ongoing_rule():\n rule_id = request.args.get('id')\n database.update(database.QUERY[mn() + '_remove_from_life'].format(rule_id))\n database.update(database.QUERY[mn() + '_delete_ongoing_rule'].format(rule_id))\n update_all_rules()\n\n send_ongoing_rule_message('remove_ongoing_rule', {'rule_id': rule_id})\n\n return json.dumps({'status': 'OK'})", "def remove_service(self, zeroconf, service_type, name):", "def removeRule(self, *args):\n return _libsbml.Model_removeRule(self, *args)", "def handle_delete(self, request, user, *args, **kwargs):\n try:\n\n self.log.info('Delete rule from an environment')\n\n # User permission\n if not has_perm(user, AdminPermission.VIP_VALIDATION, AdminPermission.WRITE_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n raise UserNotAuthorizedError(None)\n\n id_rule = kwargs.get('id_rule')\n\n if not is_valid_int_greater_zero_param(id_rule):\n self.log.error(\n u'The id_rule parameter is not a valid value: %s.', id_rule)\n raise InvalidValueError(None, 'id_rule', id_rule)\n\n rule = Rule.objects.get(pk=id_rule)\n rule.delete()\n\n return self.response(dumps_networkapi({}))\n\n except InvalidValueError, e:\n return self.response_error(269, e.param, e.value)\n except Rule.DoesNotExist:\n return self.response_error(358)\n except UserNotAuthorizedError:\n return self.not_authorized()\n except Exception, e:\n return self.response_error(1)", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def test_remove_virt_realm(self):\n pass", "def _delete_bridge(self, method, api, header, data):\n self._execute_api(method, api, header)", "def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def remove_model(model):\n rospy.wait_for_service('/gazebo/delete_model')\n try:\n rospy.logwarn(\"Call the method for removing the model: \" + model)\n remove_model_proxy = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)\n remove_model_proxy(model)\n except rospy.ServiceException, ex:\n print \"Service call delete_model failed: %e\" % ex", "def clear_control_lines() -> Response:\n try:\n uuid = str(request.args['uuid'])\n\n if uuid in control_lines:\n control_lines[uuid].clear()\n\n return Response(status=200)\n\n except:\n return Response(status=500)", "def remove_proxy(self, proxied):\n\n val = self.proxy_map[proxied]\n\n try:\n del 
self.proxy_map[proxied]\n except KeyError:\n pass\n\n try:\n del self.proxy_map[val]\n del self.capability_map[val]\n except KeyError:\n pass", "def remove_endpoint_from_sipserver(self, endpoint: str) -> None:", "def fusion_api_remove_switch(self, name=None, uri=None, api=None, headers=None):\n return self.switch.delete(name, uri, api, headers)", "def test_remove():\n client = TestClient()\n client.run('config set proxies.https=myurl')\n client.run('config rm proxies.https')\n conf_file = load(client.cache.conan_conf_path)\n assert 'myurl' not in conf_file", "def rpc_remove_connection(client, source, dest,\n rpc_user=BTC_RPC_USER, rpc_password=BTC_RPC_PASSWD, rpc_port=BTC_RPC_PORT):\n try:\n rpc_server = get_ip_by_unknown(client, source)\n dest = get_ip_by_unknown(client, dest)\n rpc_connection = AuthServiceProxy(\"http://%s:%s@%s:%s\" % (rpc_user, rpc_password, rpc_server, rpc_port))\n rpc_connection.addnode(dest, \"remove\")\n return True\n except JSONRPCException as err:\n print(err)\n return False", "def undeploy_advance_op_handler(self):\n self.__undeploy_function(\"advance_op\")", "def delete(self, package=\"\", uid=\"\", params={}):\n return self.__post('delete-nat-rule', package, uid, params)", "def unregister(self, target, hostname, listener_type):", "def _unregister_from_server(self):\n self.remote_controller.unregister()", "def iptables_delete(nid, rid):\n if nid != -1 and (hl.getNode(\"ID\",nid) and hl.getNode(\"ID\",nid)[\"Address\"] != \"self\"):\n url = hl.getNode(\"ID\", nid)[\"Address\"] \n hl.nodePost(url+\"/deleterule/\",{\"ID\" : rid}) \n else:\n hl.removeIPRule(rid)\n \n return redirect(url_for('confirm', confirmed = \"IP Table Rule Deleted!\"))", "def route_removed(self, prefix, next_hop, as_path):", "def unset():\n rino.remote.unset()", "def cloudflare_waf_firewall_rule_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n rule_id = args['id']\n zone_id = args.get('zone_id', client.zone_id)\n\n response = client.cloudflare_waf_firewall_rule_delete_request(rule_id, zone_id)\n\n return CommandResults(\n readable_output=f'Firewall rule {rule_id} was successfully deleted.',\n raw_response=response\n )", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))", "def unregister_router(self, hostname):", "def clean_proxy_nodes(rig_top_node):\n proxy_nodes = cmds.ls('proxy_*', type='network')\n\n if proxy_nodes:\n cmds.delete(proxy_nodes)\n LOG.debug(\"Deleted proxy nodes: {nodes}\".format(nodes=''.join(proxy_nodes)))\n\n if cmds.objExists('proxy_nodes'):\n print 'Deleting set'\n cmds.delete('proxy_nodes')", "def unset(cls, client, resource, args) :\n try :\n if type(resource) is not list :\n unsetresource = nshttpprofile()\n if type(resource) != type(unsetresource):\n unsetresource.name = resource\n else :\n unsetresource.name = resource.name\n return unsetresource.unset_resource(client, args)\n else :\n if type(resource[0]) != cls :\n 
if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i]\n else :\n if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i].name\n result = cls.unset_bulk_request(client, unsetresources, args)\n return result\n except Exception as e :\n raise e", "def DeleteRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteRule\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)", "def __del__(self):\n _cantera.reactornet_del(self.__reactornet_id)", "def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers)", "def fusion_api_delete_server_hardware(self, name=None, uri=None, api=None, headers=None):\n return self.sh.delete(name, uri, api, headers)", "def do(self, argin):\n device=self.target\n\n device._remove_receptors_helper(argin)\n message = \"CBFSubarray RemoveReceptors command completed OK\"\n self.logger.info(message)\n return (ResultCode.OK, message)", "def delete_rule(rule, table=None):\n cmdline = [IPTABLES_PATH]\n if table:\n cmdline += [\"-t\", table]\n cmdline += [\"-D\"] + rule\n return call(cmdline)", "def RemoveReceptors(self, argin):\n command = self.get_command_object(\"RemoveReceptors\")\n (return_code, message) = command(argin)\n return [[return_code], [message]]", "def _remove_receptors_helper(self, argin):\n receptor_to_vcc = dict([*map(int, pair.split(\":\"))] for pair in\n self._proxy_cbf_controller.receptorToVcc)\n for receptorID in argin:\n if receptorID in self._receptors:\n vccID = receptor_to_vcc[receptorID]\n vccProxy = self._proxies_vcc[vccID - 1]\n\n # unsubscribe from events\n vccProxy.unsubscribe_event(self._events_state_change_vcc[vccID][0]) # state\n vccProxy.unsubscribe_event(self._events_state_change_vcc[vccID][1]) # healthState\n del self._events_state_change_vcc[vccID]\n del self._vcc_state[self._fqdn_vcc[vccID - 1]]\n del self._vcc_health_state[self._fqdn_vcc[vccID - 1]]\n\n # reset receptorID and subarrayMembership Vcc attribute:\n vccProxy.receptorID = 0\n vccProxy.subarrayMembership = 0\n\n self._receptors.remove(receptorID)\n self._proxies_assigned_vcc.remove(vccProxy)\n self._group_vcc.remove(self._fqdn_vcc[vccID - 1])\n else:\n log_msg = \"Receptor {} not assigned to subarray. 
Skipping.\".format(str(receptorID))\n self.logger.warn(log_msg)\n\n # transitions to EMPTY if not assigned any receptors\n if not self._receptors:\n self._update_obs_state(ObsState.EMPTY)", "def deleteAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def disable(self):\n self.registrar.unregister_service(\"map\", namespace=__name__)\n self.registrar.unregister_service(\"directions\", namespace=__name__)", "def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )", "def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )", "def unregister(self, parent):\n parent.unregisterCommand('delete')\n parent.unregisterCommand('meshcreated')", "def Perform(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveInternalWires_Perform(self, *args)", "def delete(self, request, checksum=None):\n if not checksum:\n raise core_exceptions.InvalidRequestException()\n\n try:\n HaProxyConfigModel.objects.filter(checksum=checksum).delete()\n except HaProxyConfigModel.DoesNotExist:\n raise core_exceptions.DoesNotExistException()\n\n return Response({'deleted': True})", "def test_dlr_interface_uninstall(self):\n self._common_uninstall_delete(\n 'id|dlr_id',\n dlr_interface.delete,\n {},\n delete_args=['interfaces'],\n delete_kwargs={\n 'uri_parameters': {'edgeId': 'dlr_id'},\n 'query_parameters_dict': {'index': 'id'}\n }\n )", "def do_DELETE(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE agent returning 400 response. 
uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n if self.server.db.remove_agent(agent_id):\n #send response\n common.echo_json_response(self, 200, \"Success\")\n return\n else:\n #send response\n common.echo_json_response(self, 404)\n return\n else:\n common.echo_json_response(self, 404)\n return", "def revoke(config, hostname, username):\n\n response = make_api_request('DELETE', config, '/machines/' + hostname +\n '/users/' + username)\n print 'Permission revoked successfully.'", "def remote_destroy(self):\r\n if self._receivers:\r\n for interface in reduce(set.union, self._receivers.itervalues()):\r\n interface.unregisterProtocol(self)\r\n\r\n self._receivers = None\r\n\r\n if self._endpoint:\r\n self._endpoint.unregisterProtocol(self)\r\n self._endpoint = None", "def __del__(self):\n self.DcMotor.run(Adafruit_MotorHAT.RELEASE) # changed rightMotor to DcMotor , RFMH_2019_02_28\n del self.motorhat", "def Destroy(sliver_name):\n rec = sliver_name\n if rec['instantiation'] == 'delegated':\n account.get(rec['name']).ensure_destroyed()\n logger.log(\"api_calls: Destroy %s\"%rec['name'])\n else:\n raise Exception, \"Only PLC can destroy non delegated slivers.\"", "def remove_owner(urn: str, owner_urn: str) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_owner(owner=_get_owner_urn(owner_urn))\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove owners\")\n for mcp in dataproduct_patcher.build():\n print(json.dumps(mcp.to_obj()))\n graph.emit(mcp)", "def remove(self, middleware):\n pass # pragma: no cover", "def delete(self, urns, client_cert, credentials, best_effort): ### FIX the response\n result = []\n slice_urn = urns[0]\n # try:\n for urn in urns:\n if self._verify_users:\n logger.debug(\"delete: authenticate the user for %s\" % (urn))\n client_urn, client_uuid, client_email =\\\n self.auth(client_cert, credentials, urn, (\"deletesliver\",))\n logger.info(\"Client urn=%s, uuid=%s, email=%s\" % (\n client_urn, client_uuid, client_email,))\n\n try:\n links_db, nodes, links = self.SESlices.get_link_db(urn)\n except Exception as e:\n raise geni_ex.GENIv3GeneralError(\"Slice does not exist.\")\n\n reservation_ports = self.SESlices._allocate_ports_in_slice(nodes)[\"ports\"]\n\n portsVlansPairs = getPortsVlansPairs(links_db)\n\n try:\n for portVlanItem in portsVlansPairs:\n (in_port, out_port, in_vlan, out_vlan) = portVlanItem\n se_provision.deleteSwitchingRule(in_port, out_port, in_vlan, out_vlan)\n logger.debug(\"unprovision SE-Slice-Urn=%s, in_port=%s , out_port=%s, in_vlan=%s, out_port=%s\" % (urn,in_port, out_port, in_vlan, out_vlan))\n except:\n logger.warning(\"Problem in communication with SE\")\n\n # expires_date = datetime.strptime(links_db['geni_expires'], RFC3339_FORMAT_STRING)\n expires_date = links_db['geni_expires']\n\n\n for sliver in links_db[\"geni_sliver_urn\"]:\n result.append( \n { \n \"geni_sliver_urn\": sliver,\n \"geni_expires\": expires_date,\n \"geni_allocation_status\": \"geni_unallocated\",\n \"geni_operational_status\" : \"geni_notready\"\n }\n )\n\n # Mark resources as free\n self.SEResources.free_resource_reservation(reservation_ports)\n\n # Remove reservation\n self.SESlices.remove_link_db(urn)\n \n logger.info(\"delete successfully completed: %s\", slice_urn)\n \n return result\n\n # except:\n\n # raise 
geni_ex.GENIv3GeneralError(\"Delete Failed. Requested resources are not available.\")", "def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def test_routing_redistribution_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_redistribution.delete,\n {'rule': {}}\n )", "def fusion_api_clear_interconnect_ports(self, body, uri, api=None, param='', headers=None):\n param = '/statistics/reset%s' % param\n return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)", "async def unfollow(\n self,\n *,\n index: str,\n error_trace: t.Optional[bool] = None,\n filter_path: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n human: t.Optional[bool] = None,\n pretty: t.Optional[bool] = None,\n ) -> ObjectApiResponse[t.Any]:\n if index in SKIP_IN_PATH:\n raise ValueError(\"Empty value passed for parameter 'index'\")\n __path = f\"/{_quote(index)}/_ccr/unfollow\"\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query[\"error_trace\"] = error_trace\n if filter_path is not None:\n __query[\"filter_path\"] = filter_path\n if human is not None:\n __query[\"human\"] = human\n if pretty is not None:\n __query[\"pretty\"] = pretty\n __headers = {\"accept\": \"application/json\"}\n return await self.perform_request( # type: ignore[return-value]\n \"POST\", __path, params=__query, headers=__headers\n )", "def test_remove_section():\n client = TestClient()\n client.run('config rm proxies')\n conf_file = load(client.cache.conan_conf_path)\n assert '[proxies]' not in conf_file", "def RoutingInterfaceNotificationUnregister(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def RoutingInterfaceNotificationUnregister(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def remove(ip):\n return __apf_cmd(\"-u {}\".format(ip))", "def firewallOff():\n pass", "def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action(self):\n pass", "def remove_rule(self, ip_protocol, from_port, to_port,\r\n src_group_name, src_group_owner_id, cidr_ip):\r\n target_rule = None\r\n for rule in self.rules:\r\n if rule.ip_protocol == ip_protocol:\r\n if rule.from_port == from_port:\r\n if rule.to_port == to_port:\r\n target_rule = rule\r\n target_grant = None\r\n for grant in rule.grants:\r\n if grant.name == src_group_name:\r\n if grant.owner_id == src_group_owner_id:\r\n if grant.cidr_ip == cidr_ip:\r\n target_grant = grant\r\n if target_grant:\r\n rule.grants.remove(target_grant)\r\n if len(rule.grants) == 0:\r\n self.rules.remove(target_rule)", "def unregister(self, worker):\n\n iface = self.workers[worker]\n rules = [rule for rule in self._list() if rule[-1] == iface]\n for rule in rules:\n self.delete(rule[-2], iface, rule[1])\n del self.workers[worker]", "def unproxy_service(self, *service_ids) -> None:\n\n 
for service_id in service_ids:\n router_key = self._router_key(self._router_id(service_id))\n middleware_key = self._middleware_key(self._middleware_id(service_id))\n tservice_key = self._tservice_key(self._tservice_id(service_id))\n\n self._zk.delete(router_key, recursive=True)\n self._zk.delete(middleware_key, recursive=True)\n self._zk.delete(tservice_key, recursive=True)\n\n # prevents \"KV connection error: middlewares cannot be a standalone element\"\n middlewares_key = f\"/{self._prefix}/http/middlewares\"\n if not self._zk.get_children(middlewares_key):\n self._zk.delete(middlewares_key)\n\n self._trigger_configuration_update()", "def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def remote_destroyTunnel(self, name, targetIP):\r\n if name not in self._bridges:\r\n raise InternalError('Bridge does not exist.')\r\n\r\n key = (name, targetIP)\r\n\r\n if key not in self._uid:\r\n raise InternalError('Tunnel deos not exist.')\r\n\r\n return execute(('/usr/bin/ovs-vsctl', 'del-port',\r\n 'gre-{0}'.format(self._uid.pop(key))),\r\n reactor=self._reactor)", "def DeleteOIDCClient(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def map_clear(self, src_port, dst_port, command_logger=None):\r\n\r\n #Isolate source and destination port numbers from list provided by Cloudshell\r\n source = src_port[2]\r\n dest = dst_port[2]\r\n #Define URI to delete rules via REST\r\n uri = 'http://' + self.address + '/rest/rules?'\r\n #Create the parameters for the rules to be deleted from the Packetmaster\r\n paramsA = {'priority': 32768,\r\n 'match[in_port]': source}\r\n paramsB = {'priority': 32768,\r\n 'match[in_port]': dest}\r\n #Make REST delete requests for the rules to be deleted\r\n try:\r\n responseA = requests.delete(uri, params=paramsA, auth=(self.username, self.password))\r\n responseB = requests.delete(uri, params=paramsB, auth=(self.username, self.password))\r\n except ConnectionError as e:\r\n raise e", "def do(self):\n\n self.logger.debug(\"Entering RemoveAllReceptors()\")\n\n device=self.target\n\n # For LMC0.6.0: use a helper instead of a command so that it doesn't care about the obsState\n device._remove_receptors_helper(device._receptors[:])\n\n message = \"CBFSubarray RemoveAllReceptors command completed OK\"\n self.logger.info(message)\n return (ResultCode.OK, message)" ]
[ "0.5732244", "0.57202536", "0.5547828", "0.5529435", "0.5497504", "0.54183865", "0.53863347", "0.5307268", "0.5297264", "0.52534926", "0.5240638", "0.5222272", "0.52119", "0.52097315", "0.5205946", "0.5182716", "0.5167885", "0.5167784", "0.5166036", "0.51652503", "0.516177", "0.5155356", "0.51513517", "0.5126588", "0.51022977", "0.50988406", "0.5097772", "0.5095397", "0.5084291", "0.5081276", "0.5079817", "0.5079817", "0.5079817", "0.5079817", "0.5060275", "0.50375736", "0.5032633", "0.5020654", "0.5007269", "0.5002364", "0.49865025", "0.49838567", "0.49806303", "0.49705976", "0.49605474", "0.49519718", "0.49403283", "0.49331772", "0.49329144", "0.4915809", "0.49041358", "0.49025849", "0.49011236", "0.48999017", "0.48995113", "0.48833025", "0.48715168", "0.48664442", "0.4861742", "0.48581403", "0.48521674", "0.484717", "0.48460248", "0.48350534", "0.48308897", "0.4826249", "0.48196167", "0.48183572", "0.48183572", "0.48150274", "0.48079932", "0.4795465", "0.47930014", "0.47925505", "0.4792424", "0.4789141", "0.47828144", "0.47823417", "0.47803086", "0.47778884", "0.47722116", "0.47649172", "0.47642452", "0.47624007", "0.47594714", "0.47487038", "0.4747295", "0.47444054", "0.4743099", "0.4739178", "0.47357002", "0.47318956", "0.47302908", "0.47282085", "0.47266495", "0.4726394", "0.47259334", "0.47247818", "0.47236553", "0.47176522" ]
0.48373026
63
Update the data structures to recognize a new process was created
def alloc_proc(self, process, delta_t):
    self._process_list.append(process)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def updateProcess(self, machine, process):\n\n stamp = time.time() - self.initTime\n if machine in self.activity.keys():\n if ((\"processes\" in self.activity[machine].keys()) and \n (process in self.activity[machine][\"processes\"].keys())):\n self.activity[machine][\"processes\"][process].append(stamp)\n else:\n self.activity[machine][\"processes\"] = {process : [stamp]}\n else:\n self.activity[machine] = {\"filtered activity\" : [],\n \"raw activity\" : [],\n \"time\" : [],\n \"processes\" : {process : [stamp]}}", "def add_process(self):\n process_id = str(self.processBox.currentText())\n\n arguments = {}\n\n for row in range(0, self.processTableWidget.rowCount()):\n p_id = \"\"\n val = None\n\n if self.processTableWidget.item(row, 0):\n p_id = self.processTableWidget.item(row, 0).text()\n if self.processTableWidget.item(row, 2):\n val = self.processTableWidget.item(row, 2).text()\n if len(val) > 0:\n try:\n val = json.loads(val)\n except json.JSONDecodeError:\n pass\n else:\n val = None\n if p_id != \"\":\n if val:\n arguments[p_id] = val\n\n self.processgraph = self.processgraph.add_process(process_id, arguments)\n # Refresh process graph in GUI\n self.reload_processgraph_view()", "def _updateComponentsInfo(self):\n def removeItems(processDetail, measurements):\n self._components.remove(processDetail)\n self._compMeasurements.remove(measurements)\n\n myName = self.__class__.__name__\n # dictionary[componentName] = componentPID\n componentsInfo = self._getComponentsInfo()\n for processDetail, measurements in zip(self._components, self._compMeasurements):\n try:\n newPID = componentsInfo[processDetail.name]\n if int(newPID) == processDetail.pid:\n # ok, component still runs under the same PID\n # update list of child processes (some may have (dis)appeared)\n logging.debug(\"Component %s runs under the same PID, refreshing\"\n \" list of child processes ...\" % processDetail.getDetails())\n try:\n processDetail.refresh()\n except NoSuchProcess as ex:\n logging.error(\"Could not update list of children processes \"\n \"for %s, reason: %s\" % (processDetail.getDetails(), ex))\n del componentsInfo[processDetail.name]\n else:\n logging.warn(\"Component %s seems to have been restarted \"\n \"(different PID:%s, was:%s).\" % (processDetail.name,\n newPID, processDetail.pid))\n try:\n pd = ProcessDetail(newPID, processDetail.name)\n index = self._components.index(processDetail)\n self._components[index] = pd\n 
measurements.clear()\n except (NoSuchProcess, AccessDenied) as ex:\n logging.error(\"%s: component %s ignored, reason: %s\" % (myName, processDetail.name, ex))\n removeItems(processDetail, measurements)\n except KeyError:\n m = \"Component %s seems not running anymore, removed from polling.\" % processDetail.name\n logging.warning(m)\n removeItems(processDetail, measurements)\n\n if len(componentsInfo) > 0:\n logging.info(\"Some new components appeared since last check ...\")\n for compName, compPID in componentsInfo.items():\n self._setUpProcessDetailAndMeasurements(compPID, compName)", "def _UpdateProcessingStatus(self, pid, process_status, used_memory):", "def _create_process_instance(self, process_id, name, module, cls, config, proc_attr):\n # SERVICE INSTANCE.\n process_instance = for_name(module, cls)\n if not isinstance(process_instance, BaseService):\n raise ContainerConfigError(\"Instantiated service not a BaseService %r\" % process_instance)\n\n # Prepare service instance\n process_instance.errcause = \"\"\n process_instance.id = process_id\n process_instance.container = self.container\n process_instance.CFG = config\n process_instance._proc_name = name\n process_instance._proc_start_time = time.time()\n for att, att_val in proc_attr.iteritems():\n setattr(process_instance, att, att_val)\n\n #Unless the process has been started as part of another Org, default to the container Org or the ION Org\n if config.has_key('org_governance_name'):\n process_instance.org_governance_name = config['org_governance_name']\n else:\n process_instance.org_governance_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))\n\n\n # Add stateful process operations\n if hasattr(process_instance, \"_flush_state\"):\n def _flush_state():\n with process_instance._state_lock:\n state_obj = process_instance.container.state_repository.put_state(process_instance.id, process_instance._proc_state,\n state_obj=process_instance._proc_state_obj)\n state_obj.state = None # Make sure memory footprint is low for larger states\n process_instance._proc_state_obj = state_obj\n process_instance._proc_state_changed = False\n\n def _load_state():\n if not hasattr(process_instance, \"_proc_state\"):\n process_instance._proc_state = {}\n try:\n with process_instance._state_lock:\n new_state, state_obj = process_instance.container.state_repository.get_state(process_instance.id)\n process_instance._proc_state.clear()\n process_instance._proc_state.update(new_state)\n process_instance._proc_state_obj = state_obj\n process_instance._proc_state_changed = False\n except NotFound as nf:\n log.debug(\"No persisted state available for process %s\", process_instance.id)\n except Exception as ex:\n log.warn(\"Process %s load state failed: %s\", process_instance.id, str(ex))\n process_instance._flush_state = _flush_state\n process_instance._load_state = _load_state\n process_instance._state_lock = RLock()\n process_instance._proc_state = {}\n process_instance._proc_state_obj = None\n process_instance._proc_state_changed = False\n\n # PROCESS RESTART: Need to check whether this process had persisted state.\n # Note: This could happen anytime during a system run, not just on RESTART boot\n log.debug(\"Loading persisted state for process %s\", process_id)\n process_instance._load_state()\n\n # start service dependencies (RPC clients)\n self._start_process_dependencies(process_instance)\n\n return process_instance", "def test_getStateIncludesProcesses(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n 
uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.__getstate__()['processes'],\r\n {'foo': (['arg1', 'arg2'], 1, 2, {})})", "def _update(self):\n candidates = _find_running_exe(path.join(self.run_dir, \"osiris\"))\n\n try:\n if not candidates: # No process running found\n self.processes = None\n # Try to find a job in queue\n jobs = _get_grid_jobs()\n if not jobs: # Either no qstat or empty list\n self.running_mode = \"\"\n else:\n script_path = path.abspath(path.join(self.run_dir, \"start.sh\"))\n valid_jobs = list(filter(lambda j: j[\"script\"] == script_path, jobs))\n if valid_jobs:\n if len(valid_jobs) > 1:\n logger.warning(\"More than one grid job was found for the run.\")\n self.job = valid_jobs[0]\n self.running_mode = \"grid\"\n else: # No queued job\n self.running_mode = \"\"\n\n else:\n self.processes = list(map(psutil.Process, candidates))\n self.running_mode = \"local\"\n\n except psutil.NoSuchProcess:\n # If the processes have died before processing was completed.\n self.processes = None\n self.running_mode = \"\"", "def update(self):\n if self.process:\n retcode = self.process.poll()\n # Windows exit code\n if retcode is None:\n # logging.debug(\"Update {}, Process: {}, RUNNING\".format(self.hash,self.process))\n self.status = \"Running\"\n else:\n # Add more handling for irregular retcodes\n # See i.e. http://www.symantec.com/connect/articles/windows-system-error-codes-exit-codes-description\n # logging.debug(\"Update {}, Process: {}, DONE\".format(self.hash,self.process))\n self.run_status = \"Finished\"\n self.finish_time = datetime.datetime.now()\n else:\n # This process has not been started]\n raise\n pass", "def addProcessObject(self, process, env, uid=None, gid=None):\n name = process.getName()\n self.processes[name] = (process, env, uid, gid)\n self.delay[name] = self.minRestartDelay\n if self.running:\n self.startProcess(name)", "def watch_process(self):\n psutil.wait_procs([psutil.Process(self._proc.pid)],\n callback=self.start)", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # 
cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def add_process(self, process):\n self.processes[process.namespec()] = process", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def _launch_process_watcher(self, parent_pid, child_pid, child_host, child_port, minecraft_dir):\n\n multiprocessing.freeze_support()\n parent_conn, child_conn = multiprocessing.Pipe()\n self._logger.info(\"Starting process watcher for process {} @ {}:{}\".format(child_pid, child_host, child_port))\n p = multiprocessing.Process(\n target=InstanceManager._process_watcher, args=(\n parent_pid, child_pid, \n child_host, child_port, \n minecraft_dir, child_conn))\n \n def update_port(port):\n parent_conn.send([port])\n # p.daemon = True\n\n p.start()\n return p, update_port", "def _RegisterProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n if process.pid in self._processes_per_pid:\n raise KeyError(\n 'Already managing process: {0!s} (PID: {1:d})'.format(\n process.name, process.pid))\n\n self._processes_per_pid[process.pid] = process", "def reload(self):\n\t\tdel self.processes\n\t\tself.processes = {}\n\t\tpids = os.listdir(self.basedir)\n\t\tfor spid in pids:\n\t\t\ttry:\n\t\t\t\tpid = int(spid)\n\t\t\texcept:\n\t\t\t\tcontinue\n\n\t\t\tself.processes[pid] = process(pid, self.basedir)", "def _instantiate_processes(self, input=None, context=None):\n# FIX: ALLOW Projections (??ProjectionTiming TUPLES) TO BE INTERPOSED BETWEEN MECHANISMS IN PATHWAY\n# FIX: AUGMENT LinearMatrix TO USE FULL_CONNECTIVITY_MATRIX IF len(sender) != len(receiver)\n\n # # MODIFIED 2/8/17 OLD: [SEE BELOW]\n # self.variable = []\n # MODIFIED 2/8/17 END\n self.mechanismsDict = {}\n self._all_mech_tuples = []\n self._allMechanisms = MechanismList(self, self._all_mech_tuples)\n\n # Get list of processes specified in arg to init, possibly appended by EVCMechanism (with prediction processes)\n processes_spec = self.processes\n\n # Assign default Process if PROCESS is empty, or invalid\n if not processes_spec:\n from PsyNeuLink.Components.Process import Process_Base\n processes_spec.append(ProcessTuple(Process_Base(), None))\n\n # If input to system is specified, number of items must equal number of processes with origin mechanisms\n if input is not None and len(input) != len(self.originMechanisms):\n raise SystemError(\"Number of items in input ({}) must equal number of processes ({}) in {} \".\n format(len(input), len(self.originMechanisms),self.name))\n\n #region VALIDATE EACH ENTRY, STANDARDIZE FORMAT AND INSTANTIATE PROCESS\n\n # Convert all entries to (process, input) tuples, with None as filler for absent input\n input_index = 
input_index_curr = 0\n for i in range(len(processes_spec)):\n\n # MODIFIED 2/8/17 NEW:\n # Get list of origin mechanisms for processes that have already been converted\n # (for use below in assigning input)\n orig_mechs_already_processed = list(p[0].originMechanisms[0] for\n p in processes_spec if isinstance(p,ProcessTuple))\n # MODIFIED 2/8/17 END\n\n # Entry is not a tuple\n # presumably it is a process spec, so enter it as first item of ProcessTuple\n if not isinstance(processes_spec[i], tuple):\n processes_spec[i] = ProcessTuple(processes_spec[i], None)\n\n # Entry is a tuple but not a ProcessTuple, so convert it\n if isinstance(processes_spec[i], tuple) and not isinstance(processes_spec[i], ProcessTuple):\n processes_spec[i] = ProcessTuple(processes_spec[i][0], processes_spec[i][1])\n\n # Input was NOT provided on command line, so get it from the process\n if input is None:\n process = processes_spec[i].process\n process_input = []\n for process_input_state in process.processInputStates:\n process_input.extend(process_input_state.value)\n processes_spec[i] = ProcessTuple(process, process_input)\n # Input was provided on command line, so assign that to input item of tuple\n else:\n # Assign None as input to processes implemented by controller (controller provides their input)\n # (e.g., prediction processes implemented by EVCMechanism)\n if processes_spec[i].process._isControllerProcess:\n processes_spec[i] = ProcessTuple(processes_spec[i].process, None)\n else:\n # MODIFIED 2/8/17 NEW:\n # Replace input item in tuple with one from command line\n # Note: check if origin mechanism for current process is same as any previous one;\n # if it is, use that one (and don't increment index for input\n # otherwise, assign input and increment input_index\n try:\n input_index_curr = orig_mechs_already_processed.index(processes_spec[i][0].originMechanisms[0])\n except ValueError:\n input_index += 1\n processes_spec[i] = ProcessTuple(processes_spec[i].process, input[input_index_curr])\n input_index_curr = input_index\n # MODIFIED 2/8/17 END\n\n # Validate input\n if (processes_spec[i].input is not None and\n not isinstance(processes_spec[i].input,(numbers.Number, list, np.ndarray))):\n raise SystemError(\"Second item of entry {0} ({1}) must be an input value\".\n format(i, processes_spec[i].input))\n\n process = processes_spec[i].process\n process_input = processes_spec[i].input\n\n # # MODIFIED 2/8/17 OLD: [MOVED ASSIGNMENT OF self.variable TO _instantiate_graph()\n # # SINCE THAT IS WHERE SYSTEM'S ORIGIN MECHANISMS ARE IDENTIFIED]\n # self.variable.append(process_input)\n # # MODIFIED 2/8/17 END\n\n # IMPLEMENT: THIS IS WHERE LEARNING SPECIFIED FOR A SYSTEM SHOULD BE IMPLEMENTED FOR EACH PROCESS IN THE\n # SYSTEM; NOTE: IF THE PROCESS IS ALREADY INSTANTIATED WITHOUT LEARNING\n # (FIRST CONDITIONAL BELOW), MAY NEED TO BE RE-INSTANTIATED WITH LEARNING\n # (QUESTION: WHERE TO GET SPECS FOR PROCESS FOR RE-INSTANTIATION??)\n\n # If process item is a Process object, assign process_input as default\n if isinstance(process, Process):\n if process_input is not None:\n process._assign_defaults(variable=process_input, context=context)\n # If learning_rate is specified for system but not for process, then apply to process\n # # MODIFIED 3/21/17 OLD:\n # if self.learning_rate and not process.learning_rate:\n # # FIX: assign_params WANTS TO CREATE A ParamaterState ON process FOR learning_rate\n # process.assign_params(request_set={LEARNING_RATE:self.learning_rate})\n # # MODIFIED 3/21/17 NEW:[learning_rate 
SHOULD BE NOT BE RE-ASSIGNED FOR PROCESS, BUT RATHER ON EXECUTE]\n # if self.learning_rate is not None and process.learning_rate is None:\n # process.learning_rate = self.learning_rate\n # # MODIFIED 3/21/17 END\n\n # Otherwise, instantiate Process\n else:\n if inspect.isclass(process) and issubclass(process, Process):\n # FIX: MAKE SURE THIS IS CORRECT\n # Provide self as context, so that Process knows it is part of a System (and which one)\n # Note: this is used by Process._instantiate_pathway() when instantiating first Mechanism\n # in Pathway, to override instantiation of projections from Process.input_state\n process = Process(default_input_value=process_input,\n learning_rate=self.learning_rate,\n context=self)\n elif isinstance(process, dict):\n # IMPLEMENT: HANDLE Process specification dict here;\n # include process_input as ??param, and context=self\n raise SystemError(\"Attempt to instantiate process {0} in kwProcesses of {1} \"\n \"using a Process specification dict: not currently supported\".\n format(process.name, self.name))\n else:\n raise SystemError(\"Entry {0} of kwProcesses ({1}) must be a Process object, class, or a \"\n \"specification dict for a Process\".format(i, process))\n\n # # process should now be a Process object; assign to processList\n # self.processList.append(process)\n\n # Assign the Process a reference to this System\n process.systems.append(self)\n if process.learning:\n self.learning = True\n\n # Get max of Process phaseSpecs\n self._phaseSpecMax = int(max(math.floor(process._phaseSpecMax), self._phaseSpecMax))\n\n # Iterate through mechanism tuples in Process' mech_tuples\n # to construct self._all_mech_tuples and mechanismsDict\n # FIX: ??REPLACE WITH: for sender_mech_tuple in Process._mech_tuples\n for sender_mech_tuple in process._mech_tuples:\n\n sender_mech = sender_mech_tuple.mechanism\n\n # THIS IS NOW DONE IN _instantiate_graph\n # # Add system to the Mechanism's list of systems of which it is member\n # if not self in sender_mech_tuple[MECHANISM].systems:\n # sender_mech.systems[self] = INTERNAL\n\n # Assign sender mechanism entry in self.mechanismsDict, with mech_tuple as key and its Process as value\n # (this is used by Process._instantiate_pathway() to determine if Process is part of System)\n # If the sender is already in the System's mechanisms dict\n if sender_mech_tuple.mechanism in self.mechanismsDict:\n existing_mech_tuple = self._allMechanisms._get_tuple_for_mech(sender_mech)\n if not sender_mech_tuple is existing_mech_tuple:\n # Contents of tuple are the same, so use the tuple in _allMechanisms\n if (sender_mech_tuple.phase == existing_mech_tuple.phase and\n sender_mech_tuple.params == existing_mech_tuple.params):\n pass\n # Contents of tuple are different, so raise exception\n else:\n if sender_mech_tuple.phase != existing_mech_tuple.phase:\n offending_tuple_field = 'phase'\n offending_value = PHASE_ITEM\n else:\n offending_tuple_field = 'process_input'\n offending_value = PARAMS_ITEM\n raise SystemError(\"The same mechanism in different processes must have the same parameters:\"\n \"the {} ({}) for {} in {} does not match the value({}) in {}\".\n format(offending_tuple_field,\n sender_mech_tuple.mechanism,\n sender_mech_tuple[offending_value],\n process,\n existing_mech_tuple[offending_value],\n self.mechanismsDict[sender_mech_tuple.mechanism]\n ))\n # Add to entry's list\n self.mechanismsDict[sender_mech].append(process)\n else:\n # Add new entry\n self.mechanismsDict[sender_mech] = [process]\n if not sender_mech_tuple in 
self._all_mech_tuples:\n self._all_mech_tuples.append(sender_mech_tuple)\n\n process._allMechanisms = MechanismList(process, tuples_list=process._mech_tuples)\n\n # # MODIFIED 2/8/17 OLD: [SEE ABOVE]\n # self.variable = convert_to_np_array(self.variable, 2)\n # # MODIFIED 2/8/17 END\n #\n # # Instantiate processList using process_tuples, and point self.processes to it\n # # Note: this also points self.params[kwProcesses] to self.processes\n self.process_tuples = processes_spec\n self._processList = ProcessList(self, self.process_tuples)\n self.processes = self._processList.processes", "def _register_process(self, process_instance, name):\n # Add process instance to container's process dict\n if name in self.procs_by_name:\n log.warn(\"Process name already registered in container: %s\" % name)\n self.procs_by_name[name] = process_instance\n self.procs[process_instance.id] = process_instance\n\n # Add Process to resource registry\n # Note: In general the Process resource should be created by the CEI PD, but not all processes are CEI\n # processes. How to deal with this?\n process_instance.errcause = \"registering\"\n\n if process_instance._proc_type != IMMEDIATE_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n proc_obj = Process(name=process_instance.id, label=name, proctype=process_instance._proc_type)\n proc_id, _ = self.container.resource_registry.create(proc_obj)\n process_instance._proc_res_id = proc_id\n\n # Associate process with container resource\n self.container.resource_registry.create_association(self.cc_id, \"hasProcess\", proc_id)\n else:\n process_instance._proc_res_id = None\n\n # Process type specific registration\n # TODO: Factor out into type specific handler functions\n if process_instance._proc_type == SERVICE_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n # Registration of SERVICE process: in resource registry\n service_list, _ = self.container.resource_registry.find_resources(restype=\"Service\", name=process_instance.name, id_only=True)\n if service_list:\n process_instance._proc_svc_id = service_list[0]\n if len(service_list) > 1:\n log.warn(\"More than 1 Service resource found with name %s: %s\", process_instance.name, service_list)\n else:\n # We are starting the first process of a service instance\n # TODO: This should be created by the HA Service agent in the future\n svc_obj = Service(name=process_instance.name, exchange_name=process_instance._proc_listen_name, state=ServiceStateEnum.READY)\n process_instance._proc_svc_id, _ = self.container.resource_registry.create(svc_obj)\n\n # Create association to service definition resource\n svcdef_list, _ = self.container.resource_registry.find_resources(restype=\"ServiceDefinition\",\n name=process_instance.name, id_only=True)\n if svcdef_list:\n if len(svcdef_list) > 1:\n log.warn(\"More than 1 ServiceDefinition resource found with name %s: %s\", process_instance.name, svcdef_list)\n self.container.resource_registry.create_association(process_instance._proc_svc_id,\n \"hasServiceDefinition\", svcdef_list[0])\n else:\n log.error(\"Cannot find ServiceDefinition resource for %s\", process_instance.name)\n\n self.container.resource_registry.create_association(process_instance._proc_svc_id, \"hasProcess\", proc_id)\n\n elif process_instance._proc_type == AGENT_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.DIRECTORY):\n # Registration of AGENT process: in Directory\n caps = process_instance.get_capabilities()\n 
self.container.directory.register(\"/Agents\", process_instance.id,\n **dict(name=process_instance._proc_name,\n container=process_instance.container.id,\n resource_id=process_instance.resource_id,\n agent_id=process_instance.agent_id,\n def_id=process_instance.agent_def_id,\n capabilities=caps))\n\n self._call_proc_state_changed(process_instance, ProcessStateEnum.RUNNING)", "async def add_process(self, ctx, process, name):\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f\"The process {process} is already being displayed\")\n elif name in PROCESSES.values():\n await ctx.send(f\"The process name {name} is already being displayed\")\n\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been added\")", "async def edit_process(self, ctx, old_name, new_name):\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def appendProcess(self, pid: int, numberOfVariables: int, processTable, diagnostics) -> int:\n self.memory[pid] = []\n\n for _i in range(numberOfVariables):\n self.memory[pid].append(MemoryItem())\n\n return 0", "def identify_processes(self) -> Dict[int, dict]:\n\n processes = {}\n\n for process in self.behavior[\"generic\"]:\n\n proc_name, proc_path = split_path(process[\"process_path\"])\n\n processes[int(process[\"pid\"])] = {\n FieldNames.PROCESS_IMAGE: proc_name,\n FieldNames.PROCESS_IMAGE_PATH: proc_path,\n FieldNames.PROCESS_ID: int(process[\"pid\"]),\n }\n\n return processes", "def _setUpProcessDetailAndMeasurements(self, compPID, compName):\n myName = self.__class__.__name__\n try:\n pd = ProcessDetail(compPID, compName)\n self._components.append(pd)\n self._compMeasurements.append(Measurements(self.numOfMeasurements))\n m = (\"%s: loaded process information on %s:%s\" % (myName, compName, compPID))\n logging.info(m)\n except (NoSuchProcess, AccessDenied) as ex:\n logging.error(\"%s: component %s ignored, reason: %s\" % (myName, compName, ex))", "def process():", "def pslist(self) -> Generator[dict, None, None]:\n\n # Function to switch fields to represent a parent\n def _convert_to_parent_fields(process: dict) -> dict:\n output = {}\n for left, right in [\n 
(FieldNames.PROCESS_IMAGE, FieldNames.PARENT_PROCESS_IMAGE),\n (FieldNames.PROCESS_ID, FieldNames.PARENT_PROCESS_ID),\n (FieldNames.COMMAND_LINE, FieldNames.PARENT_COMMAND_LINE),\n (FieldNames.PROCESS_IMAGE_PATH, FieldNames.PARENT_PROCESS_IMAGE_PATH),\n ]:\n output[right] = process[left]\n\n return output\n\n # Use the pstree dict output to get a mapping from pid -> proc\n procs = self.session.plugins.pstree()._make_process_dict()\n\n parent_procs: Dict[int, dict] = {}\n\n # Add the system idle process\n parent_procs[0] = {\n FieldNames.PARENT_PROCESS_ID: 0,\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"System Idle Process\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\\\\\",\n }\n\n for proc in procs.values():\n\n parent_pid = proc.InheritedFromUniqueProcessId\n\n # Get the current processes info\n command_line = str(proc.Peb.ProcessParameters.CommandLine)\n image_path = str(proc.Peb.ProcessParameters.ImagePathName)\n\n if int(proc.pid) == 4:\n process_image = \"SYSTEM\"\n process_image_path = \"\\\\\"\n else:\n process_image, process_image_path = split_path(image_path)\n\n current_proc = {\n FieldNames.EVENT_TYPE: EventTypes.PROCESS_LAUNCHED,\n FieldNames.PROCESS_ID: int(proc.pid),\n FieldNames.COMMAND_LINE: command_line,\n FieldNames.PROCESS_IMAGE: process_image,\n FieldNames.PROCESS_IMAGE_PATH: process_image_path,\n }\n\n # Keep track of the processes.\n self.processes[int(proc.pid)] = current_proc\n\n current_as_parent = _convert_to_parent_fields(current_proc)\n parent_procs[int(proc.pid)] = current_as_parent\n\n # Parse the parent process\n if parent_pid not in parent_procs:\n\n # Do we the _EPROCESS for this process?\n if int(parent_pid) in procs:\n parent = procs[int(parent_pid)]\n parent_image_path = parent.Peb.ProcessParameters.ImagePathName\n\n parent_process_image, parent_process_image_path = split_path(\n str(parent_image_path)\n )\n\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent.pid),\n FieldNames.PARENT_COMMAND_LINE: parent.Peb.ProcessParameters.CommandLine,\n FieldNames.PARENT_PROCESS_IMAGE: parent_process_image,\n FieldNames.PARENT_PROCESS_IMAGE_PATH: parent_process_image_path,\n }\n\n # If not, make a dummy one with the PID\n else:\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent_pid),\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\",\n }\n\n parent_procs[int(parent_pid)] = parent_proc\n\n yield {**current_proc, **parent_procs[int(parent_pid)]}", "def start_processes(self):\n data_thread = CAThread(target=self.handle_event, args=())\n data_thread.start()\n\n self.counter_pv = PV(self.get_counter_pv_name())\n self.counter_pv.add_callback(self.on_change, index=1)\n\n self.acq_pv = PV(self.get_acquire_pv_name())\n self.acq_pv.add_callback(self.acq_done, index=2)", "def update_processgraph(self):\n graph = self.processgraphEdit.toPlainText()\n self.processgraph.graph = json.loads(graph)\n self.processgraph.builder.processes = json.loads(graph)\n #widget = self.processgraphWidget\n #self.load_dict_into_widget(widget, self.processgraph.graph)\n #widget.show()", "def start_new_processes(self):\n # initialize cache to mutualize calls to Variable.get in DAGs\n # needs to be done before this process is forked to create the DAG parsing processes.\n SecretCache.init()\n\n while self._parallelism - len(self._processors) > 0 and self._file_path_queue:\n file_path = self._file_path_queue.popleft()\n # Stop creating duplicate processor i.e. 
processor with the same filepath\n if file_path in self._processors:\n continue\n\n callback_to_execute_for_file = self._callback_to_execute[file_path]\n processor = self._create_process(\n file_path,\n self._pickle_dags,\n self._dag_ids,\n self.get_dag_directory(),\n callback_to_execute_for_file,\n )\n\n del self._callback_to_execute[file_path]\n Stats.incr(\"dag_processing.processes\", tags={\"file_path\": file_path, \"action\": \"start\"})\n\n processor.start()\n self.log.debug(\"Started a process (PID: %s) to generate tasks for %s\", processor.pid, file_path)\n self._processors[file_path] = processor\n self.waitables[processor.waitable_handle] = processor\n\n Stats.gauge(\"dag_processing.file_path_queue_size\", len(self._file_path_queue))", "def GetPublishedProcesses():\r\n pass", "def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])", "def _update_PID(self):\n self.pid = PID(p=self.paramP, i=self.paramI, d=self.paramD, setpoint=self.voltageSetpoint, memory=self.paramMemory)", "def new_process() -> Process:\n return multiprocessing.Process()", "def create(self):\n\n if self.__created:\n return self.process\n\n # Change process name if needed\n if self.processName is not None and self.processName != 'PAT':\n if self.verbose:\n print(\"\")\n print(\"Changing process name from %r to %r...\" % ('PAT', self.processName))\n change_process_name(self.process.framework, 'PAT', self.processName)\n\n if len(self.__systematics) > 0:\n if self.verbose:\n print(\"\")\n\n default_systematics_options = {\n 'jec': {'jetCollection': self.__miniaod_jet_collection,\n 'metCollection': self.__miniaod_met_collection,\n 'uncertaintiesFile': None},\n 'jer': {'jetCollection': self.__miniaod_jet_collection,\n 'metCollection': self.__miniaod_met_collection,\n 'genJetCollection': self.__miniaod_gen_jet_collection,\n 'resolutionFile': self.__jer_resolution_file,\n 'scaleFactorFile': self.__jer_scale_factor_file}\n }\n\n systematics = {}\n for syst in self.__systematics:\n user_systematics_options = self.__systematicsOptions[syst] if syst in self.__systematicsOptions else {}\n systematics[syst] = copy.deepcopy(default_systematics_options[syst])\n systematics[syst].update(user_systematics_options)\n\n print(\"\")\n Systematics.doSystematics(self, systematics)\n\n\n # Add the framework to the path as the last element\n self.path += cms.Sequence(self.process.framework)\n self.process.p = self.path\n\n if self.verbose:\n print(\"\")\n print(\"Framework configuration done.\")\n print(\" Producers: %s\" % ', '.join(self.producers))\n print(\" Analyzers: %s\" % ', '.join(self.analyzers))\n print(\"\")\n\n # Specify scheduling of analyzers and producers\n self.process.framework.analyzers_scheduling = cms.untracked.vstring(self.analyzers)\n self.process.framework.producers_scheduling = cms.untracked.vstring(self.producers)\n\n self.__created = True\n return self.process", "def reload_process(self):\n try:\n output = subprocess.check_output([\"pidof\", \"haproxy\"])\n pids = output.strip().split(\" \")\n except Exception as exc:\n command = \"/usr/sbin/haproxy -f {{ dest }} -p /var/run/haproxy.pid\"\n else:\n command = \"/usr/sbin/haproxy -f {{ dest }} -p /var/run/haproxy.pid -sf xyz\"\n command = command.replace(\"xyz\", \" 
\".join(pids))\n\n command = command.replace(\"{{ dest }}\", self.dest)\n log.debug(\"Running reload_cmd: {}\".format(command))\n\n args = shlex.split(command)\n process = subprocess.Popen(args)", "def register_proc(self, pid: int):\n self.processes.add(pid)", "def addProcess(self, name, args, uid=None, gid=None, env={}):\n class SimpleProcessObject(object):\n\n def starting(self):\n pass\n\n def stopped(self):\n pass\n\n def getName(self):\n return name\n\n def getCommandLine(self):\n return args\n\n def getFileDescriptors(self):\n return []\n\n self.addProcessObject(SimpleProcessObject(), env, uid, gid)", "def addProcess(self, name, proc_config):\n if self.processes.has_key(name):\n raise KeyError(\"remove %s first\" % name)\n p = self.engineProtocol()\n p.service = self\n p.name = name\n proc_config.processProtocol = p\n self.processes[name] = proc_config\n if self.running:\n self.startProcess(name)\n return p.deferred", "def _StartMonitoringProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n pid = process.pid\n\n if pid in self._process_information_per_pid:\n raise KeyError(\n 'Already monitoring process (PID: {0:d}).'.format(pid))\n\n if pid in self._rpc_clients_per_pid:\n raise KeyError(\n 'RPC client (PID: {0:d}) already exists'.format(pid))\n\n rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()\n\n # Make sure that a worker process has started its RPC server.\n # The RPC port will be 0 if no server is available.\n rpc_port = process.rpc_port.value\n time_waited_for_process = 0.0\n while not rpc_port:\n time.sleep(0.1)\n rpc_port = process.rpc_port.value\n time_waited_for_process += 0.1\n\n if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:\n raise IOError(\n 'RPC client unable to determine server (PID: {0:d}) port.'.format(\n pid))\n\n hostname = 'localhost'\n\n if not rpc_client.Open(hostname, rpc_port):\n raise IOError((\n 'RPC client unable to connect to server (PID: {0:d}) '\n 'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))\n\n self._rpc_clients_per_pid[pid] = rpc_client\n self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)", "def check_launcher():\n\n # Storage in memory which holds info about currently running checks\n storage = {}\n\n # Storage in memory which holds process info: process id and project objects\n processes = {}\n\n # Close previously opened connections (if the exist)\n django.db.connections.close_all()\n\n while True:\n # Making Copy in order to compare updates in data base\n new_storage = copy.deepcopy(storage)\n\n # Fetch data from database\n check_sync(new_storage)\n\n # Get storage keys in order to compare storages for changes\n old_keys = set(storage.keys())\n new_keys = set(new_storage.keys())\n\n # Get keys of elements in init storage and updated storage\n added_checks = new_keys.difference(old_keys)\n deleted_checks = old_keys.difference(new_keys)\n common_checks = new_keys.intersection(old_keys)\n\n # Launch new processes\n for check_id in added_checks:\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n # Stop (kill) deleted check's prorcesses\n for check_id in deleted_checks:\n stop_process(check_id, storage, processes)\n\n for check_id in common_checks:\n if storage[check_id] != new_storage[check_id]:\n stop_process(check_id, storage, processes)\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n storage = copy.deepcopy(new_storage)\n 
time.sleep(30)", "def do_list(self,line):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n # sys.exit(1)\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n print('##############################################')\n print('PID #'+' Processor #'+' Status')\n print('##############################################')\n spark_ls = []\n for processor in dict_processor:\n if processor.get('processor') == 'spark<spark_worker>' or processor.get('processor') == 'spark<spark_master>':\n spark_ls.append(processor)\n del dict_processor[dict_processor.index(processor)]\n # print dict_processor\n for processor in dict_processor:\n space_pid = 7 - len(processor.get('PID'))\n space_name = 30 - len(processor.get('processor'))\n if processor.get('status') == 'Running':\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[32m' +processor.get('status')+ '\\33[0m'\n else:\n print str(processor.get('PID'))+space_pid*' '+processor.get('processor') + space_name*' '+ '\\33[33m' +processor.get('status')+ '\\33[0m'\n # space_num = 30 - len(k)\n # print k + space_num*' '+v\n print 7*' '+'spark'\n for item in spark_ls:\n space_pid = 8 - len(item.get('PID'))\n space_name = 29 - len(item.get('processor').split('<')[1].split('>')[0])\n if item.get('status')=='Running':\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[32m'+item.get('status')+'\\33[0m'\n else:\n print str(item.get('PID'))+space_pid*' '+item.get('processor').split('<')[1].split('>')[0] + space_name*' '+ '\\33[33m'+item.get('status')+'\\33[0m'\n print('##############################################')\n else:\n print(\"cmd is not support from this host\")", "def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))", "def test_spawned_process(self):\n DescriptorSchema.objects.create(\n name=\"Test schema\", slug=\"test-schema\", contributor=self.contributor\n )\n spawned_process = Process.objects.filter(slug=\"test-save-file\").latest()\n # Patch the process to create Entity, so its bahaviour can be tested.\n spawned_process.entity_type = \"test-schema\"\n spawned_process.entity_descriptor_schema = \"test-schema\"\n spawned_process.save()\n\n # Make sure user can spawn the process.\n spawned_process.set_permission(Permission.VIEW, self.contributor)\n self.collection.set_permission(Permission.VIEW, self.user)\n Data.objects.create(\n name=\"Test data\",\n contributor=self.contributor,\n process=Process.objects.filter(slug=\"test-spawn-new\").latest(),\n collection=self.collection,\n )\n\n # Created and spawned objects should be done.\n self.assertEqual(Data.objects.filter(status=Data.STATUS_DONE).count(), 2)\n\n # Check that permissions are inherited.\n child = 
Data.objects.last()\n self.assertTrue(self.user.has_perm(Permission.VIEW, child))\n self.assertEqual(child.collection.pk, self.collection.pk)\n self.assertEqual(child.entity.collection.pk, self.collection.pk)", "def add_process(self, model_id, n_cores, n_time, s_time):\n p = Process(n_cores=n_cores, time_needed=n_time, model_id=model_id, start_time=s_time)\n self.process_list.append(p)", "def dynamic_pid(self):\n pass", "def init_process_list_from_json(self, js_file):\n import json\n print(\"LOADING \" + js_file)\n mdx = json.load(open(js_file, 'r'))\n\n models = mdx['models']\n mod_dat = {}\n for m in models:\n mid = m['id']\n\n needed_cores = m['needed_cores']\n needed_time = m['requested_time']\n if needed_time < 0:\n needed_time = 9223372036854775800\n mod_dat[mid] = {'needed_cores': needed_cores, 'needed_time': needed_time}\n for task in mdx['scheduler_inputs']:\n mid = task['model_id']\n needed_time = mod_dat[mid]['needed_time']\n needed_cores = mod_dat[mid]['needed_cores']\n scheduled_start_time = task['start_time']\n p = Process(n_cores=needed_cores, time_needed=needed_time, model_id=mid, start_time=scheduled_start_time)\n self.process_list.append(p)", "def __init__(self, status_in, data_out):\n Process.__init__(self)\n self.input_stream = status_in\n self.data_out = data_out\n self._UPDATE_INTERVAL_MS = 10\n self._status_labels = {} # A dictionary, whose keys are strings and whose values are Tkinter label variables", "def _set_processes(self, processes: int = 1):\n self.__processes = processes", "def init_process_list_from_json(self, js_file):\n self.process_list = []\n import json\n print(\"LOADING \" + js_file)\n mdx = json.load(open(js_file, 'r'))\n\n models = mdx['models']\n mod_dat = {}\n for m in models:\n mid = m['id']\n\n needed_cores = m['needed_cores']\n needed_time = m['requested_time']\n if needed_time < 0:\n needed_time = 9223372036854775800\n mod_dat[mid] = {'needed_cores': needed_cores, 'needed_time': needed_time}\n for task in mdx['scheduler_inputs']:\n mid = task['model_id']\n needed_time = mod_dat[mid]['needed_time']\n needed_cores = mod_dat[mid]['needed_cores']\n scheduled_start_time = task['start_time']\n p = RRProcessStatus(n_cores=needed_cores, time_needed=needed_time, model_id=mid,\n start_time=scheduled_start_time)\n self.process_list.append(p)", "def get_processes(self):\n processes = {}\n # Get ps output\n cmd = [\"ps\", \"-Z\"]\n # Split by newlines and remove first line (\"LABEL USER PID PPID NAME\")\n # TODO: surround with try/except?\n psz = subprocess.check_output(self.shell + cmd).decode().split('\\n')[1:]\n for line in psz:\n line = line.strip(\"\\r\")\n if line:\n try:\n p = Process(line, self.android_version)\n except ValueError as e:\n self.log.warning(e)\n else:\n processes[p.pid] = p\n return processes", "def manage_process(self, *args):\n process_name = args[0]\n try:\n box_config = self.bc_dao.get_one([self.box_id, process_name])\n if not box_config.is_on:\n if box_config.pid is not None:\n self._kill_process(box_config)\n return\n\n if not box_config.pid or not psutil.pid_exists(box_config.pid):\n self._start_process(box_config)\n elif box_config.pid and psutil.pid_exists(box_config.pid):\n self._poll_process(box_config)\n except Exception as e:\n self.logger.error(f'Exception: {e}', exc_info=True)", "def start_processing(self):", "def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):\n if process_id and not is_valid_identifier(process_id, ws_sub='_'):\n raise BadRequest(\"Given process_id %s is not a 
valid identifier\" % process_id)\n\n # Generate a new process id if not provided\n # TODO: Ensure it is system-wide unique\n process_id = process_id or \"%s.%s\" % (self.container.id, self.proc_id_pool.get_id())\n log.debug(\"ProcManager.spawn_process(name=%s, module.cls=%s.%s, config=%s) as pid=%s\", name, module, cls, config, process_id)\n\n process_cfg = deepcopy(CFG)\n if config:\n # Use provided config. Must be dict or DotDict\n if not isinstance(config, DotDict):\n config = DotDict(config)\n if config.get_safe(\"process.config_ref\"):\n # Use a reference\n config_ref = config.get_safe(\"process.config_ref\")\n log.info(\"Enhancing new process spawn config from ref=%s\" % config_ref)\n matches = re.match(r'^([A-Za-z]+):([A-Za-z0-9_\\.]+)/(.*)$', config_ref)\n if matches:\n ref_type, ref_id, ref_ext = matches.groups()\n if ref_type == \"resources\":\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n try:\n obj = self.container.resource_registry.read(ref_id)\n if obj and hasattr(obj, ref_ext):\n ref_config = getattr(obj, ref_ext)\n if isinstance(ref_config, dict):\n dict_merge(process_cfg, ref_config, inplace=True)\n else:\n raise BadRequest(\"config_ref %s exists but not dict\" % config_ref)\n else:\n raise BadRequest(\"config_ref %s - attribute not found\" % config_ref)\n except NotFound as nf:\n log.warn(\"config_ref %s - object not found\" % config_ref)\n raise\n else:\n log.error(\"Container missing RESOURCE_REGISTRY capability to resolve process config ref %s\" % config_ref)\n elif ref_type == \"objects\":\n if self.container.has_capability(self.container.CCAP.OBJECT_STORE):\n try:\n obj = self.container.object_store.read_doc(ref_id)\n ref_config = obj\n if ref_ext:\n ref_config = get_safe(obj, ref_ext, None)\n if ref_config is None:\n raise BadRequest(\"config_ref %s - attribute not found\" % config_ref)\n\n if isinstance(ref_config, dict):\n dict_merge(process_cfg, ref_config, inplace=True)\n else:\n raise BadRequest(\"config_ref %s exists but not dict\" % config_ref)\n except NotFound as nf:\n log.warn(\"config_ref %s - object not found\" % config_ref)\n raise\n else:\n log.error(\"Container missing OBJECT_STORE capability to resolve process config ref %s\" % config_ref)\n else:\n raise BadRequest(\"Unknown reference type in: %s\" % config_ref)\n\n dict_merge(process_cfg, config, inplace=True)\n if self.container.spawn_args:\n # Override config with spawn args\n dict_merge(process_cfg, self.container.spawn_args, inplace=True)\n\n #log.debug(\"spawn_process() pid=%s process_cfg=%s\", process_id, process_cfg)\n\n # PROCESS TYPE. 
Determines basic process context (messaging, service interface)\n # One of the constants defined at the top of this file\n\n service_cls = named_any(\"%s.%s\" % (module, cls))\n process_type = get_safe(process_cfg, \"process.type\") or getattr(service_cls, \"process_type\", \"service\")\n\n process_start_mode = get_safe(config, \"process.start_mode\")\n\n process_instance = None\n\n # alert we have a spawning process, but we don't have the instance yet, so give the class instead (more accurate than name)\n self._call_proc_state_changed(\"%s.%s\" % (module, cls), ProcessStateEnum.PENDING)\n\n try:\n # Additional attributes to set with the process instance\n proc_attr = {\"_proc_type\": process_type,\n \"_proc_spawn_cfg\": config\n }\n\n # spawn process by type\n if process_type == SERVICE_PROCESS_TYPE:\n process_instance = self._spawn_service_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == STREAM_PROCESS_TYPE:\n process_instance = self._spawn_stream_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == AGENT_PROCESS_TYPE:\n process_instance = self._spawn_agent_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == STANDALONE_PROCESS_TYPE:\n process_instance = self._spawn_standalone_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == IMMEDIATE_PROCESS_TYPE:\n process_instance = self._spawn_immediate_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == SIMPLE_PROCESS_TYPE:\n process_instance = self._spawn_simple_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n else:\n raise BadRequest(\"Unknown process type: %s\" % process_type)\n\n self._register_process(process_instance, name)\n\n process_instance.errcause = \"OK\"\n log.info(\"ProcManager.spawn_process: %s.%s -> pid=%s OK\", module, cls, process_id)\n\n if process_type == IMMEDIATE_PROCESS_TYPE:\n log.info('Terminating immediate process: %s', process_instance.id)\n self.terminate_process(process_instance.id)\n\n # terminate process also triggers TERMINATING/TERMINATED\n self._call_proc_state_changed(process_instance, ProcessStateEnum.EXITED)\n\n else:\n #Update local policies for the new process\n if self.container.has_capability(self.container.CCAP.GOVERNANCE_CONTROLLER):\n self.container.governance_controller.update_container_policies(process_instance, safe_mode=True)\n\n return process_instance.id\n\n except IonProcessError:\n errcause = process_instance.errcause if process_instance else \"instantiating process\"\n log.exception(\"Error spawning %s %s process (process_id: %s): %s\", name, process_type, process_id, errcause)\n return None\n\n except Exception:\n errcause = process_instance.errcause if process_instance else \"instantiating process\"\n log.exception(\"Error spawning %s %s process (process_id: %s): %s\", name, process_type, process_id, errcause)\n\n # trigger failed notification - catches problems in init/start\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n\n raise", "def CreateProcesses(self, umpire_config, env):\n if ('services' not in umpire_config or\n 'instalog' not in umpire_config['services']):\n return None\n cli_path = os.path.join(env.server_toolkit_dir, 'py', 'instalog', 'cli.py')\n config_path = self.GenerateConfigFile(umpire_config, env)\n proc_config = {\n 'executable': cli_path,\n 'name': SERVICE_NAME,\n # Have to use --no-daemon when starting instalog, because Umpire will\n # supervise 
the process by its pid.\n 'args': ['--config', config_path, 'start', '--no-daemon'],\n 'path': '/tmp',\n 'env': os.environ}\n proc = umpire_service.ServiceProcess(self)\n proc.SetConfig(proc_config)\n return [proc]", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def process(self):\n pass", "def num_processes(self, new_value):", "async def find_processes(self, msg):\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in [\"java.exe\", \"javaw.exe\"] and proc.cwd() in PROCESSES.keys():\n running_processes.append(proc.cwd())\n\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Online <:GreenTick:592083498534174721>\", inline=self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Offline <:RedCross:592082557961633877>\", inline=self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Admin Required <:OrangeUnknown:592082676891123722>\", inline=self.inline)\n await msg.edit(content=\"\", embed=new_embed)", "def sync_processes(self, *args, **kwargs):\n return True", "def OnUpdate(self, event):\n # Check remote - TODO\n # Query database for status of processing\n # 2018-04-11 13:25:56.914000\n self.controller.checkRemote()\n seriesprocesses = self.controller.db.getActiveProcesses()\n self.m_dataViewListCtrlCloud.DeleteAllItems()\n for series in seriesprocesses:\n # time delta\n t1 = datetime.datetime.strptime(series[4], '%Y-%m-%d %H:%M:%S.%f')\n if series[5] is not None:\n t2 = datetime.datetime.strptime(series[5], '%Y-%m-%d %H:%M:%S.%f')\n else:\n t2 = datetime.datetime.now()\n tdiff = t2 - t1\n # Load to window\n self.m_dataViewListCtrlCloud.AppendItem(\n [False, series[0], series[1], series[2].upper(), self.getStatus(series[3]), str(tdiff)])", "def start(self, *args):\n if args[0] == 'all':\n params = args[1:]\n for x in self.processers.keys():\n cmd = ['python', 'processmgr.py']\n cmd.append(x.replace('process', ''))\n cmd.extend(params)\n p = subprocess.Popen(cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False)\n self.processers[x] = p\n print 'run %s' % x\n else:\n cmd = ['python', 'processmgr.py']\n cmd.extend(args)\n p = subprocess.Popen(cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=False)\n \n self.processers['process%s' % args[0]] = p\n print 'run process%s.' % args[0]", "def _upmsg(self, xmldoc):\r\n xml = xmldoc.toxml()\r\n pid = xmldoc.getElementsByTagName('id')[0].firstChild.toxml().zfill(4)\r\n try:\r\n pobj = self.getByID(pid).leaf\r\n except ValueError:\r\n pobj = None # this is a new program that hasn't been registered\r\n\r\n if isinstance(pobj, Program):\r\n if '<s>' in xml:\r\n status = xmldoc.getElementsByTagName('s')[0].firstChild.toxml()\r\n if status == '21':\r\n pobj.ranThen.update(pobj.ranThen + 1, force=True,\r\n silent=True)\r\n pobj.status.update(True, force=True, silent=True)\r\n elif status == '31':\r\n pobj.ranElse.update(pobj.ranElse + 1, force=True,\r\n silent=True)\r\n pobj.status.update(False, force=True, silent=True)\r\n\r\n if '<r>' in xml:\r\n plastrun = xmldoc.getElementsByTagName('r')[0]. 
\\\r\n firstChild.toxml()\r\n plastrun = datetime.strptime(plastrun, '%y%m%d %H:%M:%S')\r\n pobj.lastRun.update(plastrun, force=True, silent=True)\r\n\r\n if '<f>' in xml:\r\n plastfin = xmldoc.getElementsByTagName('f')[0]. \\\r\n firstChild.toxml()\r\n plastfin = datetime.strptime(plastfin, '%y%m%d %H:%M:%S')\r\n pobj.lastFinished.update(plastfin, force=True, silent=True)\r\n\r\n if '<on />' in xml or '<off />' in xml:\r\n pobj.enabled.update('<on />' in xml, force=True, silent=True)\r\n\r\n self.parent.log.info('ISY Updated Program: ' + pid)", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def generate_processes(emg, source, processes, conf, specifications):\n # Import Specifications\n or_models = list(processes.models.values())\n or_processes = list(processes.environment.values())\n or_entry = processes.entry\n\n all_instance_maps = specifications[\"manual event models\"].get(\"specification\")\n fragment_name = emg.abstract_task_desc['fragment']\n descriptions = None\n for imap in all_instance_maps.get(\"manual event models\", []):\n if fragment_name in imap.get('fragments', []):\n descriptions = imap.get(\"model\", None)\n\n # Import manual process\n if descriptions and (\"functions models\" in descriptions or \"environment processes\" in descriptions):\n manual_processes = ProcessCollection(emg.logger, emg.conf)\n manual_processes.parse_event_specification(descriptions)\n\n # Decide on process replacements\n if manual_processes.entry:\n if (get_conf_property(conf, \"enforce replacement\") and or_entry) or not or_entry:\n or_entry = manual_processes.entry\n\n # Replace rest processes\n for collection, manual in ((or_models, manual_processes.models.values()),\n (or_processes, manual_processes.environment.values())):\n for process in manual:\n if process.pretty_id in {p.pretty_id for p in collection} and \\\n get_conf_property(conf, \"enforce replacement\"):\n collection[[p.pretty_id for p in collection].index(process.pretty_id)] = process\n elif process.pretty_id not in {p.pretty_id for p in collection}:\n collection.insert(0, process)\n else:\n emg.logger.info(\"There is no specification for {!r} or it has invalid format\".format(fragment_name))\n\n processes.entry = or_entry\n processes.models = {p.pretty_id: p for p in or_models}\n processes.environment = {p.pretty_id: p for p in or_processes}\n processes.establish_peers(strict=True)", "def __init__(self, process_list):\n self.__process_list = process_list\n self.__n_initial = len(process_list)", "def _process(proc_data: Dict) -> Dict:\n root_int_list = {'pos', 'flags', 'mnt_id', 'ino', 'clockid', 'ticks',\n 'settime flags', 'size', 'count'}\n epoll_int_list = {'tfd', 'pos'}\n inotify_int_list = {'wd'}\n\n for key, val in proc_data.items():\n if key in root_int_list:\n proc_data[key] = int(val)\n\n if 'epoll' in proc_data:\n for key, val in proc_data['epoll'].items():\n if key in epoll_int_list:\n proc_data['epoll'][key] = int(val)\n\n if 'inotify' in proc_data:\n for key, val in proc_data['inotify'].items():\n if key in inotify_int_list:\n proc_data['inotify'][key] = int(val)\n\n return proc_data", "def setup_manager(self) -> None:\n\n #Clean out the process list.\n self.process_list.clear()\n for _ in range(self.num_processes):\n p = Process(target=self.multiprocessing_job,\n args=(self.process_job,))\n self.process_list.append(p)\n self.restart_required = False", "def _determineProcessStatus(self, procs):\n beforePids = set(self._deviceStats.pids)\n afterPidToProcessStats = {}\n pStatsWArgsAndSums, pStatsWoArgs = 
self._splitPStatMatchers()\n for pid, (name, psargs) in procs:\n pStats = self._deviceStats._pidToProcess.get(pid)\n if pStats:\n # We saw the process before, so there's a good\n # chance that it's the same.\n if pStats.match(name, psargs):\n # Yep, it's the same process\n log.debug(\"Found process %d on %s, matching %s %s with MD5\",\n pid, pStats._config.name, name, psargs)\n log.debug(\"%s found existing stat %s %s for pid %s - using MD5\", self._devId, pStats._config.name,\n pStats._config.originalName, pid)\n afterPidToProcessStats[pid] = pStats\n continue\n\n elif pStats.match(name, psargs, useMd5Digest=False):\n # In this case, our raw SNMP data from the\n # remote agent got futzed\n # It's the same process. Yay!\n log.debug(\"%s - Found process %d on %s, matching %s %s without MD5\",\n self._devId, pid, pStats._config.name, name, psargs)\n afterPidToProcessStats[pid] = pStats\n continue\n\n # Search for the first match in our list of regexes\n # that have arguments AND an MD5-sum argument matching.\n # Explicitly *IGNORE* any matchers not modeled by zenmodeler\n for pStats in pStatsWArgsAndSums:\n if pStats.match(name, psargs):\n log.debug(\"%s Found process %d on %s %s\",\n self._devId, pid, pStats._config.originalName, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n else:\n # Now look for the first match in our list of regexes\n # that don't have arguments.\n for pStats in pStatsWoArgs:\n if pStats.match(name, psargs, useMd5Digest=False):\n log.debug(\"Found process %d on %s\",\n pid, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n\n afterPids = set(afterPidToProcessStats)\n afterByConfig = reverseDict(afterPidToProcessStats)\n newPids = afterPids - beforePids\n deadPids = beforePids - afterPids\n\n restarted = {}\n for pid in deadPids:\n procStats = self._deviceStats._pidToProcess[pid]\n procStats.discardPid(pid)\n if procStats in afterByConfig:\n ZenProcessTask.RESTARTED += 1\n pConfig = procStats._config\n if pConfig.restart:\n restarted[procStats] = pConfig\n\n # Now that we've found all of the stragglers, check to see\n # what really is missing or not.\n missing = []\n for procStat in self._deviceStats.processStats:\n if procStat not in afterByConfig:\n missing.append(procStat._config)\n\n # For historical reasons, return the beforeByConfig\n beforeByConfig = reverseDict(self._deviceStats._pidToProcess)\n\n return (afterByConfig, afterPidToProcessStats,\n beforeByConfig, newPids, restarted, deadPids,\n missing)", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n 
self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def _monit(self):\n # For each children\n for index, (cls, kwargs, proc) in enumerate(self._children):\n\n # Spawn children if required\n if proc is None:\n\n try:\n self._children[index][2] = cls(**kwargs)\n self._children[index][2].start()\n except Exception as e:\n self._children.pop(index)\n logger.error(f'Could not instantiate/spawn child \\'{cls.__name__}\\' due to: {str(e)}')\n logger.info(f'Removed child \\'{cls.__name__}\\' from processes list.')\n\n elif not (proc.is_alive() or proc.exitcode is None):\n\n # Join process and delete instance\n proc.join(timeout=0.1)\n self._children[index][2] = None", "def pre_fork(self, process_manager):\n process_manager.add_process(self.publish_daemon, name=self.__class__.__name__)", "def run_manager(self) -> None:\n \n for p in self.process_list:\n try:\n p.daemon = True\n p.start()\n except:\n self.process_list.remove(p)\n p = Process(target=self.multiprocessing_job, args=(self.process_job,))\n p.daemon = True\n self.process_list.append(p)\n p.start()\n #Every 1 seconds, check for active Processes.\n while True:\n sleep(1)\n running = any(p.is_alive() for p in self.process_list)\n if not running or not active_children:\n self.restart_required = True\n break\n self.logger.info(self.name + \" has finished managing.\")", "def _create_process(self, process, name):\n def _run():\n try:\n while True:\n process.loop()\n except KeyboardInterrupt:\n pass\n except:\n self._logger.exception('Process %s died!', name)\n return ProcessEnvironment().create_process(_run, name)", "def collectData(self):\n\n self.data.datahash = {} # dict of system data\n\n vmstat_dict = self._getvmstat()\n if vmstat_dict:\n self.data.datahash.update(vmstat_dict)\n\n uptime_dict = self._getuptime()\n if uptime_dict:\n self.data.datahash.update(uptime_dict)\n\n log.log( \"<system>system.collectData(): new system list created\", 7 )", "def process_selected(self):\n 
self.processTableWidget.clear()\n for p in self.processes:\n if \"id\" in p:\n if p['id'] == str(self.processBox.currentText()):\n process = p\n if \"parameters\" in process:\n #info(self.iface, \"New Process {}\".format(process['parameters']))\n self.processTableWidget.setRowCount(len(process['parameters']))\n self.processTableWidget.setColumnCount(3)\n self.processTableWidget.setHorizontalHeaderLabels(['Parameter', 'Type', 'Value'])\n header = self.processTableWidget.horizontalHeader()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)\n\n counter = 0\n for key, val in process['parameters'].items():\n # if key != \"data\" and key != \"imagery\":\n qitem = QTableWidgetItem(key)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n if \"required\" in val:\n if val[\"required\"]:\n boldFont = QtGui.QFont()\n boldFont.setBold(True)\n qitem.setFont(boldFont)\n\n self.processTableWidget.setItem(counter, 0, qitem)\n if \"schema\" in val:\n if \"type\" in val[\"schema\"]:\n type = QTableWidgetItem(str(val['schema']['type']))\n type.setFlags(QtCore.Qt.ItemIsEnabled)\n self.processTableWidget.setItem(counter, 1, type)\n if \"examples\" in val[\"schema\"]:\n example = QTableWidgetItem(str(val['schema']['examples'][0]))\n self.processTableWidget.setItem(counter, 2, example)\n counter += 1\n return\n else:\n info(self.iface, \"New Process: Parameters not found\")", "def launch_new_process(self, config_settings, create_status_output_file):\n # Clear temp folder since a new Core process is to be launched\n self.ext_env_handler.delete_temp_folder_contents()\n\n # create Status file\n if create_status_output_file:\n self.ext_output_status_handler.write_status_file(config_settings.__getattribute__(self.config_public_settings.operation), self.seq_no, status=self.status.Transitioning.lower())\n else:\n self.ext_output_status_handler.update_file(self.seq_no)\n # launch core code in a process and exit extension handler\n process = self.process_handler.start_daemon(self.seq_no, config_settings, self.ext_env_handler)\n self.logger.log(\"exiting extension handler\")\n exit(Constants.ExitCode.Okay)", "def process(self):\n self._processed = True\n # We need to load up previous section_maps info\n with open(os.path.join(self.home, 'section_maps'), 'rb') as _file:\n section_maps = pickle.load(_file)\n\n # This will ensure that sections persist with the same -a, -b nomenclature over time\n self.groups.section_maps = section_maps\n self.groups.period_info = {}\n\n super().process()", "def procinfo(self):\n\n info = {}\n info[\"pid\"] = self.pid\n info[\"exe\"] = self.exe\n info[\"procname\"] = self.procname\n\n return info", "def get_process_mapping():\n with open('/proc/{0}/stat'.format(os.getpid())) as f:\n self_tty = f.read().split()[STAT_TTY]\n processes = {}\n for pid in os.listdir('/proc'):\n if not pid.isdigit():\n continue\n try:\n stat = '/proc/{0}/stat'.format(pid)\n cmdline = '/proc/{0}/cmdline'.format(pid)\n with open(stat) as fstat, open(cmdline) as fcmdline:\n stat = re.findall(r'\\(.+\\)|\\S+', fstat.read())\n cmd = fcmdline.read().split('\\x00')[:-1]\n ppid = stat[STAT_PPID]\n tty = stat[STAT_TTY]\n if tty == self_tty:\n processes[pid] = Process(\n args=tuple(cmd), pid=pid, ppid=ppid,\n )\n except IOError:\n # Process has disappeared - just ignore it.\n continue\n return processes", "def inspire_pidstore():", "def _proc_collect(self) -> None:\n while 
True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def fork_pc(self):\n self.program_counter.append(0)", "def __init__(self, ripple_buffers, spike_listener, position_estimator, \\\n place_field_handler, ripple_trigger_thread, ripple_trigger_condition, \\\n shared_place_fields, clusters=None):\n Process.__init__(self)\n\n # Graphics windows\n self._command_window = tkinter.Tk()\n tkinter.Label(self._command_window, text=\"Enter command to execute...\").pack()\n self._key_entry = tkinter.Entry(self._command_window)\n self._key_entry.bind(\"<Return>\", self.process_command)\n self._key_entry.pack()\n exit_button = tkinter.Button(self._command_window, text='Quit', command=self.kill_gui)\n exit_button.pack()\n\n self._keep_running = Event()\n self._spike_listener = spike_listener\n self._position_estimator = position_estimator\n self._place_field_handler = place_field_handler\n self._ripple_trigger_thread = ripple_trigger_thread\n self._ripple_trigger_condition = ripple_trigger_condition\n if clusters is None:\n self._n_clusters = self._spike_listener.get_n_clusters()\n self._clusters = range(self._n_clusters)\n self._tetrodes = self._spike_listener.get_tetrodes()\n self._n_tetrodes = len(self._tetrodes)\n else:\n # TODO: Fetch indices for these clusters\n self._n_clusters = 0\n self._clusters = None\n pass\n self._cluster_colormap = colormap.magma(np.linspace(0, 1, self._n_clusters))\n\n # Large arrays that are shared across processes\n self._new_ripple_frame_availale = threading.Event()\n self._shared_raw_lfp_buffer = np.reshape(np.frombuffer(ripple_buffers[0], dtype='double'), (self._n_tetrodes, RiD.LFP_BUFFER_LENGTH))\n self._shared_ripple_power_buffer = np.reshape(np.frombuffer(ripple_buffers[1], dtype='double'), (self._n_tetrodes, RiD.RIPPLE_POWER_BUFFER_LENGTH))\n self._shared_place_fields = np.reshape(np.frombuffer(shared_place_fields, dtype='double'), (self._n_clusters, PositionAnalysis.N_POSITION_BINS[0], PositionAnalysis.N_POSITION_BINS[1]))\n\n # Local copies of the shared data that can be used at a leisurely pace\n self._lfp_tpts = np.linspace(0, RiD.LFP_BUFFER_TIME, RiD.LFP_BUFFER_LENGTH)\n self._ripple_power_tpts = np.linspace(0, RiD.LFP_BUFFER_TIME, RiD.RIPPLE_POWER_BUFFER_LENGTH)\n self._local_lfp_buffer = np.zeros((self._n_tetrodes, RiD.LFP_BUFFER_LENGTH), dtype='double')\n self._local_ripple_power_buffer = np.zeros((self._n_tetrodes, RiD.RIPPLE_POWER_BUFFER_LENGTH), dtype='double')\n self._most_recent_pf = np.zeros((PositionAnalysis.N_POSITION_BINS[0], PositionAnalysis.N_POSITION_BINS[1]), \\\n dtype='float')\n\n # Automatically keep only a fixed number of entries in this buffer... Useful for plotting\n self._pos_timestamps = deque([], self.__N_POSITION_ELEMENTS_TO_PLOT)\n self._pos_x = deque([], self.__N_POSITION_ELEMENTS_TO_PLOT)\n self._pos_y = deque([], self.__N_POSITION_ELEMENTS_TO_PLOT)\n self._speed = deque([], self.__N_POSITION_ELEMENTS_TO_PLOT)\n self._spk_clusters = deque([], self.__N_SPIKES_TO_PLOT)\n self._spk_timestamps = deque([], self.__N_SPIKES_TO_PLOT)\n self._spk_pos_x = deque([], self.__N_SPIKES_TO_PLOT)\n self._spk_pos_y = deque([], self.__N_SPIKES_TO_PLOT)\n\n # Figure/Animation element. 
So far the following have been included\n # Ripple detection\n # Place Fields\n # Position/Spikes overalaid\n self._rd_fig = None\n self._pf_fig = None\n self._pos_fig = None\n self._rd_ax = None\n self._pf_ax = None\n self._spk_pos_ax = None\n self._rd_frame = []\n self._spk_pos_frame = []\n self._pf_frame = []\n self._anim_objs = []\n self._thread_list = []\n\n # Communication buffers\n self._position_buffer = self._position_estimator.get_position_buffer_connection()\n self._spike_buffer = self._place_field_handler.get_spike_place_buffer_connection(self.__CLUSTERS_TO_PLOT)\n logging.info(MODULE_IDENTIFIER + \"Graphics interface started.\")", "def update_processes_config():\n\n config.set(\"ProcessDisplay\", \"processes\", str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)", "def test_removeProcess(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertEqual(len(self.pm.processes), 1)\r\n self.pm.removeProcess(\"foo\")\r\n self.assertEqual(len(self.pm.processes), 0)", "def re_process(self):\n rmtree(self.processed_dir)\n os.makedirs(self.processed_dir)\n self.process()\n\n print('Done!')", "def _spawn_service_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n\n listen_name = get_safe(config, \"process.listen_name\") or process_instance.name\n log.debug(\"Service Process (%s) listen_name: %s\", name, listen_name)\n process_instance._proc_listen_name = listen_name\n\n # Service RPC endpoint\n rsvc1 = self._create_listening_endpoint(node=self.container.node,\n from_name=listen_name,\n process=process_instance)\n # Named local RPC endpoint\n rsvc2 = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # cleanup method to delete process queue\n cleanup = lambda _: self._cleanup_method(process_instance.id, rsvc2)\n\n # Start an ION process with the right kind of endpoint factory\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc1, rsvc2],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_service_process for %s\" % \",\".join((listen_name, process_instance.id)))\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def checkProcess(self):\n process = subprocess.Popen(\"ps -A | grep g13d\", stdout=subprocess.PIPE, shell=True)\n out, err = process.communicate()\n if out != '':\n self.ui.but_activate.setEnabled(False)\n self.ui.lab_active.setText(\"Running ok\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : green; }\");\n else:\n self.ui.but_activate.setEnabled(True)\n self.ui.lab_active.setText(\"Not Started\")\n self.ui.lab_active.setStyleSheet(\"QLabel { background-color : none; color : red; }\");", "def process(self):\n self.output_info = self.attributes.copy()", "def _initialize_metadata(self) -> None:\n\n 
# Write the pidfile. The SchedulerService will monitor it after a grace period.\n self.write_pid()\n self.write_process_name()\n self.write_fingerprint(ensure_text(self.options_fingerprint))\n self._logger.info(f\"pantsd {VERSION} running with PID: {self.pid}\")\n self.write_socket(self._server.port())", "def update_alive(self):\n if (not self.proc is None) and (not self.proc.is_alive()):\n print(\"process died in error, destroying proxy object\")\n self.reset()", "def __init__(self):\n self.child = os.fork()\n if self.child == 0:\n return\n else:\n self.watch()", "def wait_for_exec_to_start():\n node_instances = self.client.node_instances.list()\n for ni in node_instances:\n # this will keyerror out (and be retried) if the operation\n # didn't run yet\n pids[ni.node_id] = ni.runtime_properties['pid']", "def write_to_db( self ) :\n # first lets update the json file internally through: modify the path to figures\n # The json file has two keys: info and processes\n # we loop over all processes and we change the value of the key figure\n for proc in self.data[\"processes\"].keys():\n # for keys in self.data[\"processes\"][proc].keys():\n # each process has one figure\n try:\n # if keys == \"figure\":\n old_value = self.data[\"processes\"][proc][\"figure\"]\n new_value = self.datapath + \"/\" + old_value\n self.data[\"processes\"][proc][\"figure\"] = new_value\n except Exception as err:\n print( 'The key %s does not exist in the json file' % 'figure' )\n print( err )\n\n # Check the existence of the current json file inside the data base\n # the name of the json file starts with run_number as: run_number.json\n try:\n if self.collection.find_one({\"info.run\": {\"$eq\": self.run_number}}):\n # if the document with the given run number exists, delete it and re-write\n print( \"File %s already in database\" % self.data[\"info\"][\"filename\"] )\n self.collection.delete_one( {\"info.run\": {\"$eq\": self.run_number}} )\n self.collection.insert_one( self.data )\n\n else:\n print('File %s is going to be dumbed' % self.data[\"info\"][\"filename\"])\n self.collection.insert_one( self.data )\n\n except pymongo.errors.ServerSelectionTimeoutError as err:\n print('the data base server is down')\n print(err)\n sys.exit('check the database server if it is up and running ?')\n\n return 0", "def _on_parent_process_kill(self):" ]
[ "0.66696364", "0.6552674", "0.6385766", "0.6369811", "0.6323591", "0.6322", "0.6125703", "0.61249787", "0.6073016", "0.60558206", "0.6030704", "0.59983146", "0.59608686", "0.59429777", "0.58886784", "0.58749086", "0.58633256", "0.5861184", "0.58448863", "0.58353436", "0.5822032", "0.5821897", "0.5787912", "0.57189", "0.5698152", "0.5692378", "0.567891", "0.5669052", "0.5664925", "0.565891", "0.56559247", "0.5650277", "0.5607802", "0.55663204", "0.55642617", "0.5560831", "0.55466515", "0.5524358", "0.55174667", "0.5508251", "0.55038536", "0.55024624", "0.55014527", "0.5495101", "0.5477778", "0.5476122", "0.5472004", "0.54691887", "0.54640096", "0.5452185", "0.54504", "0.543409", "0.54316604", "0.5426717", "0.54244536", "0.5422433", "0.5422433", "0.5422433", "0.5422433", "0.5422433", "0.54148525", "0.5397192", "0.538701", "0.5373303", "0.53679264", "0.53620833", "0.53580153", "0.534725", "0.5332844", "0.533033", "0.5329412", "0.53226435", "0.5290285", "0.528662", "0.52837807", "0.52832466", "0.5280482", "0.5280135", "0.52756923", "0.52755463", "0.5273389", "0.52731586", "0.52724564", "0.52704316", "0.52572936", "0.52464145", "0.5243199", "0.52419627", "0.5241034", "0.5188912", "0.51885176", "0.5184023", "0.51808", "0.5178914", "0.5176016", "0.5173061", "0.5169672", "0.51667345", "0.5157724", "0.5157535" ]
0.61501944
6
Return the next process to run in the cpu. out_process_pid the pid of the process that just left the cpu, or None in case there was no process running. The engine is responsible for updating the usage time.
def schedule(self, pid, delta_t):
    size = len(self._process_list)

    if size == 0:
        return None

    process = self._process_list[self._last_index % size]
    self._last_index += 1

    return process
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self) -> Union['psutil.Process', None]:\n psutil = attempt_import('psutil')\n pid = self.pid\n if pid is None:\n return None\n if not '_process' in self.__dict__ or self._process.pid != int(pid):\n try:\n self._process = psutil.Process(int(pid))\n except Exception as e:\n if self.pid_path.exists():\n self.pid_path.unlink()\n return None\n return self._process", "def get_process(self, pid):\n return self.processes.get(pid, None)", "def select_process(self):\n result = -1\n for idx in self.priority:\n if self.processes[idx].working_time < self.processes[idx].final_execution_time:\n result = idx\n break\n return result", "def get_pid_processor(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get info about processor\n # that PID was scheduled on last time\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n proc_stat = stat_file.readline().strip().split(' ')\n return int(proc_stat[39])\n except EnvironmentError:\n return -1", "def process(self):\n # type: () -> Optional[multiprocessing.Process]\n try:\n return self._process # type: ignore # pylint: disable=no-member\n except:\n return None", "def nextPwmOutput(self):\n hwidRef = YRefParam()\n if YAPI.YISERR(self._nextFunction(hwidRef)):\n return None\n if hwidRef.value == \"\":\n return None\n return YPwmOutput.FindPwmOutput(hwidRef.value)", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def get_overcloud_nodes_running_process(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n oc_nodes_running_process = oc_procs_df.query('PROCESS==\"{}\"'.format(\n process))['overcloud_node'].unique()\n return oc_nodes_running_process", "def free_pid():\n host, pid, tid = get_process_id()\n while True:\n # PIDs are often restricted to a small range. On Linux the range >32k is by default not used.\n pid = random.randint(33000, 65000)\n if not process_alive(host, pid, tid):\n return pid", "def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def next_available_PPN(self):\n if len(self.memory) < self.MaxMemPages:\n return len(self.memory) #physical page number is simply just next index in memory. 
\n else:\n return self.getLRUPPN() #Return the physical page number with the MINUMUM time stamp (least recently used)", "def get_cpu_usage(pid):\n\n cpuusage = subprocess.Popen(['ps','-o', 'pcpu', '-p', str(pid)], shell=False, stdout=subprocess.PIPE)\n cpuusage.stdout.readline()\n return float(cpuusage.stdout.readline().rstrip())", "def get_process_object(pid, die=True):\n try:\n return psutil.Process(pid)\n except psutil.NoSuchProcess as e:\n if die:\n raise e\n else:\n return None", "def update_pid_running_on_dpdk_cpu(self):\n #cpu_list = self.get_cpu_list_by_mask", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get_my_process():\n return get_process_object(os.getpid())", "def get_process_speed(self, pid):\n raise NotImplementedError()", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def pid(self):\n # type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def process(proc_data):\n\n # No further processing\n return proc_data", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def get_process(self) -> ApplyResult:\n return self._process", "def get_pid(self, file_path) -> int | None:\n if file_path in self._processors:\n return 
self._processors[file_path].pid\n return None", "def get_qc_process(process):\n\n # Each entry in input_output_maps is an input/output specification with a single\n # input and any number of outputs. This gets the first input.\n first_io = process.input_output_maps[0]\n first_in_artifact = first_io[0]['uri']\n\n processes = process.lims.get_processes(inputartifactlimsid=first_in_artifact.id)\n seq_processes = [proc for proc in processes if proc.type_name in [p[1] for p in QC_PROCESSES]]\n # Use the last sequencing process. In case of crashed runs, this will be the right one.\n try:\n return seq_processes[-1]\n except IndexError:\n return None", "def schedule(self, pid, delta_t):\n\n\t\tif len(self._process_list) == 0: \n\t\t\treturn None\n\n\t\tif self._last_index >= len(self._process_list):\n\t\t\tself._last_index = 0\n\t\t\n\t\twhile True:\n\n\t\t\tpriority = sort_priority()\n\t\t\t# print \"SCHEDULING PRIORITY \", priority, \"\\n\"\n\t\t\tactual_index = self._last_index + 1\n\n\t\t\twhile True:\n\t\t\t\t# print \"FIND PROCESS \" , actual_index, \"\\n\"\n\t\t\t\t\n\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\tactual_index = 0\n\t\t\t\t\n\t\t\t\tprocess = self._process_list[actual_index] \n\t\t\t\t\n\t\t\t\tif find_priority(process.get_priority()) == priority:\n\t\t\t\t\tself._last_index = self._process_list.index(process)\n\t\t\t\t\treturn process\n\t\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tactual_index += 1\n\n\t\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\t\tactual_index = 0\n\t\t\t\t\t\tpriority = sort_priority()\n\n\t\t\t\t\t# print \"LAST INDEX: \", self._last_index, \" ACTUAL INDEX: \", actual_index\t\t\t\t\t\n\n\t\t\t\t\tif actual_index == self._last_index: \n\t\t\t\t\t\tbreak", "def _maybe_use_running_output(process, outputs):\n if os.environ.get(\"POLYSQUARE_ALWAYS_PRINT_PROCESS_OUTPUT\", None):\n return running_output(process, outputs)\n\n return None", "def get_process_by_process_id(self, process_id):\n try:\n process = Process.objects.get(pk=process_id)\n except Process.DoesNotExist:\n process = None\n\n return process", "def compute_memory_usage(app_pid):\n logging.info(\"Computing memory usage...\")\n\n try:\n p = subprocess.Popen(\n \"top -l 1 -pid {PID}\".format(PID=app_pid),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=True\n )\n output = p.communicate()[0].strip()\n\n memory_usage = round(float(str(list(filter(('').__ne__, str(output.splitlines()[-1]).split(\" \")))[7])[:-1]) / 1024.00, 2)\n logging.info(\"Memory usage: [{MEMORY} MB]\".format(MEMORY=memory_usage))\n\n except Exception as e:\n logging.error(\"Computing the memory usage failed with error '{ERROR}'\".format(ERROR=e))\n return None\n\n logging.info(\"Memory usage computed successfuly!\")\n return memory_usage", "def get_cpu_number():\n try:\n output = subprocess.check_output('lscpu').decode(\"utf-8\")\n for line in output.splitlines():\n m = re.match(r'NUMA node0.*:\\s*\\d+-(\\d+)', line)\n if m:\n return m.group(1)\n except OSError:\n pass\n sys.stderr.write(\"Warning: Unable to select CPU ID, using 0\\n\")\n return 0", "def emit(self, pid):\n print \"Emit with pid: {}\".format(pid)\n if pid is None:\n return False\n metrics = {'cpu': 0,\n 'ram': 1,\n 'net': 2,\n 'bytes_sent': 1,\n 'bytes_recv': 1,\n 'packets_sent': 1,\n 'packets_recv': 1,\n 'errin': 0,\n 'errout': 0,\n 'dropin': 0,\n 'dropout': 0,\n 'disk': 0,\n 'files': 0,\n 'time': calendar.timegm(time.gmtime()) * 1000}\n #return False # prevent this\n 
##########################################################################################################\n # psutil read metrics\n try:\n # self.proc = psutil.Process(pid)\n process_name = None\n if self.personal_cloud.lower() == \"stacksync\":\n if os.name == 'nt':\n process_name = \"javaw.exe\"\n elif os.name == \"posix\":\n process_name = \"java\"\n\n elif self.personal_cloud.lower() == \"dropbox\":\n if os.name == 'nt':\n process_name = \"Dropbox.exe\"\n elif os.name == \"posix\":\n process_name = \"dropbox\"\n elif self.personal_cloud.lower() == \"owncloud\":\n process_name = \"owncloudcmd\"\n elif self.personal_cloud.lower() == 'mega':\n process_name = \"megacmd\"\n\n if self.proc is None or self.proc.pid != pid:\n self.proc = psutil.Process(pid)\n\n if process_name == self.proc.name() or \"owncloudcmd\" == process_name or \"megacmd\" == process_name:\n print \"OKEY match {} == {}\".format(self.proc.name(), process_name)\n else:\n print \"sync client does not match: {}\".format(process_name)\n return False\n\n except Exception as ex:\n print \"sync client is not running! {}\".format(pid)\n print ex.message\n return False # exit as the process is not alive.\n\n ##########################################################################################################\n print \"PID: {} [{}]\".format(pid, self.personal_cloud.lower())\n try:\n if self.personal_cloud.lower() == \"stacksync\":\n # todo lookup for stacksync process here => using psutil\n cpu_usage = int(math.ceil(self.proc.cpu_percent(0)))\n ram_usage = self.proc.memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"owncloud\":\n cpu_usage = int(math.ceil(self.proc.children()[0].cpu_percent(interval=1)))\n ram_usage = self.proc.children()[0].memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"mega\":\n cpu_usage = int(math.ceil(self.proc.children()[0].cpu_percent(interval=1)))\n ram_usage = self.proc.children()[0].memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"dropbox\":\n # todo lookup for dropbox process here => using psutil\n cpu_usage = int(math.ceil(self.proc.cpu_percent(0)))\n ram_usage = self.proc.memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n except Exception as e:\n print e.message\n\n # assign the network usage metric\n\n if self.prev_metric is not None:\n # do nothing because its the first emit ant there are no previous metric to compare\n # last_net = self.prev_metric['metrics']['net']\n last_time = self.prev_metric['metrics']['time']\n\n curr_net_counter = psutil.net_io_counters(pernic=True)['eth0'] # read the bytes from somewhere\n curr_time = metrics['time']\n elapsed_time = (curr_time - last_time) / 1000 # segons\n for key, value in curr_net_counter.__dict__.items():\n metrics[key] = (value - getattr(self.prev_net_counter, key)) / elapsed_time # unit is seconds\n self.prev_net_counter = curr_net_counter\n # assign hard drive usage metric\n\n if os.name == \"nt\":\n drive_usage = \"1234\"\n elif os.name == \"posix\":\n drive_usage_cmd = ['/usr/bin/du', '-ks', '/home/vagrant/{}'.format(self.personal_folder)]\n drive_usage_output = subprocess.Popen(drive_usage_cmd, stdout=subprocess.PIPE)\n drive_usage = drive_usage_output.stdout.read()\n try:\n metrics['disk'] = int(drive_usage.split('\\t')[0]) # kilo bytes cast string to int\n except Exception as ex:\n print \"invalid literal for... 
memory unit\"\n metrics['disk'] = 1\n # assign add folder num of files metric\n\n\n\n\n if os.name == \"nt\":\n num_files = \"123\"\n elif os.name == \"posix\":\n find_cmd = '/usr/bin/find /home/vagrant/{} -type f'.format(self.personal_folder).split()\n proc_find = subprocess.Popen(find_cmd, stdout=subprocess.PIPE)\n wc_cmd = '/usr/bin/wc -l'.split()\n proc_wc = subprocess.Popen(wc_cmd, stdin=proc_find.stdout, stdout=subprocess.PIPE)\n num_files = proc_wc.communicate()[0]\n try:\n metrics['files'] = int(num_files.split('\\t')[0])\n except Exception as ex:\n print \"invalid literal for... file counter\"\n\n net_stats = self.traffic_monitor.notify_stats()\n # z = dict(x.items() + y.items()) => metrics\n # envez de esto dict join\n metrics['data_rate_size_up'] = net_stats['data_rate']['size_up']\n metrics['data_rate_size_down'] = net_stats['data_rate']['size_down']\n metrics['data_rate_pack_up'] = net_stats['data_rate']['pack_up']\n metrics['data_rate_pack_down'] = net_stats['data_rate']['pack_down']\n metrics['meta_rate_size_up'] = net_stats['meta_rate']['size_up']\n metrics['meta_rate_size_down'] = net_stats['meta_rate']['size_down']\n metrics['meta_rate_pack_up'] = net_stats['meta_rate']['pack_up']\n metrics['meta_rate_pack_down'] = net_stats['meta_rate']['pack_down']\n\n '''\n {'data_rate':\n {'size_up': 0.471, 'pack_down': 0.00175, 'pack_up': 0.00225, 'size_down': 0.612},\n 'meta_rate':\n {'size_up': 0.0, 'pack_down': 0.0, 'pack_up': 0.0, 'size_down': 0.0},\n 'time': 1461065156000\n }\n '''\n\n tags = ''\n if tags == '':\n tags = {\n 'profile': self.receipt,\n 'credentials': 'pc_credentials',\n 'client': self.personal_cloud.lower(),\n }\n\n data = {\n 'metrics': metrics,\n 'tags': tags\n }\n self.prev_metric = data # update the last emited metric\n msg = json.dumps(data)\n print msg\n\n self.channel.basic_publish(\n exchange='metrics',\n routing_key=self.hostname,\n body=msg)\n\n return True", "def GetCpuStats(self, pid):\n class ProcTaskInfo(ctypes.Structure):\n \"\"\"Struct for proc_pidinfo() call.\"\"\"\n _fields_ = [(\"pti_virtual_size\", ctypes.c_uint64),\n (\"pti_resident_size\", ctypes.c_uint64),\n (\"pti_total_user\", ctypes.c_uint64),\n (\"pti_total_system\", ctypes.c_uint64),\n (\"pti_threads_user\", ctypes.c_uint64),\n (\"pti_threads_system\", ctypes.c_uint64),\n (\"pti_policy\", ctypes.c_int32),\n (\"pti_faults\", ctypes.c_int32),\n (\"pti_pageins\", ctypes.c_int32),\n (\"pti_cow_faults\", ctypes.c_int32),\n (\"pti_messages_sent\", ctypes.c_int32),\n (\"pti_messages_received\", ctypes.c_int32),\n (\"pti_syscalls_mach\", ctypes.c_int32),\n (\"pti_syscalls_unix\", ctypes.c_int32),\n (\"pti_csw\", ctypes.c_int32),\n (\"pti_threadnum\", ctypes.c_int32),\n (\"pti_numrunning\", ctypes.c_int32),\n (\"pti_priority\", ctypes.c_int32)]\n PROC_PIDTASKINFO = 4\n def __init__(self):\n self.size = ctypes.sizeof(self)\n super(ProcTaskInfo, self).__init__() # pylint: disable=bad-super-call\n\n proc_info = ProcTaskInfo()\n if not self.libproc:\n self.libproc = ctypes.CDLL(ctypes.util.find_library('libproc'))\n self.libproc.proc_pidinfo(pid, proc_info.PROC_PIDTASKINFO, 0,\n ctypes.byref(proc_info), proc_info.size)\n\n # Convert nanoseconds to seconds.\n cpu_time = (proc_info.pti_total_user / 1000000000.0 +\n proc_info.pti_total_system / 1000000000.0)\n results = {'CpuProcessTime': cpu_time,\n 'ContextSwitches': proc_info.pti_csw}\n\n # top only reports idle wakeup count starting from OS X 10.9.\n if self.GetOSVersionName() >= os_version_module.MAVERICKS:\n results.update({'IdleWakeupCount': 
self._GetIdleWakeupCount(pid)})\n return results", "def get(input=None):\n if isinstance(input, datetime.datetime):\n return Elapsed((datetime.datetime.now() - input).total_seconds())\n if not input or isinstance(input, int):\n pid = input if input else os.getpid()\n output = os.popen(\"ps -p %s -o etime | grep -v ELAPSED\" %\n pid).read().strip()\n if output:\n return Elapsed(_parse_ps_output(output))", "def schedule(self, pid, delta_t):\n\t\tif len(self._process_list) == 0:\n\t\t\treturn None\n\n\t\tif self._last_index >= len(self._process_list):\n\t\t\tself._last_index = 0\n\t\t\n\t\twhile True:\n\n\t\t\tpriority = sort_priority()\n\t\t\t# print \"SCHEDULING PRIORITY \", priority, \"\\n\"\n\t\t\tactual_index = self._last_index + 1\n\n\t\t\twhile True:\n\t\t\t\t\n\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\tactual_index = 0\n\t\t\t\t\n\t\t\t\tprocess = self._process_list[actual_index] \n\t\t\t\t\n\t\t\t\tif find_priority(process.get_priority()) == priority:\n\t\t\t\t\tself._last_index = self._process_list.index(process)\n\t\t\t\t\t# print \"FIND PROCESS \" , actual_index, \"\\n\"\n\t\t\t\t\treturn process\n\t\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tactual_index += 1\n\n\t\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\t\tactual_index = 0\n\n\t\t\t\t\t# print \"LAST INDEX: \", self._last_index, \" ACTUAL INDEX: \", actual_index\t\t\t\t\t\n\n\t\t\t\t\tif actual_index == self._last_index: \n\t\t\t\t\t\tbreak", "def current_process(self):\n return self._current_process", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def pid(self):\n return self._process.pid", "def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')", "def get_overall_cpu_util(dut, exclude_proc_name=None):", "def _pid(self, name):\n return self.pid_lookup[name]", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None", "def get_next_gp(self):\n raise NotImplementedError('Abstract Method')", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def _obtain(self):\n\n while True:\n # make sure we're observing load maximums\n if self.max_load is not None:\n try:\n load = os.getloadavg()\n if jobserver_running_jobs() > 0 and load[1] > self.max_load:\n time.sleep(0.01)\n continue\n except NotImplementedError:\n pass\n\n # make sure we're observing memory maximum\n if self.max_mem is not None:\n mem_used, mem_total = memory_usage()\n mem_percent_used = 100.0 * float(mem_used) / float(mem_total)\n if jobserver_running_jobs() > 0 and mem_percent_used > self.max_mem:\n time.sleep(0.01)\n continue\n\n # get a token from the job pipe\n try:\n token = os.read(self.job_pipe[0], 1)\n return token\n except OSError as e:\n if e.errno != errno.EINTR:\n raise", "def _cpu_usage(self, e):\n\n cores = os.cpu_count()\n try:\n cpu_usage = int(self.cpu_entry.get())\n if cpu_usage < 0 or cpu_usage > 100:\n self.invalid_input()\n elif cpu_usage == 0:\n self.processes = 1\n else:\n self.processes = round(cpu_usage / 100 * cores)\n except ValueError:\n self.invalid_input()", "def worker_process(self, item):\n g_sleep()\n return item", "def pidGet(self) -> float:\n ...", "def pidGet(self) -> float:\n ...", "def unit_for_pid(pid):\n try:\n cgroup = slurp('/proc/%d/cgroup' % pid)\n match = re.search(\"1:name=systemd:/system.slice/(?:jvm:)?(.+?)\\.service\", cgroup)\n return match.group(1) if match else None\n except IOError:\n return None", "def resource_monitor(childpid, pipe_handle):\n # Get our pid\n ourpid = os.getpid()\n \n # Calculate how often disk should be checked\n disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_LINUX / repy_constants.CPU_POLLING_FREQ_LINUX)\n current_interval = -1 # What cycle are we on \n \n # Store time of the last interval\n last_time = getruntime()\n last_CPU_time = 0\n resume_time = 0 \n \n # Run 
forever...\n while True:\n ########### Check CPU ###########\n # Get elapsed time\n currenttime = getruntime()\n elapsedtime1 = currenttime - last_time # Calculate against last run\n elapsedtime2 = currenttime - resume_time # Calculate since we last resumed repy\n elapsedtime = min(elapsedtime1, elapsedtime2) # Take the minimum interval\n last_time = currenttime # Save the current time\n \n # Safety check, prevent ZeroDivisionError\n if elapsedtime == 0.0:\n continue\n \n # Get the total cpu at this point\n totalCPU = os_api.get_process_cpu_time(ourpid) # Our own usage\n totalCPU += os_api.get_process_cpu_time(childpid) # Repy's usage\n \n # Calculate percentage of CPU used\n percentused = (totalCPU - last_CPU_time) / elapsedtime\n \n # Do not throttle for the first interval, wrap around\n # Store the totalCPU for the next cycle\n if last_CPU_time == 0:\n last_CPU_time = totalCPU \n continue\n else:\n last_CPU_time = totalCPU\n \n # Calculate stop time\n stoptime = nanny.calculate_cpu_sleep_interval(nanny.get_resource_limit(\"cpu\"), percentused, elapsedtime)\n \n # If we are supposed to stop repy, then suspend, sleep and resume\n if stoptime > 0.0:\n # They must be punished by stopping\n os.kill(childpid, signal.SIGSTOP)\n\n # Sleep until time to resume\n time.sleep(stoptime)\n\n # And now they can start back up!\n os.kill(childpid, signal.SIGCONT)\n \n # Save the resume time\n resume_time = getruntime()\n\n # Send this information as a tuple containing the time repy was stopped and\n # for how long it was stopped\n write_message_to_pipe(pipe_handle, \"repystopped\", (currenttime, stoptime))\n \n \n ########### End Check CPU ###########\n # \n ########### Check Memory ###########\n \n # Get how much memory repy is using\n memused = os_api.get_process_rss()\n \n # Check if it is using too much memory\n if memused > nanny.get_resource_limit(\"memory\"):\n raise ResourceException, \"Memory use '\"+str(memused)+\"' over limit '\"+str(nanny.get_resource_limit(\"memory\"))+\"'.\"\n \n ########### End Check Memory ###########\n # \n ########### Check Disk Usage ###########\n # Increment our current cycle\n current_interval += 1;\n \n # Check if it is time to check the disk usage\n if (current_interval % disk_interval) == 0:\n # Reset the interval\n current_interval = 0\n \n # Calculate disk used\n diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)\n\n # Raise exception if we are over limit\n if diskused > nanny.get_resource_limit(\"diskused\"):\n raise ResourceException, \"Disk use '\"+str(diskused)+\"' over limit '\"+str(nanny.get_resource_limit(\"diskused\"))+\"'.\"\n\n # Send the disk usage information, raw bytes used\n write_message_to_pipe(pipe_handle, \"diskused\", diskused)\n \n ########### End Check Disk ###########\n \n # Sleep before the next iteration\n time.sleep(repy_constants.CPU_POLLING_FREQ_LINUX)", "def get_top():\n print(\"This processes are using the cpu the most:\")\n print(os.system(\"ps axo %cpu,pid,euser,cmd | sort -nr | head -n 5\"))", "def usePIDOutput(self, output):\n self.motor.pidWrite(output)", "def get_isolate_cpus(self):\n\n command = \"cat /proc/cpuinfo | grep processor | awk '{print $NF}'\"\n out = run_and_getout(command)\n str_out = out.decode(self.default_code).replace('\\n', ' ').strip()\n str_out = str(str_out)\n if str_out[0] == \"0\":\n return str_out[2:]\n else:\n return str_out", "def get_sequencing_process(process):\n\n # Each entry in input_output_maps is an input/output specification with a single\n # input and any number of outputs. 
This gets the first input.\n first_io = process.input_output_maps[0]\n first_in_artifact = first_io[0]['uri']\n\n processes = process.lims.get_processes(inputartifactlimsid=first_in_artifact.id)\n seq_processes = [proc for proc in processes if proc.type_name in [p[1] for p in SEQ_PROCESSES]]\n # Use the last sequencing process. In case of crashed runs, this will be the right one.\n try:\n return seq_processes[-1]\n except IndexError:\n return None", "def get_cpu_clock_cycles_of_pid(pid):\n try:\n with open(LINUX_PROCESS_STAT_LOCATION % pid, 'r') as f:\n pid_entries = f.read().split(' ')\n except IOError:\n return None\n\n pid_cycles = 0\n if len(pid_entries) > 14:\n pid_cycles = int(pid_entries[13]) + int(pid_entries[14])\n return pid_cycles", "def _get_pid(split_data, sensor):\n prot, ip_dst, port_dst, timestamp = split_data\n prot = prot.lower()\n\n if not sanitizer.check_get_pid_params(prot, ip_dst, port_dst, timestamp):\n return '-1,error checking input'\n\n return sensor.search_process(prot, ip_dst, port_dst, timestamp)", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def get_process_info_by_pid(self, pid):\n # TODO: discuss if self.logger needs to happen here? I think not? -BY\n\n for process in self.data_storage.running_data:\n if self.data_storage.running_data[process]['pid'] == pid:\n return self.data_storage.running_data[process]", "def get_process(ngrok_path, config_path=None, auth_token=None, region=None):\n if ngrok_path in _current_processes:\n # Ensure the process is still running and hasn't been killed externally\n if _current_processes[ngrok_path].proc.poll() is None:\n return _current_processes[ngrok_path]\n else:\n _current_processes.pop(ngrok_path, None)\n\n return _start_process(ngrok_path, config_path, auth_token, region)", "def pid(self):\n return self._get_process_id()", "def getProcInfo(self, line):\n try:\n pid, rss, cpu, cmdAndArgs = line.split(None, 3)\n except ValueError:\n # Defunct processes look like this (no RSS data)\n # '28835916 00:00:00 <defunct>'\n pid, cpu, cmdAndArgs = line.split(None, 2)\n rss = \"0\"\n # Exiting and Idle processes look like this\n # (no RSS data, TIME data == '-')\n # '11337738 - <exiting>'\n # '11862166 - <idle>'\n # _extractProcessMetrics(self, line) method will try\n # to parseCpuTime('-') with exception\n if cpu == \"-\":\n cpu = \"00:00:00\"\n\n return pid, rss, cpu, cmdAndArgs", "def find_e_hunt_pid():\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name', 'username'])\n except psutil.NoSuchProcess:\n pass\n else:\n if pinfo[\"name\"] == \"The Equinox Hunt.exe\":\n return pinfo['pid']\n raise EnvironmentError(\"Equinox Hunt not found!\")", "def allocated_cpu(self):\n return self._allocated_cpu", "def get_cpu_usage(pid):\n try:\n process = psutil.Process(pid) \n cpu = process.cpu_times()[0]\n logging.info(f\"Calculated CPU usage {cpu} for PID {pid}\")\n return float(cpu)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def cpu(self):\r\n return self._cpu", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")", "def cpu(self) -> Optional[Any]:\n 
return pulumi.get(self, \"cpu\")", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def get_processor_output(location: Optional[pulumi.Input[str]] = None,\n processor_id: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProcessorResult]:\n ...", "def read(self):\n if self.alive:\n with self._register_poll():\n with _unblock_read(self._proc):\n return self._yield_ready_read()\n else:\n raise ProcessIsDeadError('Can not read. The process is already dead.')", "def output_node(self, port: int):\n return self._output_nodes_map[port]", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def run_processing_engine(input_file):\n from subprocess import Popen, PIPE\n p = Popen([\"python\", \"process.py\", input_file, \"-v\"], stdout=PIPE)\n return p.wait()", "def get_cpu_core():\n processor_info = subprocess.getoutput('dmidecode -t processor')\n cpu_core_value = re.findall(r'(?i)Core Count:\\s+(.*?)\\n', processor_info, re.S)[0]\n log.info('cpu_core value:{}'.format(cpu_core_value))\n if cpu_core_value:\n cpu_core = cpu_core_value\n else:\n cpu_core = ''\n return cpu_core", "def get_cpu_usage():\n process_details = RU_OBJ.get_curr_processes()\n return json.dumps(sorted(process_details, key=lambda k: k['name']))", "def next_job(data, rank):\n for j in data.queue:\n process = data.nodes[j[0]]\n status = j[1]\n if process.is_ready() and status == -1:\n j[1] = rank\n return process\n return None # no job ready to execute or job finished", "def _while_loop(pd, nmax, max_loop, ncore, outfile):\n \n # make sure the random seed is different for every processor\n random_number_seeds = [3456789, 7654321, 2435467, 8273645,\n 1085712, 4154712, 1248291, 8415917,\n 2345161, 5710916, 5718601, 7516234,\n 9235161, 4917519, 1111245, 8167834] \n \n # get the processor ID (1 - Ncore) and convert to single integer\n current = multiprocessing.current_process()\n pid = current._identity\n pid = pid[0]\n \n #\n # choose a different seed for each processor from the list so each processor has\n # a different randum number seed. 
Then, fiddle with each seed a little so \n # the seeds aren't the same every time the code is run\n seed = np.int(random_number_seeds[pid] * (np.random.rand()*(10.0 - 0.01) + 0.01))\n \n np.random.seed(seed)\n \n #print 'id and seed', pid, seed\n \n n_particles = 0\n loop_counter = 0\n fmin_scale = 1.0E-100 # -16 -> -32 -> -100\n F_max = np.max(pd.DF.f) #; F_min = np.min(pd.DF.f);\n F_min = np.min(pd.DF.f) * fmin_scale\n # F_max = np.max(pd.DF.f[:-1])#; F_max = 1.0E-88\n #print F_min, F_max\n if pd.optimize:\n relative_potential = pd._interpolate_relative_potential\n else:\n relative_potential = pd.DF.relative_potential\n \n \n pos = np.zeros((nmax, 3))\n vel = np.zeros((nmax, 3)) \n \n while (( n_particles < nmax) and (loop_counter < max_loop)):\n \n r = pd._choose_position()\n Psi = relative_potential(r) \n \n v = pd._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n \n f_E = pd.DF.interpolate_f(E)\n \n logF = ( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n # if choosing random F in log F, might be good to do the comparison in logspace as well\n #.... i.e log(F) <= log(f_E) \n #\n # 0 FOR F_E MEANS THAT E < E_min of the potential. THis happens when Psi - KE is smaller\n # than the value of the potential at large_r... should this be considered unbound \n # even though it isn't zero? Is this effectively zero? This has been adjusted in the velocity\n # picking routine but needs verification to make sure it works.... but regardless, I doubt\n # that this is the reason why the particles are failing for NFW but working for hernquist....\n #\n if np.abs(np.log10(f_E)) == np.inf:\n keep_particle = False\n _my_print('log value error... throwing out particle')\n else:\n keep_particle = (logF <= np.log10(f_E))\n\n \n if keep_particle: \n index = n_particles \n \n # \n # need to sample in cosine theta NOT theta!!!!! WTF!!!!!\n #\n #\n \n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n \n # convert position to cartesian using random theta and phi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n pos[index] = r * np.array([x,y,z])\n\n # repeat for velocity using new random numbersw\n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n vel[index] = v * np.array([vx,vy,vz])\n \"\"\" \n #else: # do strict cartesian ... slower.... 
less attractive\n # \n # axis_list = [0, 1, 2]\n # random.shuffle(axis_list)\n \n # #axis_index = np.random.randint(3)\n # first_axis = axis_list[0]\n #\n # pos[index, first_axis] = np.random.rand() * (2.0*r) - r\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n # second_axis = axis_list[1]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2)\n \n pos[index, second_axis] = np.random.rand()*(2.0 * max_r) - max_r\n #del axis_list[axis_index]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2 - pos[index,second_axis]**2)\n \n third_axis = axis_list[2]\n pos[index, third_axis] = np.random.rand() * (2.0 * max_r) - max_r\n \n if np.sqrt(pos[index,0]**2 + pos[index,1]**2 + pos[index,2]**2) > r:\n _my_print('R IS TOO LARGE')\n \n ###\n axis_list = [0, 1, 2]\n random.shuffle(axis_list)\n \n #axis_index = np.random.randint(3)\n first_axis = axis_list[0]\n \n vel[index, first_axis] = np.random.rand() * (2.0*v) - v\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n second_axis = axis_list[1]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2)\n \n vel[index, second_axis] = np.random.rand()*(2.0 * max_v) - max_v\n #del axis_list[axis_index]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2 - vel[index,second_axis]**2)\n \n third_axis = axis_list[2]\n vel[index, third_axis] = np.random.rand() * (2.0 * max_v) - max_v \n \n \n \"\"\"\n n_particles = n_particles + 1\n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n # now write out to a temporary file\n f = open(outfile + \"_%02i_\"%(pid) + \".temp\", 'w')\n fmt = \"%12.12E %12.12E %12.12E %12.12E %12.12E %12.12E %12.12E\\n\"\n \n \n for i in np.arange(nmax):\n f.write(fmt%(pd.M_part, pos[i,0], pos[i,1], pos[i,2], \n vel[i,0], vel[i,1], vel[i,2]))\n \n \n f.close() \n \n return pos, vel", "def spinupoutputprocess():\n if __name__ == '__main__':\n _hwmgr = HardwareController(OUTPUT_SETTINGS)\n PROCESSES.append(_hwmgr)\n _hwmgr.start()", "def process_iter():\r\n def add(pid):\r\n proc = Process(pid)\r\n _pmap[proc.pid] = proc\r\n return proc\r\n\r\n def remove(pid):\r\n _pmap.pop(pid, None)\r\n\r\n a = set(get_pid_list())\r\n b = set(_pmap.keys())\r\n new_pids = a - b\r\n gone_pids = b - a\r\n\r\n for pid in gone_pids:\r\n remove(pid)\r\n for pid, proc in sorted(list(_pmap.items()) + \\\r\n list(dict.fromkeys(new_pids).items())):\r\n try:\r\n if proc is None: # new process\r\n yield add(pid)\r\n else:\r\n # use is_running() to check whether PID has been reused by\r\n # another process in which case yield a new Process instance\r\n if proc.is_running():\r\n yield proc\r\n else:\r\n yield add(pid)\r\n except NoSuchProcess:\r\n remove(pid)\r\n except AccessDenied:\r\n # Process creation time can't be determined hence there's\r\n # no way to tell whether the pid of the cached process\r\n # has been reused. 
Just return the cached version.\r\n yield proc", "def get_cpu(self):\n pass", "def next(self, *input):\n self.log.info(\"Starting next for task %s\" % self.__class__.__name__)\n\n self.comm.Barrier()\n\n # This should only be called once.\n try:\n if self.done:\n raise pipeline.PipelineStopIteration()\n except AttributeError:\n self.done = True\n\n # Extract a list of the tags for all input arguments\n input_tags = [\n (\n str(icont.attrs.get(\"tag\"))\n if isinstance(icont, memh5.MemDiskGroup)\n else \"\"\n )\n for icont in input\n ]\n\n # Process input and fetch output\n if self._no_input:\n if len(input) > 0:\n # This should never happen. Just here to catch bugs.\n raise RuntimeError(\"Somehow `input` was set.\")\n output = self.process()\n else:\n output = self.process(*input)\n\n # Return immediately if output is None to skip writing phase.\n if output is None:\n return\n\n # Insert the input tags into the output container\n output.attrs[\"input_tags\"] = input_tags\n\n output = self._process_output(output)\n\n # Increment internal counter\n self._count = self._count + 1\n\n self.log.info(\"Leaving next for task %s\" % self.__class__.__name__)\n\n # Return the output for the next task\n return output", "def status(pid_file):\n if not os.path.exists(pid_file):\n return None\n\n pid = None\n with open(pid_file, \"r\") as pf:\n pid = pf.read().strip()\n\n if not pid:\n logger.error(\"Unable to retrieve pid from %s\" % pid_file)\n return None\n\n if not pid.isdigit():\n logger.error(\"Invalid pid %s read from %s\" % (pid, pid_file))\n return None\n\n pid = int(pid)\n\n try:\n # Send 0 signal to check if the process is alive.\n os.kill(pid, 0)\n except OSError as e:\n logger.debug(\"%s\" % e, exc_info=True)\n return None\n return pid", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def process_memory():\n process = psutil.Process()\n return int(convert.bytetomb(process.memory_info().rss))", "def localStageOutPNN(self):\n return self.localStageOut['phedex-node']", "def _job_id_or_out(out):\n\n stdout = re.sub(\"[^0-9]\", \"\", str(out))\n if not stdout:\n stdout = out\n return stdout", "def usage(self):\n self.process = subprocess.Popen(\n# \"ps -u %s -o rss | awk '{sum+=$1} END {print sum}'\" % self.username,\n \"ps -p %s -o rss | awk '{sum+=$1} END {print sum}'\" % self.pid,\n shell=True, stdout=subprocess.PIPE)\n self.stdout_list = self.process.communicate()[0].split('\\n')\n return int(self.stdout_list[0])", "def process_state(self):\n process = self._get_process()\n if not self.is_on:\n process = StateOptions.NONE\n return self._update_feature(WashDeviceFeatures.PROCESS_STATE, process)", "def process_id(self):\n return self._process_id", "def process_id(self):\n return self._process_id", "def get(self):\n if self.proc is not None:\n return self.proc.get()\n\n return None", "def getPid(self):\n try:\n fh = open(self.filename)\n except OSError:\n return None\n line = fh.readline()\n try:\n return string.atoi(line) # trailing newline doesn't matter\n except ValueError:\n return None", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu 
is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def process():\n reader = owslib.wps.WPSDescribeProcessReader()\n root = reader.readFromString(open(resource_file(\"process_description.xml\")).read())\n xml = root.findall(\"ProcessDescription\")[0]\n return owslib.wps.Process(xml)", "def pid(self):\n\t\treturn self.__pid", "def _launch_process_watcher(self, parent_pid, child_pid, child_host, child_port, minecraft_dir):\n\n multiprocessing.freeze_support()\n parent_conn, child_conn = multiprocessing.Pipe()\n self._logger.info(\"Starting process watcher for process {} @ {}:{}\".format(child_pid, child_host, child_port))\n p = multiprocessing.Process(\n target=InstanceManager._process_watcher, args=(\n parent_pid, child_pid, \n child_host, child_port, \n minecraft_dir, child_conn))\n \n def update_port(port):\n parent_conn.send([port])\n # p.daemon = True\n\n p.start()\n return p, update_port" ]
[ "0.59412354", "0.565315", "0.5516685", "0.54894006", "0.5331213", "0.51628983", "0.5010885", "0.5000854", "0.4969542", "0.49433258", "0.49325776", "0.49271032", "0.49117905", "0.49015146", "0.48910594", "0.48898846", "0.48877802", "0.48800886", "0.4823743", "0.4817428", "0.48147675", "0.48073757", "0.47668242", "0.4765273", "0.47608083", "0.4744827", "0.4729575", "0.4707817", "0.47034776", "0.46831846", "0.46777526", "0.46745622", "0.46739635", "0.46450526", "0.463921", "0.46334895", "0.46280667", "0.46235153", "0.46197534", "0.46020028", "0.4592348", "0.45911607", "0.45909312", "0.45907333", "0.45869726", "0.4570612", "0.45700467", "0.45685023", "0.45592716", "0.45592716", "0.45591673", "0.45520338", "0.45478377", "0.4522425", "0.4521543", "0.4518192", "0.45114216", "0.4491661", "0.44844985", "0.4478363", "0.44670147", "0.44596982", "0.44595286", "0.4454159", "0.4453059", "0.4451614", "0.4450799", "0.44499296", "0.44499296", "0.4443336", "0.4436675", "0.44310972", "0.44204697", "0.44193357", "0.44145945", "0.4410587", "0.44061938", "0.4405703", "0.44019088", "0.4397785", "0.4397629", "0.43951133", "0.43935126", "0.43836144", "0.43458351", "0.43448794", "0.43393704", "0.4336903", "0.4336614", "0.43332216", "0.433222", "0.4326016", "0.43252483", "0.43252483", "0.43252134", "0.43164554", "0.43164527", "0.43148765", "0.43142346", "0.43025976" ]
0.49695387
9
Return the next process to run on the CPU. out_process_pid: the pid of the process that just left the CPU, or None in case there was no process running. The engine is responsible for updating the usage time.
def schedule(self, pid, delta_t):
    if len(self._process_list) == 0:
        return None

    if self._last_index >= len(self._process_list):
        self._last_index = 0

    while True:
        priority = sort_priority()
        # print "SCHEDULING PRIORITY ", priority, "\n"
        actual_index = self._last_index + 1

        while True:
            if actual_index >= len(self._process_list):
                actual_index = 0

            process = self._process_list[actual_index]

            if find_priority(process.get_priority()) == priority:
                self._last_index = self._process_list.index(process)
                # print "FIND PROCESS " , actual_index, "\n"
                return process
            else:
                actual_index += 1

                if actual_index >= len(self._process_list):
                    actual_index = 0

                # print "LAST INDEX: ", self._last_index, " ACTUAL INDEX: ", actual_index
                if actual_index == self._last_index:
                    break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self) -> Union['psutil.Process', None]:\n psutil = attempt_import('psutil')\n pid = self.pid\n if pid is None:\n return None\n if not '_process' in self.__dict__ or self._process.pid != int(pid):\n try:\n self._process = psutil.Process(int(pid))\n except Exception as e:\n if self.pid_path.exists():\n self.pid_path.unlink()\n return None\n return self._process", "def get_process(self, pid):\n return self.processes.get(pid, None)", "def select_process(self):\n result = -1\n for idx in self.priority:\n if self.processes[idx].working_time < self.processes[idx].final_execution_time:\n result = idx\n break\n return result", "def get_pid_processor(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get info about processor\n # that PID was scheduled on last time\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n proc_stat = stat_file.readline().strip().split(' ')\n return int(proc_stat[39])\n except EnvironmentError:\n return -1", "def process(self):\n # type: () -> Optional[multiprocessing.Process]\n try:\n return self._process # type: ignore # pylint: disable=no-member\n except:\n return None", "def nextPwmOutput(self):\n hwidRef = YRefParam()\n if YAPI.YISERR(self._nextFunction(hwidRef)):\n return None\n if hwidRef.value == \"\":\n return None\n return YPwmOutput.FindPwmOutput(hwidRef.value)", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def get_overcloud_nodes_running_process(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n oc_nodes_running_process = oc_procs_df.query('PROCESS==\"{}\"'.format(\n process))['overcloud_node'].unique()\n return oc_nodes_running_process", "def free_pid():\n host, pid, tid = get_process_id()\n while True:\n # PIDs are often restricted to a small range. On Linux the range >32k is by default not used.\n pid = random.randint(33000, 65000)\n if not process_alive(host, pid, tid):\n return pid", "def schedule(self, pid, delta_t):\n\t\tsize = len(self._process_list)\n\n\t\tif size == 0: \n\t\t\treturn None\n\n\t\tprocess = self._process_list[self._last_index % size]\n\t\tself._last_index += 1\n\t\treturn process", "def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def next_available_PPN(self):\n if len(self.memory) < self.MaxMemPages:\n return len(self.memory) #physical page number is simply just next index in memory. 
\n else:\n return self.getLRUPPN() #Return the physical page number with the MINUMUM time stamp (least recently used)", "def get_cpu_usage(pid):\n\n cpuusage = subprocess.Popen(['ps','-o', 'pcpu', '-p', str(pid)], shell=False, stdout=subprocess.PIPE)\n cpuusage.stdout.readline()\n return float(cpuusage.stdout.readline().rstrip())", "def get_process_object(pid, die=True):\n try:\n return psutil.Process(pid)\n except psutil.NoSuchProcess as e:\n if die:\n raise e\n else:\n return None", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def update_pid_running_on_dpdk_cpu(self):\n #cpu_list = self.get_cpu_list_by_mask", "def get_my_process():\n return get_process_object(os.getpid())", "def get_process_speed(self, pid):\n raise NotImplementedError()", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def pid(self):\n # type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def process(proc_data):\n\n # No further processing\n return proc_data", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def get_process(self) -> ApplyResult:\n return self._process", "def get_pid(self, file_path) -> int | None:\n if file_path in self._processors:\n return 
self._processors[file_path].pid\n return None", "def get_qc_process(process):\n\n # Each entry in input_output_maps is an input/output specification with a single\n # input and any number of outputs. This gets the first input.\n first_io = process.input_output_maps[0]\n first_in_artifact = first_io[0]['uri']\n\n processes = process.lims.get_processes(inputartifactlimsid=first_in_artifact.id)\n seq_processes = [proc for proc in processes if proc.type_name in [p[1] for p in QC_PROCESSES]]\n # Use the last sequencing process. In case of crashed runs, this will be the right one.\n try:\n return seq_processes[-1]\n except IndexError:\n return None", "def schedule(self, pid, delta_t):\n\n\t\tif len(self._process_list) == 0: \n\t\t\treturn None\n\n\t\tif self._last_index >= len(self._process_list):\n\t\t\tself._last_index = 0\n\t\t\n\t\twhile True:\n\n\t\t\tpriority = sort_priority()\n\t\t\t# print \"SCHEDULING PRIORITY \", priority, \"\\n\"\n\t\t\tactual_index = self._last_index + 1\n\n\t\t\twhile True:\n\t\t\t\t# print \"FIND PROCESS \" , actual_index, \"\\n\"\n\t\t\t\t\n\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\tactual_index = 0\n\t\t\t\t\n\t\t\t\tprocess = self._process_list[actual_index] \n\t\t\t\t\n\t\t\t\tif find_priority(process.get_priority()) == priority:\n\t\t\t\t\tself._last_index = self._process_list.index(process)\n\t\t\t\t\treturn process\n\t\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tactual_index += 1\n\n\t\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\t\tactual_index = 0\n\t\t\t\t\t\tpriority = sort_priority()\n\n\t\t\t\t\t# print \"LAST INDEX: \", self._last_index, \" ACTUAL INDEX: \", actual_index\t\t\t\t\t\n\n\t\t\t\t\tif actual_index == self._last_index: \n\t\t\t\t\t\tbreak", "def _maybe_use_running_output(process, outputs):\n if os.environ.get(\"POLYSQUARE_ALWAYS_PRINT_PROCESS_OUTPUT\", None):\n return running_output(process, outputs)\n\n return None", "def get_process_by_process_id(self, process_id):\n try:\n process = Process.objects.get(pk=process_id)\n except Process.DoesNotExist:\n process = None\n\n return process", "def compute_memory_usage(app_pid):\n logging.info(\"Computing memory usage...\")\n\n try:\n p = subprocess.Popen(\n \"top -l 1 -pid {PID}\".format(PID=app_pid),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=True\n )\n output = p.communicate()[0].strip()\n\n memory_usage = round(float(str(list(filter(('').__ne__, str(output.splitlines()[-1]).split(\" \")))[7])[:-1]) / 1024.00, 2)\n logging.info(\"Memory usage: [{MEMORY} MB]\".format(MEMORY=memory_usage))\n\n except Exception as e:\n logging.error(\"Computing the memory usage failed with error '{ERROR}'\".format(ERROR=e))\n return None\n\n logging.info(\"Memory usage computed successfuly!\")\n return memory_usage", "def emit(self, pid):\n print \"Emit with pid: {}\".format(pid)\n if pid is None:\n return False\n metrics = {'cpu': 0,\n 'ram': 1,\n 'net': 2,\n 'bytes_sent': 1,\n 'bytes_recv': 1,\n 'packets_sent': 1,\n 'packets_recv': 1,\n 'errin': 0,\n 'errout': 0,\n 'dropin': 0,\n 'dropout': 0,\n 'disk': 0,\n 'files': 0,\n 'time': calendar.timegm(time.gmtime()) * 1000}\n #return False # prevent this\n ##########################################################################################################\n # psutil read metrics\n try:\n # self.proc = psutil.Process(pid)\n process_name = None\n if self.personal_cloud.lower() == \"stacksync\":\n if os.name == 'nt':\n process_name = \"javaw.exe\"\n elif os.name == \"posix\":\n 
process_name = \"java\"\n\n elif self.personal_cloud.lower() == \"dropbox\":\n if os.name == 'nt':\n process_name = \"Dropbox.exe\"\n elif os.name == \"posix\":\n process_name = \"dropbox\"\n elif self.personal_cloud.lower() == \"owncloud\":\n process_name = \"owncloudcmd\"\n elif self.personal_cloud.lower() == 'mega':\n process_name = \"megacmd\"\n\n if self.proc is None or self.proc.pid != pid:\n self.proc = psutil.Process(pid)\n\n if process_name == self.proc.name() or \"owncloudcmd\" == process_name or \"megacmd\" == process_name:\n print \"OKEY match {} == {}\".format(self.proc.name(), process_name)\n else:\n print \"sync client does not match: {}\".format(process_name)\n return False\n\n except Exception as ex:\n print \"sync client is not running! {}\".format(pid)\n print ex.message\n return False # exit as the process is not alive.\n\n ##########################################################################################################\n print \"PID: {} [{}]\".format(pid, self.personal_cloud.lower())\n try:\n if self.personal_cloud.lower() == \"stacksync\":\n # todo lookup for stacksync process here => using psutil\n cpu_usage = int(math.ceil(self.proc.cpu_percent(0)))\n ram_usage = self.proc.memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"owncloud\":\n cpu_usage = int(math.ceil(self.proc.children()[0].cpu_percent(interval=1)))\n ram_usage = self.proc.children()[0].memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"mega\":\n cpu_usage = int(math.ceil(self.proc.children()[0].cpu_percent(interval=1)))\n ram_usage = self.proc.children()[0].memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"dropbox\":\n # todo lookup for dropbox process here => using psutil\n cpu_usage = int(math.ceil(self.proc.cpu_percent(0)))\n ram_usage = self.proc.memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n except Exception as e:\n print e.message\n\n # assign the network usage metric\n\n if self.prev_metric is not None:\n # do nothing because its the first emit ant there are no previous metric to compare\n # last_net = self.prev_metric['metrics']['net']\n last_time = self.prev_metric['metrics']['time']\n\n curr_net_counter = psutil.net_io_counters(pernic=True)['eth0'] # read the bytes from somewhere\n curr_time = metrics['time']\n elapsed_time = (curr_time - last_time) / 1000 # segons\n for key, value in curr_net_counter.__dict__.items():\n metrics[key] = (value - getattr(self.prev_net_counter, key)) / elapsed_time # unit is seconds\n self.prev_net_counter = curr_net_counter\n # assign hard drive usage metric\n\n if os.name == \"nt\":\n drive_usage = \"1234\"\n elif os.name == \"posix\":\n drive_usage_cmd = ['/usr/bin/du', '-ks', '/home/vagrant/{}'.format(self.personal_folder)]\n drive_usage_output = subprocess.Popen(drive_usage_cmd, stdout=subprocess.PIPE)\n drive_usage = drive_usage_output.stdout.read()\n try:\n metrics['disk'] = int(drive_usage.split('\\t')[0]) # kilo bytes cast string to int\n except Exception as ex:\n print \"invalid literal for... 
memory unit\"\n metrics['disk'] = 1\n # assign add folder num of files metric\n\n\n\n\n if os.name == \"nt\":\n num_files = \"123\"\n elif os.name == \"posix\":\n find_cmd = '/usr/bin/find /home/vagrant/{} -type f'.format(self.personal_folder).split()\n proc_find = subprocess.Popen(find_cmd, stdout=subprocess.PIPE)\n wc_cmd = '/usr/bin/wc -l'.split()\n proc_wc = subprocess.Popen(wc_cmd, stdin=proc_find.stdout, stdout=subprocess.PIPE)\n num_files = proc_wc.communicate()[0]\n try:\n metrics['files'] = int(num_files.split('\\t')[0])\n except Exception as ex:\n print \"invalid literal for... file counter\"\n\n net_stats = self.traffic_monitor.notify_stats()\n # z = dict(x.items() + y.items()) => metrics\n # envez de esto dict join\n metrics['data_rate_size_up'] = net_stats['data_rate']['size_up']\n metrics['data_rate_size_down'] = net_stats['data_rate']['size_down']\n metrics['data_rate_pack_up'] = net_stats['data_rate']['pack_up']\n metrics['data_rate_pack_down'] = net_stats['data_rate']['pack_down']\n metrics['meta_rate_size_up'] = net_stats['meta_rate']['size_up']\n metrics['meta_rate_size_down'] = net_stats['meta_rate']['size_down']\n metrics['meta_rate_pack_up'] = net_stats['meta_rate']['pack_up']\n metrics['meta_rate_pack_down'] = net_stats['meta_rate']['pack_down']\n\n '''\n {'data_rate':\n {'size_up': 0.471, 'pack_down': 0.00175, 'pack_up': 0.00225, 'size_down': 0.612},\n 'meta_rate':\n {'size_up': 0.0, 'pack_down': 0.0, 'pack_up': 0.0, 'size_down': 0.0},\n 'time': 1461065156000\n }\n '''\n\n tags = ''\n if tags == '':\n tags = {\n 'profile': self.receipt,\n 'credentials': 'pc_credentials',\n 'client': self.personal_cloud.lower(),\n }\n\n data = {\n 'metrics': metrics,\n 'tags': tags\n }\n self.prev_metric = data # update the last emited metric\n msg = json.dumps(data)\n print msg\n\n self.channel.basic_publish(\n exchange='metrics',\n routing_key=self.hostname,\n body=msg)\n\n return True", "def get_cpu_number():\n try:\n output = subprocess.check_output('lscpu').decode(\"utf-8\")\n for line in output.splitlines():\n m = re.match(r'NUMA node0.*:\\s*\\d+-(\\d+)', line)\n if m:\n return m.group(1)\n except OSError:\n pass\n sys.stderr.write(\"Warning: Unable to select CPU ID, using 0\\n\")\n return 0", "def GetCpuStats(self, pid):\n class ProcTaskInfo(ctypes.Structure):\n \"\"\"Struct for proc_pidinfo() call.\"\"\"\n _fields_ = [(\"pti_virtual_size\", ctypes.c_uint64),\n (\"pti_resident_size\", ctypes.c_uint64),\n (\"pti_total_user\", ctypes.c_uint64),\n (\"pti_total_system\", ctypes.c_uint64),\n (\"pti_threads_user\", ctypes.c_uint64),\n (\"pti_threads_system\", ctypes.c_uint64),\n (\"pti_policy\", ctypes.c_int32),\n (\"pti_faults\", ctypes.c_int32),\n (\"pti_pageins\", ctypes.c_int32),\n (\"pti_cow_faults\", ctypes.c_int32),\n (\"pti_messages_sent\", ctypes.c_int32),\n (\"pti_messages_received\", ctypes.c_int32),\n (\"pti_syscalls_mach\", ctypes.c_int32),\n (\"pti_syscalls_unix\", ctypes.c_int32),\n (\"pti_csw\", ctypes.c_int32),\n (\"pti_threadnum\", ctypes.c_int32),\n (\"pti_numrunning\", ctypes.c_int32),\n (\"pti_priority\", ctypes.c_int32)]\n PROC_PIDTASKINFO = 4\n def __init__(self):\n self.size = ctypes.sizeof(self)\n super(ProcTaskInfo, self).__init__() # pylint: disable=bad-super-call\n\n proc_info = ProcTaskInfo()\n if not self.libproc:\n self.libproc = ctypes.CDLL(ctypes.util.find_library('libproc'))\n self.libproc.proc_pidinfo(pid, proc_info.PROC_PIDTASKINFO, 0,\n ctypes.byref(proc_info), proc_info.size)\n\n # Convert nanoseconds to seconds.\n cpu_time = 
(proc_info.pti_total_user / 1000000000.0 +\n proc_info.pti_total_system / 1000000000.0)\n results = {'CpuProcessTime': cpu_time,\n 'ContextSwitches': proc_info.pti_csw}\n\n # top only reports idle wakeup count starting from OS X 10.9.\n if self.GetOSVersionName() >= os_version_module.MAVERICKS:\n results.update({'IdleWakeupCount': self._GetIdleWakeupCount(pid)})\n return results", "def get(input=None):\n if isinstance(input, datetime.datetime):\n return Elapsed((datetime.datetime.now() - input).total_seconds())\n if not input or isinstance(input, int):\n pid = input if input else os.getpid()\n output = os.popen(\"ps -p %s -o etime | grep -v ELAPSED\" %\n pid).read().strip()\n if output:\n return Elapsed(_parse_ps_output(output))", "def current_process(self):\n return self._current_process", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def pid(self):\n return self._process.pid", "def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None", "def _pid(self, name):\n return self.pid_lookup[name]", "def get_overall_cpu_util(dut, exclude_proc_name=None):", "def get_next_gp(self):\n raise NotImplementedError('Abstract Method')", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def _obtain(self):\n\n while True:\n # make sure we're observing load maximums\n if self.max_load is not None:\n try:\n load = os.getloadavg()\n if jobserver_running_jobs() > 0 and load[1] > self.max_load:\n time.sleep(0.01)\n continue\n except NotImplementedError:\n pass\n\n # make sure we're observing memory maximum\n if self.max_mem is not None:\n mem_used, mem_total = memory_usage()\n mem_percent_used = 100.0 * float(mem_used) / float(mem_total)\n if jobserver_running_jobs() > 0 and mem_percent_used > self.max_mem:\n time.sleep(0.01)\n continue\n\n # get a token from the job pipe\n try:\n token = os.read(self.job_pipe[0], 1)\n return token\n except OSError as e:\n if e.errno != errno.EINTR:\n raise", "def _cpu_usage(self, e):\n\n cores = os.cpu_count()\n try:\n cpu_usage = int(self.cpu_entry.get())\n if cpu_usage < 0 or cpu_usage > 100:\n self.invalid_input()\n elif cpu_usage == 0:\n self.processes = 1\n else:\n self.processes = round(cpu_usage / 100 * cores)\n except ValueError:\n self.invalid_input()", "def worker_process(self, item):\n g_sleep()\n return item", "def unit_for_pid(pid):\n try:\n cgroup = slurp('/proc/%d/cgroup' % pid)\n match = re.search(\"1:name=systemd:/system.slice/(?:jvm:)?(.+?)\\.service\", cgroup)\n return match.group(1) if match else None\n except IOError:\n return None", "def pidGet(self) -> float:\n ...", "def pidGet(self) -> float:\n ...", "def resource_monitor(childpid, pipe_handle):\n # Get our pid\n ourpid = os.getpid()\n \n # Calculate how often disk should be checked\n disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_LINUX / repy_constants.CPU_POLLING_FREQ_LINUX)\n current_interval = -1 # What cycle are we on \n \n # Store time of the last interval\n last_time = getruntime()\n last_CPU_time = 0\n resume_time = 0 \n \n # Run 
forever...\n while True:\n ########### Check CPU ###########\n # Get elapsed time\n currenttime = getruntime()\n elapsedtime1 = currenttime - last_time # Calculate against last run\n elapsedtime2 = currenttime - resume_time # Calculate since we last resumed repy\n elapsedtime = min(elapsedtime1, elapsedtime2) # Take the minimum interval\n last_time = currenttime # Save the current time\n \n # Safety check, prevent ZeroDivisionError\n if elapsedtime == 0.0:\n continue\n \n # Get the total cpu at this point\n totalCPU = os_api.get_process_cpu_time(ourpid) # Our own usage\n totalCPU += os_api.get_process_cpu_time(childpid) # Repy's usage\n \n # Calculate percentage of CPU used\n percentused = (totalCPU - last_CPU_time) / elapsedtime\n \n # Do not throttle for the first interval, wrap around\n # Store the totalCPU for the next cycle\n if last_CPU_time == 0:\n last_CPU_time = totalCPU \n continue\n else:\n last_CPU_time = totalCPU\n \n # Calculate stop time\n stoptime = nanny.calculate_cpu_sleep_interval(nanny.get_resource_limit(\"cpu\"), percentused, elapsedtime)\n \n # If we are supposed to stop repy, then suspend, sleep and resume\n if stoptime > 0.0:\n # They must be punished by stopping\n os.kill(childpid, signal.SIGSTOP)\n\n # Sleep until time to resume\n time.sleep(stoptime)\n\n # And now they can start back up!\n os.kill(childpid, signal.SIGCONT)\n \n # Save the resume time\n resume_time = getruntime()\n\n # Send this information as a tuple containing the time repy was stopped and\n # for how long it was stopped\n write_message_to_pipe(pipe_handle, \"repystopped\", (currenttime, stoptime))\n \n \n ########### End Check CPU ###########\n # \n ########### Check Memory ###########\n \n # Get how much memory repy is using\n memused = os_api.get_process_rss()\n \n # Check if it is using too much memory\n if memused > nanny.get_resource_limit(\"memory\"):\n raise ResourceException, \"Memory use '\"+str(memused)+\"' over limit '\"+str(nanny.get_resource_limit(\"memory\"))+\"'.\"\n \n ########### End Check Memory ###########\n # \n ########### Check Disk Usage ###########\n # Increment our current cycle\n current_interval += 1;\n \n # Check if it is time to check the disk usage\n if (current_interval % disk_interval) == 0:\n # Reset the interval\n current_interval = 0\n \n # Calculate disk used\n diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)\n\n # Raise exception if we are over limit\n if diskused > nanny.get_resource_limit(\"diskused\"):\n raise ResourceException, \"Disk use '\"+str(diskused)+\"' over limit '\"+str(nanny.get_resource_limit(\"diskused\"))+\"'.\"\n\n # Send the disk usage information, raw bytes used\n write_message_to_pipe(pipe_handle, \"diskused\", diskused)\n \n ########### End Check Disk ###########\n \n # Sleep before the next iteration\n time.sleep(repy_constants.CPU_POLLING_FREQ_LINUX)", "def get_top():\n print(\"This processes are using the cpu the most:\")\n print(os.system(\"ps axo %cpu,pid,euser,cmd | sort -nr | head -n 5\"))", "def usePIDOutput(self, output):\n self.motor.pidWrite(output)", "def get_isolate_cpus(self):\n\n command = \"cat /proc/cpuinfo | grep processor | awk '{print $NF}'\"\n out = run_and_getout(command)\n str_out = out.decode(self.default_code).replace('\\n', ' ').strip()\n str_out = str(str_out)\n if str_out[0] == \"0\":\n return str_out[2:]\n else:\n return str_out", "def get_sequencing_process(process):\n\n # Each entry in input_output_maps is an input/output specification with a single\n # input and any number of outputs. 
This gets the first input.\n first_io = process.input_output_maps[0]\n first_in_artifact = first_io[0]['uri']\n\n processes = process.lims.get_processes(inputartifactlimsid=first_in_artifact.id)\n seq_processes = [proc for proc in processes if proc.type_name in [p[1] for p in SEQ_PROCESSES]]\n # Use the last sequencing process. In case of crashed runs, this will be the right one.\n try:\n return seq_processes[-1]\n except IndexError:\n return None", "def get_cpu_clock_cycles_of_pid(pid):\n try:\n with open(LINUX_PROCESS_STAT_LOCATION % pid, 'r') as f:\n pid_entries = f.read().split(' ')\n except IOError:\n return None\n\n pid_cycles = 0\n if len(pid_entries) > 14:\n pid_cycles = int(pid_entries[13]) + int(pid_entries[14])\n return pid_cycles", "def _get_pid(split_data, sensor):\n prot, ip_dst, port_dst, timestamp = split_data\n prot = prot.lower()\n\n if not sanitizer.check_get_pid_params(prot, ip_dst, port_dst, timestamp):\n return '-1,error checking input'\n\n return sensor.search_process(prot, ip_dst, port_dst, timestamp)", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def get_process_info_by_pid(self, pid):\n # TODO: discuss if self.logger needs to happen here? I think not? -BY\n\n for process in self.data_storage.running_data:\n if self.data_storage.running_data[process]['pid'] == pid:\n return self.data_storage.running_data[process]", "def get_process(ngrok_path, config_path=None, auth_token=None, region=None):\n if ngrok_path in _current_processes:\n # Ensure the process is still running and hasn't been killed externally\n if _current_processes[ngrok_path].proc.poll() is None:\n return _current_processes[ngrok_path]\n else:\n _current_processes.pop(ngrok_path, None)\n\n return _start_process(ngrok_path, config_path, auth_token, region)", "def pid(self):\n return self._get_process_id()", "def getProcInfo(self, line):\n try:\n pid, rss, cpu, cmdAndArgs = line.split(None, 3)\n except ValueError:\n # Defunct processes look like this (no RSS data)\n # '28835916 00:00:00 <defunct>'\n pid, cpu, cmdAndArgs = line.split(None, 2)\n rss = \"0\"\n # Exiting and Idle processes look like this\n # (no RSS data, TIME data == '-')\n # '11337738 - <exiting>'\n # '11862166 - <idle>'\n # _extractProcessMetrics(self, line) method will try\n # to parseCpuTime('-') with exception\n if cpu == \"-\":\n cpu = \"00:00:00\"\n\n return pid, rss, cpu, cmdAndArgs", "def find_e_hunt_pid():\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name', 'username'])\n except psutil.NoSuchProcess:\n pass\n else:\n if pinfo[\"name\"] == \"The Equinox Hunt.exe\":\n return pinfo['pid']\n raise EnvironmentError(\"Equinox Hunt not found!\")", "def allocated_cpu(self):\n return self._allocated_cpu", "def get_cpu_usage(pid):\n try:\n process = psutil.Process(pid) \n cpu = process.cpu_times()[0]\n logging.info(f\"Calculated CPU usage {cpu} for PID {pid}\")\n return float(cpu)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def cpu(self):\r\n return self._cpu", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")", "def cpu(self) -> Optional[Any]:\n 
return pulumi.get(self, \"cpu\")", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def get_processor_output(location: Optional[pulumi.Input[str]] = None,\n processor_id: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProcessorResult]:\n ...", "def read(self):\n if self.alive:\n with self._register_poll():\n with _unblock_read(self._proc):\n return self._yield_ready_read()\n else:\n raise ProcessIsDeadError('Can not read. The process is already dead.')", "def output_node(self, port: int):\n return self._output_nodes_map[port]", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def run_processing_engine(input_file):\n from subprocess import Popen, PIPE\n p = Popen([\"python\", \"process.py\", input_file, \"-v\"], stdout=PIPE)\n return p.wait()", "def get_cpu_core():\n processor_info = subprocess.getoutput('dmidecode -t processor')\n cpu_core_value = re.findall(r'(?i)Core Count:\\s+(.*?)\\n', processor_info, re.S)[0]\n log.info('cpu_core value:{}'.format(cpu_core_value))\n if cpu_core_value:\n cpu_core = cpu_core_value\n else:\n cpu_core = ''\n return cpu_core", "def get_cpu_usage():\n process_details = RU_OBJ.get_curr_processes()\n return json.dumps(sorted(process_details, key=lambda k: k['name']))", "def next_job(data, rank):\n for j in data.queue:\n process = data.nodes[j[0]]\n status = j[1]\n if process.is_ready() and status == -1:\n j[1] = rank\n return process\n return None # no job ready to execute or job finished", "def _while_loop(pd, nmax, max_loop, ncore, outfile):\n \n # make sure the random seed is different for every processor\n random_number_seeds = [3456789, 7654321, 2435467, 8273645,\n 1085712, 4154712, 1248291, 8415917,\n 2345161, 5710916, 5718601, 7516234,\n 9235161, 4917519, 1111245, 8167834] \n \n # get the processor ID (1 - Ncore) and convert to single integer\n current = multiprocessing.current_process()\n pid = current._identity\n pid = pid[0]\n \n #\n # choose a different seed for each processor from the list so each processor has\n # a different randum number seed. 
Then, fiddle with each seed a little so \n # the seeds aren't the same every time the code is run\n seed = np.int(random_number_seeds[pid] * (np.random.rand()*(10.0 - 0.01) + 0.01))\n \n np.random.seed(seed)\n \n #print 'id and seed', pid, seed\n \n n_particles = 0\n loop_counter = 0\n fmin_scale = 1.0E-100 # -16 -> -32 -> -100\n F_max = np.max(pd.DF.f) #; F_min = np.min(pd.DF.f);\n F_min = np.min(pd.DF.f) * fmin_scale\n # F_max = np.max(pd.DF.f[:-1])#; F_max = 1.0E-88\n #print F_min, F_max\n if pd.optimize:\n relative_potential = pd._interpolate_relative_potential\n else:\n relative_potential = pd.DF.relative_potential\n \n \n pos = np.zeros((nmax, 3))\n vel = np.zeros((nmax, 3)) \n \n while (( n_particles < nmax) and (loop_counter < max_loop)):\n \n r = pd._choose_position()\n Psi = relative_potential(r) \n \n v = pd._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n \n f_E = pd.DF.interpolate_f(E)\n \n logF = ( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n # if choosing random F in log F, might be good to do the comparison in logspace as well\n #.... i.e log(F) <= log(f_E) \n #\n # 0 FOR F_E MEANS THAT E < E_min of the potential. THis happens when Psi - KE is smaller\n # than the value of the potential at large_r... should this be considered unbound \n # even though it isn't zero? Is this effectively zero? This has been adjusted in the velocity\n # picking routine but needs verification to make sure it works.... but regardless, I doubt\n # that this is the reason why the particles are failing for NFW but working for hernquist....\n #\n if np.abs(np.log10(f_E)) == np.inf:\n keep_particle = False\n _my_print('log value error... throwing out particle')\n else:\n keep_particle = (logF <= np.log10(f_E))\n\n \n if keep_particle: \n index = n_particles \n \n # \n # need to sample in cosine theta NOT theta!!!!! WTF!!!!!\n #\n #\n \n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n \n # convert position to cartesian using random theta and phi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n pos[index] = r * np.array([x,y,z])\n\n # repeat for velocity using new random numbersw\n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n vel[index] = v * np.array([vx,vy,vz])\n \"\"\" \n #else: # do strict cartesian ... slower.... 
less attractive\n # \n # axis_list = [0, 1, 2]\n # random.shuffle(axis_list)\n \n # #axis_index = np.random.randint(3)\n # first_axis = axis_list[0]\n #\n # pos[index, first_axis] = np.random.rand() * (2.0*r) - r\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n # second_axis = axis_list[1]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2)\n \n pos[index, second_axis] = np.random.rand()*(2.0 * max_r) - max_r\n #del axis_list[axis_index]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2 - pos[index,second_axis]**2)\n \n third_axis = axis_list[2]\n pos[index, third_axis] = np.random.rand() * (2.0 * max_r) - max_r\n \n if np.sqrt(pos[index,0]**2 + pos[index,1]**2 + pos[index,2]**2) > r:\n _my_print('R IS TOO LARGE')\n \n ###\n axis_list = [0, 1, 2]\n random.shuffle(axis_list)\n \n #axis_index = np.random.randint(3)\n first_axis = axis_list[0]\n \n vel[index, first_axis] = np.random.rand() * (2.0*v) - v\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n second_axis = axis_list[1]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2)\n \n vel[index, second_axis] = np.random.rand()*(2.0 * max_v) - max_v\n #del axis_list[axis_index]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2 - vel[index,second_axis]**2)\n \n third_axis = axis_list[2]\n vel[index, third_axis] = np.random.rand() * (2.0 * max_v) - max_v \n \n \n \"\"\"\n n_particles = n_particles + 1\n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n # now write out to a temporary file\n f = open(outfile + \"_%02i_\"%(pid) + \".temp\", 'w')\n fmt = \"%12.12E %12.12E %12.12E %12.12E %12.12E %12.12E %12.12E\\n\"\n \n \n for i in np.arange(nmax):\n f.write(fmt%(pd.M_part, pos[i,0], pos[i,1], pos[i,2], \n vel[i,0], vel[i,1], vel[i,2]))\n \n \n f.close() \n \n return pos, vel", "def spinupoutputprocess():\n if __name__ == '__main__':\n _hwmgr = HardwareController(OUTPUT_SETTINGS)\n PROCESSES.append(_hwmgr)\n _hwmgr.start()", "def process_iter():\r\n def add(pid):\r\n proc = Process(pid)\r\n _pmap[proc.pid] = proc\r\n return proc\r\n\r\n def remove(pid):\r\n _pmap.pop(pid, None)\r\n\r\n a = set(get_pid_list())\r\n b = set(_pmap.keys())\r\n new_pids = a - b\r\n gone_pids = b - a\r\n\r\n for pid in gone_pids:\r\n remove(pid)\r\n for pid, proc in sorted(list(_pmap.items()) + \\\r\n list(dict.fromkeys(new_pids).items())):\r\n try:\r\n if proc is None: # new process\r\n yield add(pid)\r\n else:\r\n # use is_running() to check whether PID has been reused by\r\n # another process in which case yield a new Process instance\r\n if proc.is_running():\r\n yield proc\r\n else:\r\n yield add(pid)\r\n except NoSuchProcess:\r\n remove(pid)\r\n except AccessDenied:\r\n # Process creation time can't be determined hence there's\r\n # no way to tell whether the pid of the cached process\r\n # has been reused. 
Just return the cached version.\r\n yield proc", "def get_cpu(self):\n pass", "def next(self, *input):\n self.log.info(\"Starting next for task %s\" % self.__class__.__name__)\n\n self.comm.Barrier()\n\n # This should only be called once.\n try:\n if self.done:\n raise pipeline.PipelineStopIteration()\n except AttributeError:\n self.done = True\n\n # Extract a list of the tags for all input arguments\n input_tags = [\n (\n str(icont.attrs.get(\"tag\"))\n if isinstance(icont, memh5.MemDiskGroup)\n else \"\"\n )\n for icont in input\n ]\n\n # Process input and fetch output\n if self._no_input:\n if len(input) > 0:\n # This should never happen. Just here to catch bugs.\n raise RuntimeError(\"Somehow `input` was set.\")\n output = self.process()\n else:\n output = self.process(*input)\n\n # Return immediately if output is None to skip writing phase.\n if output is None:\n return\n\n # Insert the input tags into the output container\n output.attrs[\"input_tags\"] = input_tags\n\n output = self._process_output(output)\n\n # Increment internal counter\n self._count = self._count + 1\n\n self.log.info(\"Leaving next for task %s\" % self.__class__.__name__)\n\n # Return the output for the next task\n return output", "def status(pid_file):\n if not os.path.exists(pid_file):\n return None\n\n pid = None\n with open(pid_file, \"r\") as pf:\n pid = pf.read().strip()\n\n if not pid:\n logger.error(\"Unable to retrieve pid from %s\" % pid_file)\n return None\n\n if not pid.isdigit():\n logger.error(\"Invalid pid %s read from %s\" % (pid, pid_file))\n return None\n\n pid = int(pid)\n\n try:\n # Send 0 signal to check if the process is alive.\n os.kill(pid, 0)\n except OSError as e:\n logger.debug(\"%s\" % e, exc_info=True)\n return None\n return pid", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def localStageOutPNN(self):\n return self.localStageOut['phedex-node']", "def process_memory():\n process = psutil.Process()\n return int(convert.bytetomb(process.memory_info().rss))", "def _job_id_or_out(out):\n\n stdout = re.sub(\"[^0-9]\", \"\", str(out))\n if not stdout:\n stdout = out\n return stdout", "def usage(self):\n self.process = subprocess.Popen(\n# \"ps -u %s -o rss | awk '{sum+=$1} END {print sum}'\" % self.username,\n \"ps -p %s -o rss | awk '{sum+=$1} END {print sum}'\" % self.pid,\n shell=True, stdout=subprocess.PIPE)\n self.stdout_list = self.process.communicate()[0].split('\\n')\n return int(self.stdout_list[0])", "def get(self):\n if self.proc is not None:\n return self.proc.get()\n\n return None", "def process_state(self):\n process = self._get_process()\n if not self.is_on:\n process = StateOptions.NONE\n return self._update_feature(WashDeviceFeatures.PROCESS_STATE, process)", "def process_id(self):\n return self._process_id", "def process_id(self):\n return self._process_id", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return 
np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def getPid(self):\n try:\n fh = open(self.filename)\n except OSError:\n return None\n line = fh.readline()\n try:\n return string.atoi(line) # trailing newline doesn't matter\n except ValueError:\n return None", "def process():\n reader = owslib.wps.WPSDescribeProcessReader()\n root = reader.readFromString(open(resource_file(\"process_description.xml\")).read())\n xml = root.findall(\"ProcessDescription\")[0]\n return owslib.wps.Process(xml)", "def pid(self):\n\t\treturn self.__pid", "def _launch_process_watcher(self, parent_pid, child_pid, child_host, child_port, minecraft_dir):\n\n multiprocessing.freeze_support()\n parent_conn, child_conn = multiprocessing.Pipe()\n self._logger.info(\"Starting process watcher for process {} @ {}:{}\".format(child_pid, child_host, child_port))\n p = multiprocessing.Process(\n target=InstanceManager._process_watcher, args=(\n parent_pid, child_pid, \n child_host, child_port, \n minecraft_dir, child_conn))\n \n def update_port(port):\n parent_conn.send([port])\n # p.daemon = True\n\n p.start()\n return p, update_port" ]
[ "0.5940735", "0.56525385", "0.55158997", "0.5488523", "0.5330522", "0.5164322", "0.5010265", "0.50001043", "0.4968517", "0.49675882", "0.49425843", "0.49308014", "0.49262333", "0.4910881", "0.4902073", "0.4888884", "0.48885095", "0.4887398", "0.4878581", "0.4822437", "0.4816112", "0.48140374", "0.48067644", "0.4766661", "0.47629395", "0.47596774", "0.47437885", "0.47303748", "0.47059873", "0.4703673", "0.4683248", "0.46775612", "0.46745068", "0.46733883", "0.46439397", "0.46385866", "0.4627357", "0.46220326", "0.46183622", "0.46003526", "0.45907822", "0.45905507", "0.45899662", "0.458987", "0.4583966", "0.45693725", "0.4567813", "0.45673132", "0.45601878", "0.45579532", "0.45579532", "0.45506197", "0.45468453", "0.4521983", "0.45192644", "0.4518466", "0.45100984", "0.44912085", "0.4483732", "0.44773525", "0.4466387", "0.44580293", "0.44578975", "0.44536105", "0.44510862", "0.44507083", "0.44494453", "0.44488963", "0.44488963", "0.44407064", "0.44339928", "0.44292372", "0.44207236", "0.44188827", "0.44160235", "0.441092", "0.44059226", "0.4404102", "0.43999866", "0.43980148", "0.43969536", "0.43942243", "0.43918782", "0.43816084", "0.4345553", "0.4345139", "0.43387717", "0.43373084", "0.4335466", "0.43328556", "0.43307364", "0.43250608", "0.43250272", "0.432373", "0.432373", "0.43161136", "0.43153372", "0.43147764", "0.43130118", "0.43022156" ]
0.46316713
36
Return the next process to run on the CPU. out_process_pid: the pid of the process that just left the CPU, or None in case there was no process running. The engine is responsible for updating the usage time.
def schedule(self, pid, delta_t):
    if len(self._process_list) == 0:
        return None

    if self._last_index >= len(self._process_list):
        self._last_index = 0

    while True:
        priority = sort_priority()
        # print "SCHEDULING PRIORITY ", priority, "\n"
        actual_index = self._last_index + 1

        while True:
            # print "FIND PROCESS " , actual_index, "\n"
            if actual_index >= len(self._process_list):
                actual_index = 0

            process = self._process_list[actual_index]

            if find_priority(process.get_priority()) == priority:
                self._last_index = self._process_list.index(process)
                return process
            else:
                actual_index += 1

                if actual_index >= len(self._process_list):
                    actual_index = 0
                    priority = sort_priority()

                # print "LAST INDEX: ", self._last_index, " ACTUAL INDEX: ", actual_index
                if actual_index == self._last_index:
                    break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self) -> Union['psutil.Process', None]:\n psutil = attempt_import('psutil')\n pid = self.pid\n if pid is None:\n return None\n if not '_process' in self.__dict__ or self._process.pid != int(pid):\n try:\n self._process = psutil.Process(int(pid))\n except Exception as e:\n if self.pid_path.exists():\n self.pid_path.unlink()\n return None\n return self._process", "def get_process(self, pid):\n return self.processes.get(pid, None)", "def select_process(self):\n result = -1\n for idx in self.priority:\n if self.processes[idx].working_time < self.processes[idx].final_execution_time:\n result = idx\n break\n return result", "def get_pid_processor(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get info about processor\n # that PID was scheduled on last time\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n proc_stat = stat_file.readline().strip().split(' ')\n return int(proc_stat[39])\n except EnvironmentError:\n return -1", "def process(self):\n # type: () -> Optional[multiprocessing.Process]\n try:\n return self._process # type: ignore # pylint: disable=no-member\n except:\n return None", "def nextPwmOutput(self):\n hwidRef = YRefParam()\n if YAPI.YISERR(self._nextFunction(hwidRef)):\n return None\n if hwidRef.value == \"\":\n return None\n return YPwmOutput.FindPwmOutput(hwidRef.value)", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def get_overcloud_nodes_running_process(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n oc_nodes_running_process = oc_procs_df.query('PROCESS==\"{}\"'.format(\n process))['overcloud_node'].unique()\n return oc_nodes_running_process", "def free_pid():\n host, pid, tid = get_process_id()\n while True:\n # PIDs are often restricted to a small range. On Linux the range >32k is by default not used.\n pid = random.randint(33000, 65000)\n if not process_alive(host, pid, tid):\n return pid", "def schedule(self, pid, delta_t):\n\t\tsize = len(self._process_list)\n\n\t\tif size == 0: \n\t\t\treturn None\n\n\t\tprocess = self._process_list[self._last_index % size]\n\t\tself._last_index += 1\n\t\treturn process", "def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def next_available_PPN(self):\n if len(self.memory) < self.MaxMemPages:\n return len(self.memory) #physical page number is simply just next index in memory. 
\n else:\n return self.getLRUPPN() #Return the physical page number with the MINUMUM time stamp (least recently used)", "def get_cpu_usage(pid):\n\n cpuusage = subprocess.Popen(['ps','-o', 'pcpu', '-p', str(pid)], shell=False, stdout=subprocess.PIPE)\n cpuusage.stdout.readline()\n return float(cpuusage.stdout.readline().rstrip())", "def get_process_object(pid, die=True):\n try:\n return psutil.Process(pid)\n except psutil.NoSuchProcess as e:\n if die:\n raise e\n else:\n return None", "def update_pid_running_on_dpdk_cpu(self):\n #cpu_list = self.get_cpu_list_by_mask", "def get_pid(self, pid):\n for p in self._queue:\n if p.id == pid:\n return p\n else: return 0", "def get_my_process():\n return get_process_object(os.getpid())", "def get_process_speed(self, pid):\n raise NotImplementedError()", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def pid(self):\n # type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def process(proc_data):\n\n # No further processing\n return proc_data", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def get_process(self) -> ApplyResult:\n return self._process", "def get_pid(self, file_path) -> int | None:\n if file_path in self._processors:\n return 
self._processors[file_path].pid\n return None", "def get_qc_process(process):\n\n # Each entry in input_output_maps is an input/output specification with a single\n # input and any number of outputs. This gets the first input.\n first_io = process.input_output_maps[0]\n first_in_artifact = first_io[0]['uri']\n\n processes = process.lims.get_processes(inputartifactlimsid=first_in_artifact.id)\n seq_processes = [proc for proc in processes if proc.type_name in [p[1] for p in QC_PROCESSES]]\n # Use the last sequencing process. In case of crashed runs, this will be the right one.\n try:\n return seq_processes[-1]\n except IndexError:\n return None", "def _maybe_use_running_output(process, outputs):\n if os.environ.get(\"POLYSQUARE_ALWAYS_PRINT_PROCESS_OUTPUT\", None):\n return running_output(process, outputs)\n\n return None", "def get_process_by_process_id(self, process_id):\n try:\n process = Process.objects.get(pk=process_id)\n except Process.DoesNotExist:\n process = None\n\n return process", "def compute_memory_usage(app_pid):\n logging.info(\"Computing memory usage...\")\n\n try:\n p = subprocess.Popen(\n \"top -l 1 -pid {PID}\".format(PID=app_pid),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=True\n )\n output = p.communicate()[0].strip()\n\n memory_usage = round(float(str(list(filter(('').__ne__, str(output.splitlines()[-1]).split(\" \")))[7])[:-1]) / 1024.00, 2)\n logging.info(\"Memory usage: [{MEMORY} MB]\".format(MEMORY=memory_usage))\n\n except Exception as e:\n logging.error(\"Computing the memory usage failed with error '{ERROR}'\".format(ERROR=e))\n return None\n\n logging.info(\"Memory usage computed successfuly!\")\n return memory_usage", "def get_cpu_number():\n try:\n output = subprocess.check_output('lscpu').decode(\"utf-8\")\n for line in output.splitlines():\n m = re.match(r'NUMA node0.*:\\s*\\d+-(\\d+)', line)\n if m:\n return m.group(1)\n except OSError:\n pass\n sys.stderr.write(\"Warning: Unable to select CPU ID, using 0\\n\")\n return 0", "def emit(self, pid):\n print \"Emit with pid: {}\".format(pid)\n if pid is None:\n return False\n metrics = {'cpu': 0,\n 'ram': 1,\n 'net': 2,\n 'bytes_sent': 1,\n 'bytes_recv': 1,\n 'packets_sent': 1,\n 'packets_recv': 1,\n 'errin': 0,\n 'errout': 0,\n 'dropin': 0,\n 'dropout': 0,\n 'disk': 0,\n 'files': 0,\n 'time': calendar.timegm(time.gmtime()) * 1000}\n #return False # prevent this\n ##########################################################################################################\n # psutil read metrics\n try:\n # self.proc = psutil.Process(pid)\n process_name = None\n if self.personal_cloud.lower() == \"stacksync\":\n if os.name == 'nt':\n process_name = \"javaw.exe\"\n elif os.name == \"posix\":\n process_name = \"java\"\n\n elif self.personal_cloud.lower() == \"dropbox\":\n if os.name == 'nt':\n process_name = \"Dropbox.exe\"\n elif os.name == \"posix\":\n process_name = \"dropbox\"\n elif self.personal_cloud.lower() == \"owncloud\":\n process_name = \"owncloudcmd\"\n elif self.personal_cloud.lower() == 'mega':\n process_name = \"megacmd\"\n\n if self.proc is None or self.proc.pid != pid:\n self.proc = psutil.Process(pid)\n\n if process_name == self.proc.name() or \"owncloudcmd\" == process_name or \"megacmd\" == process_name:\n print \"OKEY match {} == {}\".format(self.proc.name(), process_name)\n else:\n print \"sync client does not match: {}\".format(process_name)\n return False\n\n except Exception as ex:\n print \"sync client is not running! 
{}\".format(pid)\n print ex.message\n return False # exit as the process is not alive.\n\n ##########################################################################################################\n print \"PID: {} [{}]\".format(pid, self.personal_cloud.lower())\n try:\n if self.personal_cloud.lower() == \"stacksync\":\n # todo lookup for stacksync process here => using psutil\n cpu_usage = int(math.ceil(self.proc.cpu_percent(0)))\n ram_usage = self.proc.memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"owncloud\":\n cpu_usage = int(math.ceil(self.proc.children()[0].cpu_percent(interval=1)))\n ram_usage = self.proc.children()[0].memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"mega\":\n cpu_usage = int(math.ceil(self.proc.children()[0].cpu_percent(interval=1)))\n ram_usage = self.proc.children()[0].memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n elif self.personal_cloud.lower() == \"dropbox\":\n # todo lookup for dropbox process here => using psutil\n cpu_usage = int(math.ceil(self.proc.cpu_percent(0)))\n ram_usage = self.proc.memory_info().rss\n metrics['cpu'] = cpu_usage\n metrics['ram'] = ram_usage\n except Exception as e:\n print e.message\n\n # assign the network usage metric\n\n if self.prev_metric is not None:\n # do nothing because its the first emit ant there are no previous metric to compare\n # last_net = self.prev_metric['metrics']['net']\n last_time = self.prev_metric['metrics']['time']\n\n curr_net_counter = psutil.net_io_counters(pernic=True)['eth0'] # read the bytes from somewhere\n curr_time = metrics['time']\n elapsed_time = (curr_time - last_time) / 1000 # segons\n for key, value in curr_net_counter.__dict__.items():\n metrics[key] = (value - getattr(self.prev_net_counter, key)) / elapsed_time # unit is seconds\n self.prev_net_counter = curr_net_counter\n # assign hard drive usage metric\n\n if os.name == \"nt\":\n drive_usage = \"1234\"\n elif os.name == \"posix\":\n drive_usage_cmd = ['/usr/bin/du', '-ks', '/home/vagrant/{}'.format(self.personal_folder)]\n drive_usage_output = subprocess.Popen(drive_usage_cmd, stdout=subprocess.PIPE)\n drive_usage = drive_usage_output.stdout.read()\n try:\n metrics['disk'] = int(drive_usage.split('\\t')[0]) # kilo bytes cast string to int\n except Exception as ex:\n print \"invalid literal for... memory unit\"\n metrics['disk'] = 1\n # assign add folder num of files metric\n\n\n\n\n if os.name == \"nt\":\n num_files = \"123\"\n elif os.name == \"posix\":\n find_cmd = '/usr/bin/find /home/vagrant/{} -type f'.format(self.personal_folder).split()\n proc_find = subprocess.Popen(find_cmd, stdout=subprocess.PIPE)\n wc_cmd = '/usr/bin/wc -l'.split()\n proc_wc = subprocess.Popen(wc_cmd, stdin=proc_find.stdout, stdout=subprocess.PIPE)\n num_files = proc_wc.communicate()[0]\n try:\n metrics['files'] = int(num_files.split('\\t')[0])\n except Exception as ex:\n print \"invalid literal for... 
file counter\"\n\n net_stats = self.traffic_monitor.notify_stats()\n # z = dict(x.items() + y.items()) => metrics\n # envez de esto dict join\n metrics['data_rate_size_up'] = net_stats['data_rate']['size_up']\n metrics['data_rate_size_down'] = net_stats['data_rate']['size_down']\n metrics['data_rate_pack_up'] = net_stats['data_rate']['pack_up']\n metrics['data_rate_pack_down'] = net_stats['data_rate']['pack_down']\n metrics['meta_rate_size_up'] = net_stats['meta_rate']['size_up']\n metrics['meta_rate_size_down'] = net_stats['meta_rate']['size_down']\n metrics['meta_rate_pack_up'] = net_stats['meta_rate']['pack_up']\n metrics['meta_rate_pack_down'] = net_stats['meta_rate']['pack_down']\n\n '''\n {'data_rate':\n {'size_up': 0.471, 'pack_down': 0.00175, 'pack_up': 0.00225, 'size_down': 0.612},\n 'meta_rate':\n {'size_up': 0.0, 'pack_down': 0.0, 'pack_up': 0.0, 'size_down': 0.0},\n 'time': 1461065156000\n }\n '''\n\n tags = ''\n if tags == '':\n tags = {\n 'profile': self.receipt,\n 'credentials': 'pc_credentials',\n 'client': self.personal_cloud.lower(),\n }\n\n data = {\n 'metrics': metrics,\n 'tags': tags\n }\n self.prev_metric = data # update the last emited metric\n msg = json.dumps(data)\n print msg\n\n self.channel.basic_publish(\n exchange='metrics',\n routing_key=self.hostname,\n body=msg)\n\n return True", "def GetCpuStats(self, pid):\n class ProcTaskInfo(ctypes.Structure):\n \"\"\"Struct for proc_pidinfo() call.\"\"\"\n _fields_ = [(\"pti_virtual_size\", ctypes.c_uint64),\n (\"pti_resident_size\", ctypes.c_uint64),\n (\"pti_total_user\", ctypes.c_uint64),\n (\"pti_total_system\", ctypes.c_uint64),\n (\"pti_threads_user\", ctypes.c_uint64),\n (\"pti_threads_system\", ctypes.c_uint64),\n (\"pti_policy\", ctypes.c_int32),\n (\"pti_faults\", ctypes.c_int32),\n (\"pti_pageins\", ctypes.c_int32),\n (\"pti_cow_faults\", ctypes.c_int32),\n (\"pti_messages_sent\", ctypes.c_int32),\n (\"pti_messages_received\", ctypes.c_int32),\n (\"pti_syscalls_mach\", ctypes.c_int32),\n (\"pti_syscalls_unix\", ctypes.c_int32),\n (\"pti_csw\", ctypes.c_int32),\n (\"pti_threadnum\", ctypes.c_int32),\n (\"pti_numrunning\", ctypes.c_int32),\n (\"pti_priority\", ctypes.c_int32)]\n PROC_PIDTASKINFO = 4\n def __init__(self):\n self.size = ctypes.sizeof(self)\n super(ProcTaskInfo, self).__init__() # pylint: disable=bad-super-call\n\n proc_info = ProcTaskInfo()\n if not self.libproc:\n self.libproc = ctypes.CDLL(ctypes.util.find_library('libproc'))\n self.libproc.proc_pidinfo(pid, proc_info.PROC_PIDTASKINFO, 0,\n ctypes.byref(proc_info), proc_info.size)\n\n # Convert nanoseconds to seconds.\n cpu_time = (proc_info.pti_total_user / 1000000000.0 +\n proc_info.pti_total_system / 1000000000.0)\n results = {'CpuProcessTime': cpu_time,\n 'ContextSwitches': proc_info.pti_csw}\n\n # top only reports idle wakeup count starting from OS X 10.9.\n if self.GetOSVersionName() >= os_version_module.MAVERICKS:\n results.update({'IdleWakeupCount': self._GetIdleWakeupCount(pid)})\n return results", "def get(input=None):\n if isinstance(input, datetime.datetime):\n return Elapsed((datetime.datetime.now() - input).total_seconds())\n if not input or isinstance(input, int):\n pid = input if input else os.getpid()\n output = os.popen(\"ps -p %s -o etime | grep -v ELAPSED\" %\n pid).read().strip()\n if output:\n return Elapsed(_parse_ps_output(output))", "def schedule(self, pid, delta_t):\n\t\tif len(self._process_list) == 0:\n\t\t\treturn None\n\n\t\tif self._last_index >= len(self._process_list):\n\t\t\tself._last_index = 
0\n\t\t\n\t\twhile True:\n\n\t\t\tpriority = sort_priority()\n\t\t\t# print \"SCHEDULING PRIORITY \", priority, \"\\n\"\n\t\t\tactual_index = self._last_index + 1\n\n\t\t\twhile True:\n\t\t\t\t\n\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\tactual_index = 0\n\t\t\t\t\n\t\t\t\tprocess = self._process_list[actual_index] \n\t\t\t\t\n\t\t\t\tif find_priority(process.get_priority()) == priority:\n\t\t\t\t\tself._last_index = self._process_list.index(process)\n\t\t\t\t\t# print \"FIND PROCESS \" , actual_index, \"\\n\"\n\t\t\t\t\treturn process\n\t\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tactual_index += 1\n\n\t\t\t\t\tif actual_index >= len(self._process_list):\n\t\t\t\t\t\tactual_index = 0\n\n\t\t\t\t\t# print \"LAST INDEX: \", self._last_index, \" ACTUAL INDEX: \", actual_index\t\t\t\t\t\n\n\t\t\t\t\tif actual_index == self._last_index: \n\t\t\t\t\t\tbreak", "def current_process(self):\n return self._current_process", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def pid(self):\n return self._process.pid", "def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')", "def _pid(self, name):\n return self.pid_lookup[name]", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None", "def get_overall_cpu_util(dut, exclude_proc_name=None):", "def get_next_gp(self):\n raise NotImplementedError('Abstract Method')", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def _obtain(self):\n\n while True:\n # make sure we're observing load maximums\n if self.max_load is not None:\n try:\n load = os.getloadavg()\n if jobserver_running_jobs() > 0 and load[1] > self.max_load:\n time.sleep(0.01)\n continue\n except NotImplementedError:\n pass\n\n # make sure we're observing memory maximum\n if self.max_mem is not None:\n mem_used, mem_total = memory_usage()\n mem_percent_used = 100.0 * float(mem_used) / float(mem_total)\n if jobserver_running_jobs() > 0 and mem_percent_used > self.max_mem:\n time.sleep(0.01)\n continue\n\n # get a token from the job pipe\n try:\n token = os.read(self.job_pipe[0], 1)\n return token\n except OSError as e:\n if e.errno != errno.EINTR:\n raise", "def _cpu_usage(self, e):\n\n cores = os.cpu_count()\n try:\n cpu_usage = int(self.cpu_entry.get())\n if cpu_usage < 0 or cpu_usage > 100:\n self.invalid_input()\n elif cpu_usage == 0:\n self.processes = 1\n else:\n self.processes = round(cpu_usage / 100 * cores)\n except ValueError:\n self.invalid_input()", "def worker_process(self, item):\n g_sleep()\n return item", "def unit_for_pid(pid):\n try:\n cgroup = slurp('/proc/%d/cgroup' % pid)\n match = re.search(\"1:name=systemd:/system.slice/(?:jvm:)?(.+?)\\.service\", cgroup)\n return match.group(1) if match else None\n except IOError:\n return None", "def pidGet(self) -> float:\n ...", "def pidGet(self) -> float:\n ...", "def resource_monitor(childpid, pipe_handle):\n # Get our pid\n ourpid = os.getpid()\n \n # Calculate how often disk should be checked\n disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_LINUX / repy_constants.CPU_POLLING_FREQ_LINUX)\n current_interval = -1 # What cycle are we on \n \n # Store time of the last interval\n last_time = getruntime()\n last_CPU_time = 0\n resume_time = 0 \n \n # Run 
forever...\n while True:\n ########### Check CPU ###########\n # Get elapsed time\n currenttime = getruntime()\n elapsedtime1 = currenttime - last_time # Calculate against last run\n elapsedtime2 = currenttime - resume_time # Calculate since we last resumed repy\n elapsedtime = min(elapsedtime1, elapsedtime2) # Take the minimum interval\n last_time = currenttime # Save the current time\n \n # Safety check, prevent ZeroDivisionError\n if elapsedtime == 0.0:\n continue\n \n # Get the total cpu at this point\n totalCPU = os_api.get_process_cpu_time(ourpid) # Our own usage\n totalCPU += os_api.get_process_cpu_time(childpid) # Repy's usage\n \n # Calculate percentage of CPU used\n percentused = (totalCPU - last_CPU_time) / elapsedtime\n \n # Do not throttle for the first interval, wrap around\n # Store the totalCPU for the next cycle\n if last_CPU_time == 0:\n last_CPU_time = totalCPU \n continue\n else:\n last_CPU_time = totalCPU\n \n # Calculate stop time\n stoptime = nanny.calculate_cpu_sleep_interval(nanny.get_resource_limit(\"cpu\"), percentused, elapsedtime)\n \n # If we are supposed to stop repy, then suspend, sleep and resume\n if stoptime > 0.0:\n # They must be punished by stopping\n os.kill(childpid, signal.SIGSTOP)\n\n # Sleep until time to resume\n time.sleep(stoptime)\n\n # And now they can start back up!\n os.kill(childpid, signal.SIGCONT)\n \n # Save the resume time\n resume_time = getruntime()\n\n # Send this information as a tuple containing the time repy was stopped and\n # for how long it was stopped\n write_message_to_pipe(pipe_handle, \"repystopped\", (currenttime, stoptime))\n \n \n ########### End Check CPU ###########\n # \n ########### Check Memory ###########\n \n # Get how much memory repy is using\n memused = os_api.get_process_rss()\n \n # Check if it is using too much memory\n if memused > nanny.get_resource_limit(\"memory\"):\n raise ResourceException, \"Memory use '\"+str(memused)+\"' over limit '\"+str(nanny.get_resource_limit(\"memory\"))+\"'.\"\n \n ########### End Check Memory ###########\n # \n ########### Check Disk Usage ###########\n # Increment our current cycle\n current_interval += 1;\n \n # Check if it is time to check the disk usage\n if (current_interval % disk_interval) == 0:\n # Reset the interval\n current_interval = 0\n \n # Calculate disk used\n diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)\n\n # Raise exception if we are over limit\n if diskused > nanny.get_resource_limit(\"diskused\"):\n raise ResourceException, \"Disk use '\"+str(diskused)+\"' over limit '\"+str(nanny.get_resource_limit(\"diskused\"))+\"'.\"\n\n # Send the disk usage information, raw bytes used\n write_message_to_pipe(pipe_handle, \"diskused\", diskused)\n \n ########### End Check Disk ###########\n \n # Sleep before the next iteration\n time.sleep(repy_constants.CPU_POLLING_FREQ_LINUX)", "def get_top():\n print(\"This processes are using the cpu the most:\")\n print(os.system(\"ps axo %cpu,pid,euser,cmd | sort -nr | head -n 5\"))", "def usePIDOutput(self, output):\n self.motor.pidWrite(output)", "def get_isolate_cpus(self):\n\n command = \"cat /proc/cpuinfo | grep processor | awk '{print $NF}'\"\n out = run_and_getout(command)\n str_out = out.decode(self.default_code).replace('\\n', ' ').strip()\n str_out = str(str_out)\n if str_out[0] == \"0\":\n return str_out[2:]\n else:\n return str_out", "def get_sequencing_process(process):\n\n # Each entry in input_output_maps is an input/output specification with a single\n # input and any number of outputs. 
This gets the first input.\n first_io = process.input_output_maps[0]\n first_in_artifact = first_io[0]['uri']\n\n processes = process.lims.get_processes(inputartifactlimsid=first_in_artifact.id)\n seq_processes = [proc for proc in processes if proc.type_name in [p[1] for p in SEQ_PROCESSES]]\n # Use the last sequencing process. In case of crashed runs, this will be the right one.\n try:\n return seq_processes[-1]\n except IndexError:\n return None", "def get_cpu_clock_cycles_of_pid(pid):\n try:\n with open(LINUX_PROCESS_STAT_LOCATION % pid, 'r') as f:\n pid_entries = f.read().split(' ')\n except IOError:\n return None\n\n pid_cycles = 0\n if len(pid_entries) > 14:\n pid_cycles = int(pid_entries[13]) + int(pid_entries[14])\n return pid_cycles", "def _get_pid(split_data, sensor):\n prot, ip_dst, port_dst, timestamp = split_data\n prot = prot.lower()\n\n if not sanitizer.check_get_pid_params(prot, ip_dst, port_dst, timestamp):\n return '-1,error checking input'\n\n return sensor.search_process(prot, ip_dst, port_dst, timestamp)", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def get_process_info_by_pid(self, pid):\n # TODO: discuss if self.logger needs to happen here? I think not? -BY\n\n for process in self.data_storage.running_data:\n if self.data_storage.running_data[process]['pid'] == pid:\n return self.data_storage.running_data[process]", "def get_process(ngrok_path, config_path=None, auth_token=None, region=None):\n if ngrok_path in _current_processes:\n # Ensure the process is still running and hasn't been killed externally\n if _current_processes[ngrok_path].proc.poll() is None:\n return _current_processes[ngrok_path]\n else:\n _current_processes.pop(ngrok_path, None)\n\n return _start_process(ngrok_path, config_path, auth_token, region)", "def getProcInfo(self, line):\n try:\n pid, rss, cpu, cmdAndArgs = line.split(None, 3)\n except ValueError:\n # Defunct processes look like this (no RSS data)\n # '28835916 00:00:00 <defunct>'\n pid, cpu, cmdAndArgs = line.split(None, 2)\n rss = \"0\"\n # Exiting and Idle processes look like this\n # (no RSS data, TIME data == '-')\n # '11337738 - <exiting>'\n # '11862166 - <idle>'\n # _extractProcessMetrics(self, line) method will try\n # to parseCpuTime('-') with exception\n if cpu == \"-\":\n cpu = \"00:00:00\"\n\n return pid, rss, cpu, cmdAndArgs", "def pid(self):\n return self._get_process_id()", "def find_e_hunt_pid():\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name', 'username'])\n except psutil.NoSuchProcess:\n pass\n else:\n if pinfo[\"name\"] == \"The Equinox Hunt.exe\":\n return pinfo['pid']\n raise EnvironmentError(\"Equinox Hunt not found!\")", "def allocated_cpu(self):\n return self._allocated_cpu", "def cpu(self):\r\n return self._cpu", "def get_cpu_usage(pid):\n try:\n process = psutil.Process(pid) \n cpu = process.cpu_times()[0]\n logging.info(f\"Calculated CPU usage {cpu} for PID {pid}\")\n return float(cpu)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")", "def cpu(self) -> Optional[Any]:\n 
return pulumi.get(self, \"cpu\")", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def get_processor_output(location: Optional[pulumi.Input[str]] = None,\n processor_id: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProcessorResult]:\n ...", "def read(self):\n if self.alive:\n with self._register_poll():\n with _unblock_read(self._proc):\n return self._yield_ready_read()\n else:\n raise ProcessIsDeadError('Can not read. The process is already dead.')", "def output_node(self, port: int):\n return self._output_nodes_map[port]", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def get_cpu_core():\n processor_info = subprocess.getoutput('dmidecode -t processor')\n cpu_core_value = re.findall(r'(?i)Core Count:\\s+(.*?)\\n', processor_info, re.S)[0]\n log.info('cpu_core value:{}'.format(cpu_core_value))\n if cpu_core_value:\n cpu_core = cpu_core_value\n else:\n cpu_core = ''\n return cpu_core", "def run_processing_engine(input_file):\n from subprocess import Popen, PIPE\n p = Popen([\"python\", \"process.py\", input_file, \"-v\"], stdout=PIPE)\n return p.wait()", "def get_cpu_usage():\n process_details = RU_OBJ.get_curr_processes()\n return json.dumps(sorted(process_details, key=lambda k: k['name']))", "def next_job(data, rank):\n for j in data.queue:\n process = data.nodes[j[0]]\n status = j[1]\n if process.is_ready() and status == -1:\n j[1] = rank\n return process\n return None # no job ready to execute or job finished", "def _while_loop(pd, nmax, max_loop, ncore, outfile):\n \n # make sure the random seed is different for every processor\n random_number_seeds = [3456789, 7654321, 2435467, 8273645,\n 1085712, 4154712, 1248291, 8415917,\n 2345161, 5710916, 5718601, 7516234,\n 9235161, 4917519, 1111245, 8167834] \n \n # get the processor ID (1 - Ncore) and convert to single integer\n current = multiprocessing.current_process()\n pid = current._identity\n pid = pid[0]\n \n #\n # choose a different seed for each processor from the list so each processor has\n # a different randum number seed. 
Then, fiddle with each seed a little so \n # the seeds aren't the same every time the code is run\n seed = np.int(random_number_seeds[pid] * (np.random.rand()*(10.0 - 0.01) + 0.01))\n \n np.random.seed(seed)\n \n #print 'id and seed', pid, seed\n \n n_particles = 0\n loop_counter = 0\n fmin_scale = 1.0E-100 # -16 -> -32 -> -100\n F_max = np.max(pd.DF.f) #; F_min = np.min(pd.DF.f);\n F_min = np.min(pd.DF.f) * fmin_scale\n # F_max = np.max(pd.DF.f[:-1])#; F_max = 1.0E-88\n #print F_min, F_max\n if pd.optimize:\n relative_potential = pd._interpolate_relative_potential\n else:\n relative_potential = pd.DF.relative_potential\n \n \n pos = np.zeros((nmax, 3))\n vel = np.zeros((nmax, 3)) \n \n while (( n_particles < nmax) and (loop_counter < max_loop)):\n \n r = pd._choose_position()\n Psi = relative_potential(r) \n \n v = pd._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n \n f_E = pd.DF.interpolate_f(E)\n \n logF = ( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n # if choosing random F in log F, might be good to do the comparison in logspace as well\n #.... i.e log(F) <= log(f_E) \n #\n # 0 FOR F_E MEANS THAT E < E_min of the potential. THis happens when Psi - KE is smaller\n # than the value of the potential at large_r... should this be considered unbound \n # even though it isn't zero? Is this effectively zero? This has been adjusted in the velocity\n # picking routine but needs verification to make sure it works.... but regardless, I doubt\n # that this is the reason why the particles are failing for NFW but working for hernquist....\n #\n if np.abs(np.log10(f_E)) == np.inf:\n keep_particle = False\n _my_print('log value error... throwing out particle')\n else:\n keep_particle = (logF <= np.log10(f_E))\n\n \n if keep_particle: \n index = n_particles \n \n # \n # need to sample in cosine theta NOT theta!!!!! WTF!!!!!\n #\n #\n \n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n \n # convert position to cartesian using random theta and phi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n pos[index] = r * np.array([x,y,z])\n\n # repeat for velocity using new random numbersw\n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n vel[index] = v * np.array([vx,vy,vz])\n \"\"\" \n #else: # do strict cartesian ... slower.... 
less attractive\n # \n # axis_list = [0, 1, 2]\n # random.shuffle(axis_list)\n \n # #axis_index = np.random.randint(3)\n # first_axis = axis_list[0]\n #\n # pos[index, first_axis] = np.random.rand() * (2.0*r) - r\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n # second_axis = axis_list[1]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2)\n \n pos[index, second_axis] = np.random.rand()*(2.0 * max_r) - max_r\n #del axis_list[axis_index]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2 - pos[index,second_axis]**2)\n \n third_axis = axis_list[2]\n pos[index, third_axis] = np.random.rand() * (2.0 * max_r) - max_r\n \n if np.sqrt(pos[index,0]**2 + pos[index,1]**2 + pos[index,2]**2) > r:\n _my_print('R IS TOO LARGE')\n \n ###\n axis_list = [0, 1, 2]\n random.shuffle(axis_list)\n \n #axis_index = np.random.randint(3)\n first_axis = axis_list[0]\n \n vel[index, first_axis] = np.random.rand() * (2.0*v) - v\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n second_axis = axis_list[1]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2)\n \n vel[index, second_axis] = np.random.rand()*(2.0 * max_v) - max_v\n #del axis_list[axis_index]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2 - vel[index,second_axis]**2)\n \n third_axis = axis_list[2]\n vel[index, third_axis] = np.random.rand() * (2.0 * max_v) - max_v \n \n \n \"\"\"\n n_particles = n_particles + 1\n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n # now write out to a temporary file\n f = open(outfile + \"_%02i_\"%(pid) + \".temp\", 'w')\n fmt = \"%12.12E %12.12E %12.12E %12.12E %12.12E %12.12E %12.12E\\n\"\n \n \n for i in np.arange(nmax):\n f.write(fmt%(pd.M_part, pos[i,0], pos[i,1], pos[i,2], \n vel[i,0], vel[i,1], vel[i,2]))\n \n \n f.close() \n \n return pos, vel", "def spinupoutputprocess():\n if __name__ == '__main__':\n _hwmgr = HardwareController(OUTPUT_SETTINGS)\n PROCESSES.append(_hwmgr)\n _hwmgr.start()", "def process_iter():\r\n def add(pid):\r\n proc = Process(pid)\r\n _pmap[proc.pid] = proc\r\n return proc\r\n\r\n def remove(pid):\r\n _pmap.pop(pid, None)\r\n\r\n a = set(get_pid_list())\r\n b = set(_pmap.keys())\r\n new_pids = a - b\r\n gone_pids = b - a\r\n\r\n for pid in gone_pids:\r\n remove(pid)\r\n for pid, proc in sorted(list(_pmap.items()) + \\\r\n list(dict.fromkeys(new_pids).items())):\r\n try:\r\n if proc is None: # new process\r\n yield add(pid)\r\n else:\r\n # use is_running() to check whether PID has been reused by\r\n # another process in which case yield a new Process instance\r\n if proc.is_running():\r\n yield proc\r\n else:\r\n yield add(pid)\r\n except NoSuchProcess:\r\n remove(pid)\r\n except AccessDenied:\r\n # Process creation time can't be determined hence there's\r\n # no way to tell whether the pid of the cached process\r\n # has been reused. 
Just return the cached version.\r\n yield proc", "def get_cpu(self):\n pass", "def next(self, *input):\n self.log.info(\"Starting next for task %s\" % self.__class__.__name__)\n\n self.comm.Barrier()\n\n # This should only be called once.\n try:\n if self.done:\n raise pipeline.PipelineStopIteration()\n except AttributeError:\n self.done = True\n\n # Extract a list of the tags for all input arguments\n input_tags = [\n (\n str(icont.attrs.get(\"tag\"))\n if isinstance(icont, memh5.MemDiskGroup)\n else \"\"\n )\n for icont in input\n ]\n\n # Process input and fetch output\n if self._no_input:\n if len(input) > 0:\n # This should never happen. Just here to catch bugs.\n raise RuntimeError(\"Somehow `input` was set.\")\n output = self.process()\n else:\n output = self.process(*input)\n\n # Return immediately if output is None to skip writing phase.\n if output is None:\n return\n\n # Insert the input tags into the output container\n output.attrs[\"input_tags\"] = input_tags\n\n output = self._process_output(output)\n\n # Increment internal counter\n self._count = self._count + 1\n\n self.log.info(\"Leaving next for task %s\" % self.__class__.__name__)\n\n # Return the output for the next task\n return output", "def status(pid_file):\n if not os.path.exists(pid_file):\n return None\n\n pid = None\n with open(pid_file, \"r\") as pf:\n pid = pf.read().strip()\n\n if not pid:\n logger.error(\"Unable to retrieve pid from %s\" % pid_file)\n return None\n\n if not pid.isdigit():\n logger.error(\"Invalid pid %s read from %s\" % (pid, pid_file))\n return None\n\n pid = int(pid)\n\n try:\n # Send 0 signal to check if the process is alive.\n os.kill(pid, 0)\n except OSError as e:\n logger.debug(\"%s\" % e, exc_info=True)\n return None\n return pid", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def localStageOutPNN(self):\n return self.localStageOut['phedex-node']", "def process_memory():\n process = psutil.Process()\n return int(convert.bytetomb(process.memory_info().rss))", "def _job_id_or_out(out):\n\n stdout = re.sub(\"[^0-9]\", \"\", str(out))\n if not stdout:\n stdout = out\n return stdout", "def usage(self):\n self.process = subprocess.Popen(\n# \"ps -u %s -o rss | awk '{sum+=$1} END {print sum}'\" % self.username,\n \"ps -p %s -o rss | awk '{sum+=$1} END {print sum}'\" % self.pid,\n shell=True, stdout=subprocess.PIPE)\n self.stdout_list = self.process.communicate()[0].split('\\n')\n return int(self.stdout_list[0])", "def get(self):\n if self.proc is not None:\n return self.proc.get()\n\n return None", "def process_state(self):\n process = self._get_process()\n if not self.is_on:\n process = StateOptions.NONE\n return self._update_feature(WashDeviceFeatures.PROCESS_STATE, process)", "def process_id(self):\n return self._process_id", "def process_id(self):\n return self._process_id", "def getPid(self):\n try:\n fh = open(self.filename)\n except OSError:\n return None\n line = fh.readline()\n try:\n return string.atoi(line) # trailing newline doesn't matter\n except ValueError:\n return None", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu 
is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def process():\n reader = owslib.wps.WPSDescribeProcessReader()\n root = reader.readFromString(open(resource_file(\"process_description.xml\")).read())\n xml = root.findall(\"ProcessDescription\")[0]\n return owslib.wps.Process(xml)", "def pid(self):\n\t\treturn self.__pid", "def _launch_process_watcher(self, parent_pid, child_pid, child_host, child_port, minecraft_dir):\n\n multiprocessing.freeze_support()\n parent_conn, child_conn = multiprocessing.Pipe()\n self._logger.info(\"Starting process watcher for process {} @ {}:{}\".format(child_pid, child_host, child_port))\n p = multiprocessing.Process(\n target=InstanceManager._process_watcher, args=(\n parent_pid, child_pid, \n child_host, child_port, \n minecraft_dir, child_conn))\n \n def update_port(port):\n parent_conn.send([port])\n # p.daemon = True\n\n p.start()\n return p, update_port" ]
[ "0.5942478", "0.56531554", "0.55160487", "0.5489709", "0.5332322", "0.51632214", "0.5010553", "0.49991703", "0.4969401", "0.4968952", "0.49439943", "0.49326235", "0.49264264", "0.4911699", "0.49015945", "0.48903045", "0.4889358", "0.48885792", "0.48797858", "0.4823984", "0.48171273", "0.48158756", "0.4808009", "0.47667423", "0.47644532", "0.47606382", "0.47456726", "0.4728888", "0.47036502", "0.4683463", "0.46774384", "0.46744475", "0.46730158", "0.4645288", "0.46397993", "0.46326768", "0.46287042", "0.4622052", "0.46197903", "0.46004283", "0.45924464", "0.4591962", "0.45917252", "0.45898944", "0.45858857", "0.45707524", "0.45693994", "0.45688614", "0.45603168", "0.4559564", "0.4559564", "0.45523745", "0.454785", "0.45217323", "0.45211512", "0.45177242", "0.45109633", "0.4492074", "0.44846478", "0.44782788", "0.44667718", "0.44605416", "0.44598135", "0.4453756", "0.4453311", "0.44517678", "0.44516167", "0.44511485", "0.44511485", "0.44430524", "0.44363192", "0.44297457", "0.4421049", "0.44194788", "0.44151205", "0.44102752", "0.4405556", "0.4405402", "0.44015402", "0.4397921", "0.4396372", "0.43933898", "0.439284", "0.43839154", "0.43456227", "0.43451336", "0.43386233", "0.43366602", "0.43366438", "0.43333712", "0.43318152", "0.4325965", "0.432533", "0.43249124", "0.43249124", "0.43173566", "0.43165746", "0.43144497", "0.4314348", "0.43028864" ]
0.470696
28
Makes TexMobject resiliant to unmatched { at start
def remove_stray_braces(self, tex):
    num_lefts, num_rights = [
        tex.count(char)
        for char in "{}"
    ]
    if num_rights > num_lefts:
        backwards = tex[::-1].replace("}", "", num_rights - num_lefts)
        tex = backwards[::-1]
    elif num_lefts > num_rights:
        tex = tex.replace("{", "", num_lefts - num_rights)
    return tex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generateCurrentLineText(self, obj, **args ):\n result = self._generateSubstring(obj, **args)\n if result:\n return result\n\n [text, caretOffset, startOffset] = self._script.getTextLineAtCaret(obj)\n if text and not self._script.EMBEDDED_OBJECT_CHARACTER in text:\n return [text]\n\n return []", "def flagUntexturedObject(self, object):\n object.setShaderInput(\"texDisable\", 1, 1, 1, 1)", "def test_blank_content_object_debug(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" obj %}fallback{% endeditregion %}\n \"\"\")\n with self.assertRaisesRegexp(ValueError, \"content_object was probably \"\n \"'', check the context \"\n \"provided\"):\n tmpl.render(Context()).strip()", "def test_blank_content_object_production(self):\n tmpl = Template(\"\"\"\n {% load editregion %}\n {% editregion \"test\" obj %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('fallback', tmpl.render(Context()).strip())", "def flagTexturedObject(self, object):\n object.setShaderInput(\"texDisable\", 0, 0, 0, 0)", "def test_custom_decorator_displaytex_empty(self):\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"tex\": \"\", \"type\": \"TEXBLOCK\"},\n }\n },\n )\n ),\n '<span class=\"ashley-latex-display\"></span>',\n )", "def test_custom_decorator_displaytex_no_malformed(self):\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"type\": \"TEXBLOCK\"},\n }\n },\n )\n ),\n '<span class=\"ashley-latex-display\"></span>',\n )", "def textObject(text, font, color):\n\n textSurface = font.render(text, True, color)\n return textSurface, textSurface.get_rect()", "def used_tex_func(val):\n return None", "def clean_open_close_brace(self):\n # Loop over all lines, check for braces and replace them with \\n{ and \\n}\n brack_num = False\n code_on = False\n\n for line_num, line in enumerate(self.file_ltxt[:-1]):\n self.line_num = line_num\n\n # First check if we are in an inline code section\n breaker = False\n for s_type in VALID_SCRIPT_TYPES:\n if re.findall(f\"^ *{s_type} *{{\", line) or (re.findall(f\"^ *{s_type}\", line) and re.findall(\"^ *{\", self.file_ltxt[line_num+1])):\n if code_on is not False:\n self.print_error(f\"Inline {s_type} code is not supported inside {code_on} code.\")\n\n code_on = s_type\n brack_num = 0\n\n if '{' in line:\n s = line.split(\"{\")\n line = s[0] + \"\\n{\\n\" + '{'.join(s[1:])\n brack_num = 1\n if '}' in line:\n s = line.split(\"}\")\n line = s[0] + \"\\n}\\n\" + '}'.join(s[1:])\n code_on = False\n brack_num = 0\n\n\n self.file_ltxt[line_num] = line\n breaker = True\n if breaker:\n continue\n\n # If we are in an inline code section don't edit it\n if code_on is not False:\n if '}' in line: brack_num -= 1\n if '{' in line: brack_num += 1\n\n if brack_num == 0:\n code_on = False\n\n # If not then we can edit the brace opening and closings\n else:\n str_part, non_str = gen_parse.get_str_between_delims(line)\n non_str = non_str.replace(\"{\", \"\\n{\\n\").replace(\"}\", \"\\n}\\n\")\n line = non_str.replace(r\"??!%s!?\", str_part)\n\n self.file_ltxt[line_num] = line\n # print(self.file_ltxt)\n # raise SystemExit(\"BREAK\")\n # Re-split by line-end and remove blank lines\n self.file_ltxt = [i for i in '\\n'.join(self.file_ltxt).split('\\n')\n if not i.isspace() and i]", "def 
test_none_content_object_production(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" None %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('output:', tmpl.render(Context()).strip())", "def __init__(self, x, y, w, h, text=''):\n self.rect = pygame.Rect(x, y, w, h)\n self.color = COLOR_INACTIVE\n self.text = text\n self.txt_surface = FONT.render(text, True, self.color)\n self.active = False", "def retranslate(self):\r\n pass", "def retranslate(self):\r\n pass", "def parse_obj(lt_objs,content):\n\n # loop over the object list\n\n\n for obj in lt_objs:\n\n # if it's a textbox, print text and location\n if isinstance(obj, pdfminer.layout.LTRect):\n content[0].append(int(obj.x0))\n content[0].append(int(obj.x1))\n content[1].append(int(obj.y1))\n content[1].append(int(obj.y0))", "def text_objects(self, text, font, color):\n\n text_surface = font.render(text, True, color)\n return text_surface, text_surface.get_rect()", "def blit(obj):\n # An object is a dictionary with the keys: 'dungeon', 'room', 'symbol', 'pos', 'lastpos'\n x = obj['pos'][0]\n y = obj['pos'][1]\n lastx= obj['lastpos'][0]\n lasty = obj['lastpos'][1]\n room = obj['room']\n if x < len(room) and y < len(room[0]):\n room[lastx][lasty] = ' '\n room[x][y] = obj['symbol']", "def __init__(self, text, tag, start ,end):\n\n self.text = six.text_type(text)\n self.tag = copy.copy(tag)\n self.end = end\n self.start = start", "def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)", "def __init__(self):\n self.text = ''", "def __init__(self, camera=None, light=None, font=None, string=None,\r\n x=0.0, y=0.0, z=1.0,\r\n sx=DEFAULT_FONT_SCALE, sy=DEFAULT_FONT_SCALE,\r\n is_3d=True, size=DEFAULT_FONT_SIZE,\r\n rx=0.0, ry=0.0, rz=0.0, justify=\"C\"):\r\n if not is_3d:\r\n sy = sx = size * 4.0\r\n super(String, self).__init__(camera, light, \"\", x, y, z,\r\n rx, ry, rz, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0)\r\n\r\n if VERBOSE:\r\n print(\"Creating string ...\")\r\n\r\n self.verts = []\r\n self.texcoords = []\r\n self.norms = []\r\n self.inds = []\r\n temp_verts = []\r\n\r\n xoff = 0.0\r\n yoff = 0.0\r\n lines = 0\r\n if not isinstance(string, text_type):\r\n string = string.decode('utf-8')\r\n nlines = string.count(\"\\n\") + 1\r\n\r\n def make_verts(): #local function to justify each line\r\n if justify.upper() == \"C\":\r\n cx = xoff / 2.0\r\n elif justify.upper() == \"L\":\r\n cx = 0.0\r\n else:\r\n cx = xoff\r\n for j in temp_verts:\r\n self.verts.append([(j[0] - cx) * sx,\r\n (j[1] + nlines * font.height * GAP / 2.0 - yoff) * sy,\r\n j[2]])\r\n\r\n default = font.glyph_table.get(unichr(0), None)\r\n for i, c in enumerate(string):\r\n if c == '\\n':\r\n make_verts()\r\n yoff += font.height * GAP\r\n xoff = 0.0\r\n temp_verts = []\r\n lines += 1\r\n continue #don't attempt to draw this character!\r\n\r\n glyph = font.glyph_table.get(c, default)\r\n if not glyph:\r\n continue\r\n w, h, texc, verts = glyph\r\n for j in verts:\r\n temp_verts.append((j[0]+xoff, j[1], j[2]))\r\n xoff += w\r\n for j in texc:\r\n self.texcoords.append(j)\r\n self.norms.extend(_NORMALS)\r\n\r\n # Take Into account unprinted \\n characters\r\n stv = 4 * (i - lines)\r\n self.inds.extend([[stv, stv + 2, stv + 1], [stv, stv + 3, stv + 2]])\r\n\r\n make_verts()\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))\r\n self.buf[0].textures = [font]\r\n 
self.buf[0].unib[1] = -1.0", "def stripTex(file):\n S_TEXT = 0\n S_INLINE = 1\n S_DISPLAY = 2\n S_DOLLAR_IN = 3\n S_DOLLAR_OUT = 4\n S_SEQUENCE = 5\n S_EXPECT_ARG = 6\n S_OPTIONAL = 7\n\n # sequences whose 1st argument content is not desired text\n forbidden = {\n 'begin', 'end', 'ref', 'eqref', 'usepackage', 'documentclass',\n 'probbatch', 'probno', 'probpoints', 'probsolauthors', 'probsolvers', 'probavg',\n 'illfig', 'fullfig', 'plotfig',\n 'eq'\n }\n\n\n # -- strip comments --\n lines = []\n for line in file.readlines():\n line += '%'\n lines.append(line[:line.index('%')]) # TODO \\%\n\n # -- strip mathematics and chosen sequence's arguments --\n # finite state machine with depth counter\n state = S_TEXT\n mode = S_TEXT\n depth = 0\n sequence = ''\n bracketStack = [] # contains either None or index in out where sequence argument starts\n out = []\n for c in ''.join(lines):\n #print(c, state)\n if state == S_TEXT:\n if c == '\\\\':\n state = S_SEQUENCE\n out.append(c)\n elif c == '$':\n state = S_DOLLAR_IN\n elif c == '{':\n out.append(c)\n bracketStack.append((len(out), None))\n elif c == '}':\n try:\n out.append(c)\n i, seq = bracketStack.pop() # not to shadow \"global\" sequence\n if seq != None and seq in forbidden:\n out = out[:i]\n except IndexError:\n print('Unmatched right bracket.')\n break\n else:\n out.append(c)\n elif state == S_INLINE:\n if c == '\\\\':\n state = S_SEQUENCE\n mode = S_INLINE\n elif c == '$':\n state = S_TEXT\n mode = S_TEXT\n elif c == '{':\n bracketStack.append((len(out), None))\n elif c == '}':\n try:\n bracketStack.pop() \n except IndexError:\n print('Unmatched right bracket.')\n break\n elif state == S_DISPLAY:\n if c == '\\\\':\n state = S_SEQUENCE\n mode = S_DISPLAY\n elif c == '$':\n state = S_DOLLAR_OUT\n elif c == '{':\n bracketStack.append((len(out), None))\n elif c == '}':\n try:\n bracketStack.pop() \n except IndexError:\n print('Unmatched right bracket.')\n break\n elif state == S_DOLLAR_OUT:\n if c == '$':\n state = S_TEXT\n mode = S_TEXT\n else:\n pass # stay in display mode\n elif state == S_DOLLAR_IN:\n if c == '$':\n state = S_DISPLAY\n mode = state\n else:\n state = S_INLINE\n mode = state\n elif state == S_SEQUENCE: \n if c in ascii_letters:\n if mode == S_TEXT: out.append(c)\n sequence += c\n elif c == '[':\n if mode == S_TEXT: out.append(c)\n state = S_OPTIONAL\n elif c == '{':\n state = mode\n if out[-1] == '\\\\': # backslashed brace\n out.append(c)\n else:\n bracketStack.append((len(out), sequence))\n sequence = ''\n if mode == S_TEXT: out.append(c)\n elif c == '}':\n try:\n out.append(c)\n i, seq = bracketStack.pop() # not to shadow \"global\" sequence\n if seq != None and seq in forbidden:\n out = out[:i]\n except IndexError:\n print('Unmatched right bracket.')\n break\n else:\n if mode == S_TEXT: out.append(c)\n state = mode\n sequence = ''\n elif state == S_OPTIONAL: # here we suppose no nested [, ]\n if c == ']':\n if mode == S_TEXT: out.append(c)\n state = S_EXPECT_ARG\n else:\n if mode == S_TEXT: out.append(c)\n elif state == S_EXPECT_ARG:\n if c == '{':\n bracketStack.append((len(out), sequence))\n sequence = ''\n if mode == S_TEXT: out.append(c)\n else:\n state = mode\n if mode == S_TEXT: out.append(c)\n else:\n print('Invalid state')\n break\n # end for\n noMath = ''.join(out)\n\n # -- finally simple regexp substitution --\n noMath = re.sub('~', ' ', noMath)\n noMath = re.sub(r'\\\\[a-zA-Z]+(\\[[^\\]]*\\])?', '', noMath)\n noMath = re.sub(r'[{}]', '', noMath)\n print(noMath)", "def reset(self):\n self.x = self.x_original\n 
self.alpha = self.alpha_original\n\n # Button \"background\" - active\n self.active_background_surface.set_alpha(self.alpha)\n # Button \"background\" - inactive\n self.inactive_background_surface.set_alpha(self.alpha)\n\n # active\n self.active_text_surface = self.active_font.render(self.text, True, self.color_text)\n self.active_textRect = self.active_text_surface.get_rect()\n # inactive\n self.inactive_text_surface = self.inactive_font.render(self.text, True, self.color_text)\n self.inactive_textRect = self.inactive_text_surface.get_rect()\n\n if self.text_alignment == 'CENTER':\n self.active_textRect.center = ((self.x + (self.rect.w / 2)), (self.y + (self.rect.h / 2)))\n self.inactive_textRect.center = ((self.x + (self.rect.w / 2)), (self.y + (self.rect.h / 2)))\n\n elif self.text_alignment == 'RIGHT':\n self.active_textRect.centery = self.y + (self.rect.h / 2)\n self.active_textRect.right = self.x + self.w - 15 # padding of 15\n self.inactive_textRect.centery = self.y + (self.rect.h / 2)\n self.inactive_textRect.right = self.x + self.w - 15 # padding of 15\n\n else: # LEFT (or invalid)\n self.active_textRect.centery = self.y + (self.rect.h / 2)\n self.active_textRect.left = self.x + 15 # padding of 15\n self.inactive_textRect.centery = self.y + (self.rect.h / 2)\n self.inactive_textRect.left = self.x + 15 # padding of 15", "def __init__(\n self,\n universe,\n text,\n pos,\n frame_pos=None,\n main_font=settings.main_font,\n color=(58, 56, 0),\n second_font=settings.second_font,\n var=False,\n rotate=None,\n box=None,\n size=\"regular\",\n ):\n pos = p(pos)\n self.font = main_font\n self.frame_pos = frame_pos\n self.text = text\n self.color = color\n if main_font == settings.second_font:\n main_font_size = settings.second_font_size\n elif size == \"small\":\n main_font_size = settings.third_font_size\n else:\n main_font_size = settings.main_font_size\n self.fontA = self.fontA or pygame.font.Font(\n os.path.join(directory.fonts, self.font), main_font_size\n )\n self.fontB = self.fontB or pygame.font.Font(\n os.path.join(directory.fonts, settings.second_font),\n settings.second_font_size,\n )\n self.position = pos\n if box:\n self.box = pygame.Surface(p(box), pygame.SRCALPHA).convert_alpha()\n self.adjusting_fonts()\n self.image = self.box\n self.size = self.image.get_size()\n self.pos = [\n self.position[0] - (self.size[0] / 2),\n self.position[1] - (self.size[1] / 2),\n ]\n else:\n self.image = self.fontA.render(self.text, 1, self.color)\n if rotate:\n self.image = pygame.transform.rotate(self.image, rotate)\n self.size = self.image.get_size()\n if self.frame_pos:\n self.pos = [\n self.frame_pos[0] + self.position[0] - (self.size[0] / 2),\n self.frame_pos[1] + self.position[1] - (self.size[1] / 2),\n ]\n else:\n self.pos = [pos[0] - (self.size[0] / 2), pos[1] - (self.size[1] / 2)]\n\n self.variable_text = var\n self.text_box = self.size[0] * 0.8, self.size[1] * 0.8", "def regular(self):", "def __init__(self, text: str) -> None:\n\n super().__init__()\n\n self._width = 0\n self._opacity = 255\n self._sprites = []\n self._text = text\n for index, c in enumerate(text):\n y_offset = 0\n if c in Text.characters:\n if Text.characters[c][1]:\n y_offset = 2\n c = Text.characters[c][0]\n elif c.isupper():\n c = c.lower() + \"_maj\"\n self._sprites.append(\n cocos.sprite.Sprite(pyglet.image.load(PATH + '/assets/img/common/font/{0}.png'.format(c))))\n self._sprites[index].position = self._width, (self._sprites[index].height - 11) / 2 - y_offset\n self._width += self._sprites[index].width\n 
self.add(self._sprites[index])", "def preprocess(self):\n for texgroup in self.textureGroups.itervalues():\n texgroup.dirty = True", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new", "def __init__(self, ecran, text, police, taille, pos = (0,0), couleurText = (0,0,0), couleurFond = (255,255,255)):\n self.ecran = ecran\n self.txt = text # val String du texte\n self.police = police\n self.taille = taille\n self.pos = pos\n self.couleurText = couleurText\n self.couleurFond = couleurFond\n self.text = Texte(self.ecran, self.txt, self.police, self.taille, self.pos, self.couleurText) # objet Texte\n self.textRect = self.text.textRend.get_rect()\n self.dim = (self.textRect.w + 5, self.textRect.h + 5) # dims -> dimensions du texte + marge de 5px", "def _reset(self):\n\t\tself._style = TextStyle()", "def __init__(self, text):\n self.text = text\n self.letters = [letters[c] for c in self.text]\n self.width = sum(let.width + 1 for let in self.letters)\n self._offset = width\n self.is_done = False", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = ''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def __init__(self):\n super(LineStart, self).__init__(r\"^\", regex.MULTILINE)", "def __init__(self, skin_position: str, /):", "def test_generate_mine_text(self):\n pg.font.init()\n font_surface = utils.generate_mine_text(1)\n self.assertIsInstance(font_surface, pg.Surface)", "def text_objects(text, font, color=None):\n if color is None:\n color = BLACK\n textSurface = font.render(text, True, color)\n return textSurface, textSurface.get_rect()", "def _render(self):\n self.dirty = False\n self.image = self.font.render(self._text, self.aa, self.color_fg)\n self.rect = self.image.get_rect()", "def render(objects,\n output_encoding,\n title_force_uppercase,\n msdos_eol_style,\n omit_fields_mapping={}):", "def expose_bomb(self):\n self['bg'] = 'red'\n self['text'] = '*'", "def __gotoBrace(self):\n self.activeWindow().moveToMatchingBrace()", "def syntax_text():", "def unrendered(self) -> str:", "def __init__(self, ecran, text, police, taille, pos = (0,0), couleur = (0,0,0)):\n self.text = text\n self.pos = pos\n self.police = police\n self.taille = taille\n self.couleur = couleur\n self.ecran = ecran\n self.textRend = self.police.renderText(self.taille, self.text, self.couleur)", "def set_material(properties,object,finish,normal):\n if object not in properties:\n properties[object.getName()]={}\n properties[object.getName()][\"finish\"]=finish\n properties[object.getName()][\"normal\"]=normal", "def resetRenderable(objects):\n for obj, state in objects:\n obj.hide_render = state", "def get_code(self, obj):\n assert self.tmpl or 
obj.klass in ('spacer','sizerslot')#,'sizeritem')\n lines = []\n self._reset_vars()\n\n self._prepare_tmpl_content(obj)\n\n # generate choices automatically if the template contains '%(choices)s' or '%(choices_len)s'\n if '%(choices)s' in self.tmpl or '%(choices_len)s' in self.tmpl:\n self._prepare_choice(obj)\n\n # generate wxBitmap code\n self._prepare_bitmaps(obj)\n\n if self.tmpl_dict['id_name']:\n lines.append(self.tmpl_dict['id_name'])\n\n if self.tmpl_before:\n for line in self.tmpl_before:\n lines.append(line % self.tmpl_dict)\n\n lines.append(self.tmpl % self.tmpl_dict)\n\n if self.tmpl_after:\n for line in self.tmpl_after:\n lines.append(line % self.tmpl_dict)\n\n lines.extend( self.codegen.generate_code_common_properties(obj) )\n\n if self.tmpl_props:\n for line in self.tmpl_props:\n lines.append(line % self.tmpl_dict)\n\n if self.has_setvalue1:\n assert self.tmpl_setvalue\n assert not self.has_setvalue\n self.tmpl_dict['value_unquoted'] = '1'\n lines.append(self.tmpl_setvalue % self.tmpl_dict)\n\n if self.has_setvalue and self.tmpl_dict['value_unquoted']:\n assert self.tmpl_setvalue\n assert not self.has_setvalue1\n lines.append(self.tmpl_setvalue % self.tmpl_dict)\n\n if self.has_setdefault:\n assert self.tmpl_setdefault\n lines.append(self.tmpl_setdefault % self.tmpl_dict)\n\n if self.has_selection and self.tmpl_dict['selection']!=-1:\n assert self.tmpl_selection\n lines.append(self.tmpl_selection % self.tmpl_dict)\n\n if hasattr(self, \"get_more_properties_code\"):\n lines += self.get_more_properties_code(obj)\n\n #if not self.tmpl_dict['store_as_attr']:\n ## the object doesn't have to be stored as an attribute of the\n ## custom class, but it is just considered part of the layout\n #return [], init_lines + prop_lines\n return lines, []", "def test_custom_decorator_displaytex_no_maths(self):\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"tex\": \"a common string\", \"type\": \"TEXBLOCK\"},\n }\n },\n )\n ),\n '<span class=\"ashley-latex-display\">a common string</span>',\n )", "def __init__(self, text, start, end, color, alpha=1):\n self.text = text\n self.start = start\n self.end = end\n self.color = color\n self.alpha = alpha", "def __init__(self):\n super(StringStart, self).__init__(r\"^\")", "def reset(self):\n super().reset()\n self.prev_obj3_position = None", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def afficher(self):\n self.textRend = self.police.renderText(self.taille, self.text, self.couleur)\n self.ecran.surface.blit(self.textRend,self.pos)", "def __init__(self, text, idx):\n self.text = text\n self.idx = idx", "def refang(self, text: str):", "def __init__(self, name, rect, **kwargs):\n self.name = name\n self.rect = pg.Rect(rect)\n self.color = (128, 128, 128)\n self.font = pg.font.SysFont(\"arial\", 12)\n self.text = self.font.render(name, False, pg.Color(\"white\"))\n self.selected_text = self.font.render(name, False, pg.Color(\"black\"))\n self.text_rect = self.text.get_rect(center=self.rect.center)\n self.set_kwargs(kwargs)", "def realize_text_and_extract_scene(scene, template, filter_objs):\n\n default_list = lambda: collections.defaultdict(list)\n graph = {'relationships': collections.defaultdict(default_list),\n 'counts': {}, 'exists': {}, 'history': [], 'objects': {}}\n\n # number of inputs\n n_inputs = template.get('inputs', 1)\n # sample a text template\n text_sample 
= random.choice(template['text'])\n text_sample_index = template['text'].index(text_sample)\n\n # extract attribute tags and get them into groups\n tags = re.findall('(<[\\d\\w]*>)', text_sample)\n\n tag_groups = collections.defaultdict(list)\n for tag in tags:\n group_id = get_tag_group(tag)\n tag_groups[group_id].append(tag)\n\n # sample a random element from filtered\n arg_sample = random.choice(filter_objs)\n # scene information obtained from the current round\n graph_item = arg_sample['graph']\n\n # remove tags from text not allowed by filter_objs\n for arg_ind in range(n_inputs):\n obj_sample = arg_sample['objects'][arg_ind]\n avail_attrs = obj_sample['optional'] + obj_sample['required']\n\n for ii in tag_groups[arg_ind][::-1]:\n if mapping(ii) not in avail_attrs:\n tag_groups[arg_ind].remove(ii)\n text_sample = replace_attribute(text_sample, ii, arg_sample, True)\n\n # assert that all required attributes are present as tags\n for attribute in obj_sample['required']:\n required_tag = inv_mapping(attribute, arg_ind)\n assert required_tag in tag_groups[arg_ind], \\\n 'A required attribute is missing in template!'\n\n # start compiling tags to keep\n tags_to_keep = [inv_mapping(ii, arg_ind) for ii in obj_sample['required']]\n\n # filter out those not present in text template\n optional_tags = [inv_mapping(ii,arg_ind) for ii in obj_sample['optional']]\n optional_tags = [ii for ii in optional_tags if ii in tag_groups[arg_ind]]\n\n # if tags_to_keep is empty, sample from optional with 1:70 2:25 3:5\n if len(optional_tags) > 0:\n if len(tags_to_keep) > 0:\n n_tags_sample = [0, 1, 2]\n else: n_tags_sample = [1, 2, 3]\n n_sample = np.random.choice(n_tags_sample, 1,\n p=gvars.METAINFO['probabilities'],\n replace=False)\n # lower cap at the length of optional\n n_sample = min(n_sample[0], len(optional_tags))\n if n_sample > 0:\n tags_to_keep += random.sample(optional_tags, n_sample)\n\n # now create a dictionary of placeholders with actual attribute values\n for tag in tag_groups[arg_ind]:\n remove = tag not in tags_to_keep\n text_sample = replace_attribute(text_sample, tag, arg_sample, remove)\n\n # remove attributes from objects not included in tags_to_keep\n if 'objects' in graph_item:\n for ii in gvars.METAINFO['attributes']:\n if inv_mapping(ii, arg_ind) not in tags_to_keep:\n if ii in graph_item['objects'][arg_ind]:\n del graph_item['objects'][arg_ind][ii]\n\n # record the caption info\n graph_item['round'] = 0\n\n sample = {}\n sample['template_info'] = [copy.deepcopy(template)]\n del sample['template_info'][-1]['text']\n sample['template_info'][-1]['index'] = text_sample_index\n sample['caption'] = text_sample\n sample['dialog'] = []\n\n # append history, update scene graph, and save the new scene graph\n graph['history'].append(graph_item)\n sample['graph'] = utils.merge_update_scene_graph(graph, graph_item)\n return sample", "def __init__(self):\n\t\t# Setup fonts\n\t\tself.large_font = self._get_font(1,Annotator.THICK)\n\t\tself.large_font_outline = self._get_font(1,Annotator.THICK + Annotator.BORDER)\n\t\t\n\t\tself.small_font = self._get_font(0.5,Annotator.THIN)\n\t\tself.small_font_outline = self._get_font(0.5,Annotator.THIN + Annotator.BORDER)\n\t\t\n\t\t# Text colour\n\t\tself.colour = Annotator.COLOUR_BUSY\n\t\t\n\t\tself.forehead = (0,0,1,1)\n\t\tself.face = (0,0,1,1)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n 
TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, text):\n\n self.text = text", "def __init__(self):\n self.char = \"\"\n self.d = {}\n self.end = False", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def __init__(self, strings, eos_placement='※'):\n self.strings = strings\n self.eos_placement = eos_placement", "def __init__ (self,win,text='Press a key to continue',**kwargs):\n\n self.win = win\n \n self.text = visual.TextStim(win,text=text,**kwargs)", "def __init__ (self,win,text='Press a key to continue',**kwargs):\n\n self.win = win\n \n self.text = visual.TextStim(win,text=text,**kwargs)", "def __init__(self, ransom_text, magazine_text):\n self.ransom_text = ransom_text.lower()\n self.magazine_text = magazine_text.lower()", "def add_tex_to_binning(self, binning_dict):\n if 'reco' in binning_dict['name']:\n sub_string = 'reco'\n elif 'true' in binning_dict['name']:\n sub_string = 'true'\n else:\n sub_string = None\n if 'energy' in binning_dict['name']:\n binning_dict['tex'] = r'$E_{%s}$'%sub_string\n elif 'coszen' in binning_dict['name']:\n binning_dict['tex'] = r'$\\cos\\theta_{Z,%s}$'%sub_string", "def object(s='object'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(0)\n camera.status.imgtype = 'OBJECT'\n camera.status.object = s\n camera.status.update()", "def no_highlight(): #py:no_highlight\n RUR._no_highlight_()", "def detex(tex):\n \n #tex = '\\n'.join(reformat(tex, listed=True)[1:])\n global subs\n \n for old, new in subs.iteritems():\n tex = tex.replace(old, new)\n \n return tex.strip()", "def test_to_from_scene(self): # pragma: lpy\n super(TestObjDict, self).test_to_from_scene(_as_obj=True)", "def __init__(self, obj, color_tag):\n if isinstance(color_tag, string_types):\n color_tag = [color_tag]\n\n if isinstance(obj, Mark):\n color_tag.extend(obj.color_tag)\n obj = obj.obj\n\n super(Mark, self).__init__(color_tag=color_tag)\n self.obj = obj", "def remove_invisible_chars(mobject):\n\n iscode = False\n if mobject.__class__.__name__ == \"Text\":\n mobject = mobject[:]\n elif mobject.__class__.__name__ == \"Code\":\n iscode = True\n code = mobject\n mobject = mobject.code\n mobject_without_dots = VGroup()\n if mobject[0].__class__ == VGroup:\n for i in range(mobject.__len__()):\n mobject_without_dots.add(VGroup())\n mobject_without_dots[i].add(*[k for k in mobject[i] if k.__class__ != Dot])\n else:\n mobject_without_dots.add(*[k for k in mobject if k.__class__ != Dot])\n if iscode:\n code.code = mobject_without_dots\n return code\n return mobject_without_dots", "def __init__(self, *args, **kwargs):\n self._start = kwargs.pop(\"start\", 0)\n Sentence.__init__(self, *args, **kwargs)", "def setNotRenderable(objList):\r\n # declare a list of attributes for render:\r\n renderAttrList = [\"castsShadows\", \"receiveShadows\", \"motionBlur\", \"primaryVisibility\", \"smoothShading\", \"visibleInReflections\", \"visibleInRefractions\", \"doubleSided\", 
\"miTransparencyCast\", \"miTransparencyReceive\", \"miReflectionReceive\", \"miRefractionReceive\", \"miFinalGatherCast\", \"miFinalGatherReceive\"]\r\n shapeTypeList = ['nurbsCurve', 'nurbsSurface', 'mesh', 'subdiv']\r\n # find all children shapes:\r\n if objList:\r\n for obj in objList:\r\n objType = cmds.objectType(obj)\r\n # verify if the object is the shape type:\r\n if objType in shapeTypeList:\r\n # set attributes as not renderable:\r\n for attr in renderAttrList:\r\n try:\r\n cmds.setAttr(obj+\".\"+attr, 0)\r\n except:\r\n #print \"Error: Cannot set not renderable \", attr, \"as zero for\", obj\r\n pass\r\n # verify if the object is a transform type:\r\n elif objType == \"transform\":\r\n # find all shapes children of the transform object:\r\n shapeList = cmds.listRelatives(obj, shapes=True, children=True)\r\n if shapeList:\r\n for shape in shapeList:\r\n # set attributes as not renderable:\r\n for attr in renderAttrList:\r\n try:\r\n cmds.setAttr(shape+\".\"+attr, 0)\r\n except:\r\n #print \"Error: Cannot set not renderable \", attr, \"as zero for\", shape\r\n pass", "def __init__(self, index=None, text=None, unitType=None, isTextSection=False, filename=\"\", lineNum=0):\n # self.index is the index in the list of AsmChunk objs\n self.index = index\n self.text = text\n self.unitType = unitType\n self.isTextSection = isTextSection\n self.filename = filename\n self.lineNum = lineNum", "def __init__(self,text,position,xmid = False,fontsize = 36,backgroundcolor = (200,200,200),surface = None):\n pygame.font.init()\n basicfont = pygame.font.Font(None,fontsize)\n\n # Figure out the size of the image that will be drawn on and create that\n # image\n self.linewidths = []\n for x in text:\n self.texttemp = basicfont.render(x,0,(1,1,1))\n self.linewidths.append(self.texttemp.get_width())\n self.imagewidth = basicfont.render(text[self.linewidths.index(max(self.linewidths))],0,(1,1,1)).get_width()\n self.imageheight = len(text) * fontsize + (len(text)-1) * 10\n self.image = pygame.Surface((self.imagewidth,self.imageheight))\n self.image.fill(backgroundcolor)\n\n # Draw the text to the image\n n = 0\n for x in text:\n self.texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(self.texttemp,(0,n * fontsize + n * 10))\n n +=1\n\n # Set the position of the text. 
If xmid is passed in as true set the\n # pos to the top middle pixel of the text\n if xmid:\n self.pos = (position[0] - int(self.image.get_width() / 2),position[1])\n else:\n self.pos = position\n\n # Set up the information that will be needed to blit the image to a\n # surface\n self.blitinfo = (self.image, self.pos)\n\n # automatically blit the text onto an input surface\n if surface:\n surface.blit(*self.blitinfo)", "def change_match_type(self):\n self.term = None\n self.chars = None\n self.text.tag_remove('found', '1.0', tk.END)\n self.text.tag_remove('found.focus', '1.0', tk.END)", "def change_match_type(self):\n self.term = None\n self.chars = None\n self.text.tag_remove('found', '1.0', tk.END)\n self.text.tag_remove('found.focus', '1.0', tk.END)", "def __str__(self):\n return \"\"\"translate([{0!s}, {1!s}, {2!s}]) {{\n {3!s}\n}}\"\"\".format(self.vector[0], self.vector[1], self.vector[2], self.body)", "def __init__(self,text,position,xmid = False,surface = None,**kargs):\n\n # Initialize the pygame font class.\n pygame.font.init()\n\n # Unpack the **kargs dictionary\n fontsize = kargs.pop('fontsize',36)\n align = kargs.pop('align','l')\n\n # Create the font object\n basicfont = pygame.font.Font(None,fontsize)\n\n # Figure out the size of the image that will be drawn on and create that\n # image\n linewidths = []\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n linewidths.append(texttemp.get_width())\n # The width of the image is the width of the text that corresponds to\n # the index of linewidths that contains the largest number in linewidths\n self.imagewidth = basicfont.render(text[linewidths.index(max(linewidths))],0,(1,1,1)).get_width()\n self.imageheight = len(text) * fontsize + (len(text)-1) * 10\n self.image = pygame.Surface((self.imagewidth,self.imageheight))\n self.image.fill((200,200,200))\n\n # make the background transparent\n self.image.set_colorkey((200,200,200))\n\n # Draw the text to the image using the user chosen alignment\n n = 0\n if align == 'l':\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(texttemp,(0,n * fontsize + n * 10))\n n +=1\n elif align == 'c':\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(texttemp,(self.imagewidth // 2 - texttemp.get_width() // 2,n * fontsize + n * 10))\n n +=1\n elif align == 'r':\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(texttemp,(self.imagewidth - texttemp.get_width(),n * fontsize + n * 10))\n n +=1\n\n # Set the position of the text. 
If xmid is passed in as true set the\n # pos to the top middle pixel of the text\n if xmid:\n self.pos = (position[0] - int(self.image.get_width() / 2),position[1])\n else:\n self.pos = position\n\n # Set up the information that will be needed to blit the image to a\n # surface\n self.blitinfo = (self.image, self.pos)\n\n # automatically blit the text onto an input surface\n if surface:\n surface.blit(*self.blitinfo)", "def __init__(self,txt=u'',unicodeEncoding='utf-8',verbose=False,tagID=0):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup,self).__init__(__txt=None,__rawTxt=txt,\n __SCOPEUPDATED=False,__VERBOSE=verbose,\n __tagID=tagID,\n __unicodeEncoding=unicodeEncoding)\n self.__cleanText()", "def rebuild(self):\n self.set_image(self.ui_manager.get_universal_empty_surface())\n\n if self.text_block is not None:\n self.text_block.set_dimensions((self.rect_width, -1))\n\n self.relative_rect.height = self.text_block.rect.height\n self.relative_rect.width = self.text_block.rect.width\n self.rect.width = self.text_block.rect.width\n self.rect.height = self.text_block.rect.height", "def test_none_content_object_debug(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" None %}fallback{% endeditregion %}\n \"\"\")\n if is_django_15plus():\n with self.assertRaisesRegexp(ImproperlyConfigured,\n 'no object provided to the \"editregion\" '\n 'template tag forregion \"test\"'):\n tmpl.render(Context()).strip()\n else:\n with self.assertRaisesRegexp(ValueError,\n \"content_object was probably '', \"\n \"check the context provided\"):\n tmpl.render(Context()).strip()", "def break_up_by_substrings(self):\n new_submobjects = []\n curr_index = 0\n for tex_string in self.tex_strings:\n sub_tex_mob = SingleStringTexMobject(tex_string, **self.CONFIG)\n num_submobs = len(sub_tex_mob.submobjects)\n new_index = curr_index + num_submobs\n if num_submobs == 0:\n # For cases like empty tex_strings, we want the corresponing\n # part of the whole TexMobject to be a VectorizedPoint\n # positioned in the right part of the TexMobject\n sub_tex_mob.submobjects = [VectorizedPoint()]\n last_submob_index = min(curr_index, len(self.submobjects) - 1)\n sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT)\n else:\n sub_tex_mob.submobjects = self.submobjects[curr_index:new_index]\n new_submobjects.append(sub_tex_mob)\n curr_index = new_index\n self.submobjects = new_submobjects\n return self", "def start_character():\n p = char.CharacterProps(\"Arnold\",height=2,weight=120)\n M = char.Marine(props=p)\n M.desc() # test printout\n return False", "def at_object_creation(self):\r\n with open(\"./commands/CSW15.txt\") as word_file:\r\n self.db.csw15 = set(word.strip().upper() for word in word_file)\r\n self.db.centre = \"\" \r\n self.db.tiledict = {'A' : 9,\r\n 'B' : 2,\r\n 'C' : 2,\r\n 'D' : 4,\r\n 'E' : 12,\r\n 'F' : 2,\r\n 'G' : 3,\r\n 'H' : 2,\r\n 'I' : 9,\r\n 'J' : 1,\r\n 'K' : 1,\r\n 'L' : 4,\r\n 'M' : 2,\r\n 'N' : 6,\r\n 'O' : 8,\r\n 'P' : 2,\r\n 'Q' : 1,\r\n 'R' : 6,\r\n 'S' : 4,\r\n 'T' : 6,\r\n 'U' : 4,\r\n 'V' : 2,\r\n 'W' : 2,\r\n 'X' : 1,\r\n 'Y' : 2,\r\n 'Z' : 1,\r\n '?' : 0\r\n } #removing blanks from play; blanks make it very slow. 
Change here, in dict\r\n self.db.tilestring = list(''.join([L*self.db.tiledict[L] for L in string.ascii_uppercase+'?']))", "def test_render_templates():\n water_mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/water.xyz\", \"xyz\"))\n if not water_mol.name:\n water_mol.name = \"data/water.xyz\"\n\n main([\"-g\", \"/tmp/foo.ADF.in\"])\n main([\"/tmp/foo.ADF.in\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.in\").read().strip(),\n \"\"\"TITLE data/water.xyz\n\nCHARGE 0 0\n\nNumber of atoms\n 3\n\nATOMS Cartesian\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\"\"\",\n )\n\n main([\"-g\", \"/tmp/test.GAMESS.inp\"])\n main([\"/tmp/test.GAMESS.inp\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.inp\").read(),\n \"\"\" $CONTRL COORD=CART UNITS=ANGS $END\n\n $DATA\ndata/water.xyz\nC1\nO 8.0 0.0584027061 0.0584027059 0.0000000000\nH 1.0 1.0096135406 -0.0680162466 0.0000000000\nH 1.0 -0.0680162466 1.0096135407 0.0000000000\n $END\n\n\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/hello.GAMESSUK.inp\"])\n main([\"/tmp/hello.GAMESSUK.inp\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.inp\").read(), water_mol.to_string(\"gukin\"))\n\n main([\"-g\", \"/tmp/hello.world.Gaussian.gjf\"])\n main([\"/tmp/hello.world.Gaussian.gjf\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.gjf\").read(),\n \"\"\"#Put Keywords Here, check Charge and Multiplicity.\n\n data/water.xyz\n\n0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\n\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.Jaguar.in\"])\n main([\"/tmp/bar.Jaguar.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"jin\"))\n\n main([\"-g\", \"/tmp/foo.Molpro.inp\"])\n main([\"/tmp/foo.Molpro.inp\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.inp\").read(), water_mol.to_string(\"mp\"))\n\n main([\"-g\", \"/tmp/example.MOPAC.mop\"])\n main([\"/tmp/example.MOPAC.mop\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.mop\").read(),\n \"\"\"CHARGE=0 MS=0.0\ndata/water.xyz\n\nO 0.05840 1 0.05840 1 0.00000 1\nH 1.00961 1 -0.06802 1 0.00000 1\nH -0.06802 1 1.00961 1 0.00000 1\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.MPQC.in\"])\n main([\"/tmp/bar.MPQC.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"mpqcin\"))\n\n main([\"-g\", \"/tmp/foo.NWChem.nw\"])\n main([\"/tmp/foo.NWChem.nw\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.nw\").read(),\n \"\"\"start molecule\n\ntitle data/water.xyz\n\ngeometry units angstroms print xyz autosym\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nend\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/example.ORCA.inp\"])\n main([\"/tmp/example.ORCA.inp\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.inp\").read(),\n \"\"\"# data/water.xyz\n! 
Opt\n\n* xyz 0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\n*\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.Psi.dat\"])\n main([\"/tmp/bar.Psi.dat\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.dat\").read(),\n \"\"\"# data/water.xyz\n\nmolecule {\n0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nunits angstrom\n}\n\noptimize('scf')\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/example.QChem.in\"])\n main([\"/tmp/example.QChem.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"qcin\"))\n\n main([\"-g\", \"/tmp/foo.ZINDO.input\"])\n main([\"/tmp/foo.ZINDO.input\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.input\").read(), water_mol.to_string(\"zin\"))", "def __init__(self,seq=r'-\\|/',yoyo=False):\n\n self.seq=seq\n self.ndx=-1\n self.delta=1\n self.yoyo=yoyo", "def exemple():\r\n\r\n case_1 = \"\\u25CC\"\r\n case_1 = u\"{}\".format(case_1)\r\n fourmi_1_1 = \"\\u22C0\"\r\n fourmi_1_1 = u\"{}\".format(fourmi_1_1)\r\n fourmi_2_1 = \"\\u21CA\"\r\n fourmi_2_1 = u\"{}\".format(fourmi_2_1)\r\n fourmi_3_1 = \"\\u25BC\"\r\n fourmi_3_1 = u\"{}\".format(fourmi_3_1)\r\n fourmi_1_2 = \"\\u22C0\"\r\n fourmi_1_2 = u\"{}\".format(fourmi_1_2)\r\n fourmi_2_2 = \"\\u21C8\"\r\n fourmi_2_2 = u\"{}\".format(fourmi_2_2)\r\n fourmi_3_2 = \"\\u25B2\"\r\n fourmi_3_2 = u\"{}\".format(fourmi_3_2)\r\n clods_1 = \"\\u2726\"\r\n clods_1 = u\"{}\".format(clods_1)\r\n clods_2 = \"\\u2737\"\r\n clods_2 = u\"{}\".format(clods_2)\r\n clods_3 = \"\\u2739\"\r\n clods_3 = u\"{}\".format(clods_3)\r\n \r\n print(term.move_xy(82,3) + term.white + 'DEPOT : ' + (case_1))\r\n print(term.move_xy(82,5) + term.white + 'Clods de niveau 1 : ' + (clods_1))\r\n print(term.move_xy(82,6) + term.white + 'Clods de niveau 2 : ' + (clods_2))\r\n print(term.move_xy(82,7) + term.white + 'Clods de niveau 3 : ' + (clods_3))\r\n print(term.move_xy(82,8) + term.white + 'Fourmis de niveau 1 : ' + (fourmi_1_1) + ' ' + (fourmi_1_2))\r\n print(term.move_xy(82,9) + term.white + 'Fourmis de niveau 2 : ' + (fourmi_2_1) + ' ' + (fourmi_2_2))\r\n print(term.move_xy(82,10) + term.white + 'Fourmis de niveau 3 : ' + (fourmi_3_1) + ' ' + (fourmi_3_2))\r\n print(term.move_xy(82,12) + term.white + 'Joueur 1 vous jouez en rouge.')\r\n print(term.move_xy(82,13) + term.white + 'Joueur 2 vous jouez en jaune.')" ]
[ "0.5573216", "0.55616885", "0.5541053", "0.53553516", "0.52875495", "0.525706", "0.52527744", "0.52410287", "0.52339643", "0.52307165", "0.5166109", "0.5111728", "0.5094848", "0.5094848", "0.5079243", "0.50386226", "0.5038601", "0.50207543", "0.5015247", "0.5014118", "0.5002928", "0.49914813", "0.49869388", "0.49868008", "0.498572", "0.4973182", "0.4957988", "0.49558735", "0.49558735", "0.49553177", "0.4928541", "0.49022543", "0.49008554", "0.4900209", "0.48924065", "0.48840368", "0.4877502", "0.48738706", "0.4873268", "0.4871365", "0.48707175", "0.48670584", "0.4862653", "0.48603612", "0.48583582", "0.48574683", "0.4853273", "0.48442784", "0.4839059", "0.48227793", "0.48187742", "0.48186198", "0.4807468", "0.4807468", "0.48056957", "0.47992235", "0.4789658", "0.4788527", "0.4778973", "0.47770086", "0.47708777", "0.47708777", "0.47708777", "0.47708777", "0.47708777", "0.47708777", "0.47708777", "0.47708777", "0.47708777", "0.475458", "0.47525296", "0.47447696", "0.4739851", "0.47396883", "0.47396883", "0.47388405", "0.4736266", "0.47273028", "0.4722437", "0.47177926", "0.47118694", "0.47037598", "0.4703386", "0.47000116", "0.4692665", "0.46872532", "0.46859878", "0.46847212", "0.46847212", "0.46769094", "0.46675962", "0.46671692", "0.46600312", "0.46599683", "0.46592528", "0.4658867", "0.4649682", "0.46375346", "0.4636297", "0.46250004" ]
0.48657212
42
Reorganize existing submobjects one layer deeper based on the structure of tex_strings (as a list of tex_strings)
def break_up_by_substrings(self): new_submobjects = [] curr_index = 0 for tex_string in self.tex_strings: sub_tex_mob = SingleStringTexMobject(tex_string, **self.CONFIG) num_submobs = len(sub_tex_mob.submobjects) new_index = curr_index + num_submobs if num_submobs == 0: # For cases like empty tex_strings, we want the corresponding # part of the whole TexMobject to be a VectorizedPoint # positioned in the right part of the TexMobject sub_tex_mob.submobjects = [VectorizedPoint()] last_submob_index = min(curr_index, len(self.submobjects) - 1) sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT) else: sub_tex_mob.submobjects = self.submobjects[curr_index:new_index] new_submobjects.append(sub_tex_mob) curr_index = new_index self.submobjects = new_submobjects return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detex(tex):\n \n #tex = '\\n'.join(reformat(tex, listed=True)[1:])\n global subs\n \n for old, new in subs.iteritems():\n tex = tex.replace(old, new)\n \n return tex.strip()", "def realize_text_and_extract_scene(scene, template, filter_objs):\n\n default_list = lambda: collections.defaultdict(list)\n graph = {'relationships': collections.defaultdict(default_list),\n 'counts': {}, 'exists': {}, 'history': [], 'objects': {}}\n\n # number of inputs\n n_inputs = template.get('inputs', 1)\n # sample a text template\n text_sample = random.choice(template['text'])\n text_sample_index = template['text'].index(text_sample)\n\n # extract attribute tags and get them into groups\n tags = re.findall('(<[\\d\\w]*>)', text_sample)\n\n tag_groups = collections.defaultdict(list)\n for tag in tags:\n group_id = get_tag_group(tag)\n tag_groups[group_id].append(tag)\n\n # sample a random element from filtered\n arg_sample = random.choice(filter_objs)\n # scene information obtained from the current round\n graph_item = arg_sample['graph']\n\n # remove tags from text not allowed by filter_objs\n for arg_ind in range(n_inputs):\n obj_sample = arg_sample['objects'][arg_ind]\n avail_attrs = obj_sample['optional'] + obj_sample['required']\n\n for ii in tag_groups[arg_ind][::-1]:\n if mapping(ii) not in avail_attrs:\n tag_groups[arg_ind].remove(ii)\n text_sample = replace_attribute(text_sample, ii, arg_sample, True)\n\n # assert that all required attributes are present as tags\n for attribute in obj_sample['required']:\n required_tag = inv_mapping(attribute, arg_ind)\n assert required_tag in tag_groups[arg_ind], \\\n 'A required attribute is missing in template!'\n\n # start compiling tags to keep\n tags_to_keep = [inv_mapping(ii, arg_ind) for ii in obj_sample['required']]\n\n # filter out those not present in text template\n optional_tags = [inv_mapping(ii,arg_ind) for ii in obj_sample['optional']]\n optional_tags = [ii for ii in optional_tags if ii in tag_groups[arg_ind]]\n\n # if tags_to_keep is empty, sample from optional with 1:70 2:25 3:5\n if len(optional_tags) > 0:\n if len(tags_to_keep) > 0:\n n_tags_sample = [0, 1, 2]\n else: n_tags_sample = [1, 2, 3]\n n_sample = np.random.choice(n_tags_sample, 1,\n p=gvars.METAINFO['probabilities'],\n replace=False)\n # lower cap at the length of optional\n n_sample = min(n_sample[0], len(optional_tags))\n if n_sample > 0:\n tags_to_keep += random.sample(optional_tags, n_sample)\n\n # now create a dictionary of placeholders with actual attribute values\n for tag in tag_groups[arg_ind]:\n remove = tag not in tags_to_keep\n text_sample = replace_attribute(text_sample, tag, arg_sample, remove)\n\n # remove attributes from objects not included in tags_to_keep\n if 'objects' in graph_item:\n for ii in gvars.METAINFO['attributes']:\n if inv_mapping(ii, arg_ind) not in tags_to_keep:\n if ii in graph_item['objects'][arg_ind]:\n del graph_item['objects'][arg_ind][ii]\n\n # record the caption info\n graph_item['round'] = 0\n\n sample = {}\n sample['template_info'] = [copy.deepcopy(template)]\n del sample['template_info'][-1]['text']\n sample['template_info'][-1]['index'] = text_sample_index\n sample['caption'] = text_sample\n sample['dialog'] = []\n\n # append history, update scene graph, and save the new scene graph\n graph['history'].append(graph_item)\n sample['graph'] = utils.merge_update_scene_graph(graph, graph_item)\n return sample", "def combine(data):\n res = []\n l = len(data['ents'])\n text = data['text']\n\n def combineHelper(path, idx, count):\n if count == 
2:\n e1 = path[0]['text']\n e1_label = path[0]['type']\n e2 = path[1]['text']\n e2_label = path[1]['type']\n # e11 = path[0]['start']\n # e12 = path[0]['end']\n # e21 = path[1]['start']\n # e22 = path[1]['end']\n # new_text = text[:e11] + '<e1>' + text[e11:e12] + '</e1> ' + text[e12:e21] + '<e2>' + text[e21:e22] +\\\n # '</e2>' + text[e22:]\n res.append(\n {\n \"text\": text,\n 'h': {'pos': (path[0]['start'], path[0]['end'])},\n 't': {'pos': (path[1]['start'], path[1]['end'])},\n \"e1\": e1,\n \"e2\": e2,\n \"e1_label\": e1_label,\n \"e2_label\": e2_label,\n \"e1_id\": path[0]['id'],\n \"e2_id\": path[1]['id']\n }\n )\n return\n else:\n for i in range(idx, l):\n path.append(data['ents'][i])\n combineHelper(path, i+1, count+1)\n path.pop()\n\n combineHelper([], 0, 0)\n return res", "def cut_item_texts(self, arrays=None):\n if not arrays: arrays = self.masks()\n for a in arrays:\n for item in self.sources(a):\n i = self._meta['columns'][item]\n for tk in self.valid_tks:\n text = self.text(item, True, tk)\n if text: i['text'][tk] = text\n for ed in ['x', 'y']:\n if i['text'].get('{} edits'.format(ed)):\n for tk in self.valid_tks:\n text = self.text(item, True, tk, ed)\n if text: i['text']['{} edits'.format(ed)][tk] = text\n return None", "def mapper(list_of_textlines):\n text = [i.lower() for i in list_of_textlines]\n text = [re.subn(\"\\s+|\\n+\", \" \", i)[0] for i in text]\n text = [re.subn(\"[.!@#$%^&*()-_+=,./?\\\"'|\\}{:;]+\", \" \", i)[0] for i in text]\n text = [re.split(\"\\s+\", i) for i in text]\n text = [[i for i in j if i != ''] for j in text]\n text = [i for i in text if len(i) > 0]\n text = [item for sublist in text for item in sublist]\n\n return text", "def fix_structure(trans_dict):\n\n for idx in range(len(trans_dict.features)):\n old_trans_dict = trans_dict.features[idx]\n if trans_dict.features[idx].type == 'exon':\n exon = trans_dict.features[idx]\n trans_dict.features[idx] = SeqFeature(exon.location, type = 'gene', strand = exon.strand, id = exon.id)\n trans_dict.features[idx].sub_features = [SeqFeature(exon.location, type = 'Transcript', strand = exon.strand, id = exon.id)] \n trans_dict.features[idx].sub_features[0].sub_features = [exon]\n elif len(trans_dict.features[idx].sub_features) > 0 and trans_dict.features[idx].sub_features[0].type == 'exon':\n exon = trans_dict.features[idx]\n trans_dict.features[idx] = SeqFeature(exon.location, type = 'gene', strand = exon.strand, id = exon.id)\n trans_dict.features[idx].sub_features = [exon]", "def update_textobjects(self):\n vc = _VimCursor(self)\n\n done = set()\n not_done = set()\n def _find_recursive(obj):\n if isinstance(obj, EditableTextObject):\n for c in obj._childs:\n _find_recursive(c)\n not_done.add(obj)\n _find_recursive(self)\n\n counter = 10\n while (done != not_done) and counter:\n for obj in sorted(not_done - done): # Order matters for python locals!\n if obj._update(done, not_done):\n done.add(obj)\n counter -= 1\n if counter == 0:\n raise RuntimeError(\"The snippets content did not converge: Check for Cyclic dependencies \"\n \"or random strings in your snippet. 
You can use 'if not snip.c' to make sure \"\n \"to only expand random output once.\")\n\n vc.to_vim()\n self._del_child(vc)", "def _get_transformations(self, current_text, indices_to_modify):\n transformed_texts = []\n words = current_text.words\n for idx in indices_to_modify:\n word = words[idx]\n # expend when word in map\n if word in EXTENSION_MAP:\n expanded = EXTENSION_MAP[word]\n transformed_text = current_text.replace_word_at_index(idx, expanded)\n transformed_texts.append(transformed_text)\n\n return transformed_texts", "def collate_sections(self,paper_text,section_list:List[Section],split_upto=0.2,split_bins=10):\n current_text_split = []\n prev_section = None\n curr_text = str(paper_text)\n unfound_sections = []\n some_section_not_found = False\n for index,s in enumerate(section_list):\n curr_text,section_status = self.split_and_find_section(curr_text,s.name,prev_section,split_upto=split_upto,split_bins=split_bins)\n if not section_status: # If couldn't match section add it here. \n some_section_not_found = True\n # print('\\n\\t'+s.name) \n prev_section = s \n for ss in s.subsections:\n curr_text,section_status = self.split_and_find_section(curr_text,ss.name,prev_section,split_upto=split_upto,split_bins=split_bins)\n if not section_status:\n some_section_not_found = True\n # print(\"Cannot Match For :\",ss.name)\n prev_section = ss\n # print('\\n\\t\\t'+ss.name)\n if index == len(section_list)-1:\n s.text = curr_text\n return section_list,some_section_not_found", "def merging_lines(document_list):\n final_list = []\n first_span = True\n for i, page in enumerate(document_list):\n if i == 10:\n print(\"HERE\")\n page_list = []\n for span in page:\n if first_span:\n page_list.append(span)\n first_span = False\n else:\n boolean, t = h.check_same_line(page_list[-1], span)\n if boolean and not h.check_inline_subs(page_list[-1], span):\n page_list[-1] = merge(page_list[-1], span, t)\n else:\n page_list.append(span)\n first_span = True\n final_list.append(page_list)\n for page in final_list:\n for span in range(len(page)):\n page[span] = h.fix_spaces(page[span])\n '''\n final_list = []\n first_span = True\n for i, page in enumerate(document_list):\n page_list = []\n for line in page:\n new_line = {\"text\": \"\", \"bbox\": line['bbox'], \"dir\": line['dir'], \"font\": \"\", \"size\": 0}\n for span in line['spans']:\n new_line = merge(new_line, span)\n page_list.append(new_line)\n final_list.append(page_list)\n for page in final_list:\n for span in range(len(page)):\n page[span] = h.fix_spaces(page[span])\n '''\n return final_list", "def createStructuredTranscript_Non_Core_Doc():\n\n #create a temporary folder that will hold the data transformed from doc to docx\n os.system('mkdir ' + INPUT_FOLDER+'temp')\n\n core_doc_asset = []\n missing_count = 0\n missing_files=[]\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.doc\"):\n\n # RG numbers for the core asset\n if (\"RG-50.030\" not in file and\n \"RG-50.106\" not in file and\n \"RG-50.549\" not in file):\n \n\n \n # convert file to docx, storing it in an untracked folder called temp\n file_docx = file + 'x'\n command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]\n call(command, shell=True)\n\n # append to the array\n core_doc_asset.append(file_docx)\n \n\n \n\n # get the units for each file, store them and update tracker\n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n \n not_processed=0\n processed_doc=0\n \n # get the 
units for each file, store them and update tracker \n for mongo_rg in core_doc_asset:\n # get text units for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n \n \n \n units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_doc\")\n\n not_processed=not_processed+1\n\n if False in processed:\n\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n not_processed=not_processed+1\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n else:\n # insert units on the output collection\n h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n \n\n #delete the temporary folder\n os.system('rm -r ' + INPUT_FOLDER+'temp')\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')\n file.write('\\n'.join(missing_files))\n\n \n # success\n pprint.pprint(\"Non-core doc files were successfully processed, but there are \" + str(missing_count) + \" missing\")", "def test_post_build_processing_remove_occurrences(self):\n it = [\n \"[[Chapter]] Chapter I\",\n \"This is chapter I text\",\n \"[[Article]] Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Article'],\n 'patterns': ['Chapter', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n descriptor = extend_internal_patterns(descriptor)\n descriptor = compile_patterns(descriptor)\n\n doc = post_build_process(doc, descriptor)\n\n result = [n['text'] for _, n in doc.traverse()]\n expected = [[], [\"Chapter I\", \"This is chapter I text\"], [\"Article I\", \"This is article I text\"]]\n self.assertListEqual(result, expected)", "def _get_transformations(self, current_text, indices_to_modify):\n transformed_texts = []\n\n for i in indices_to_modify:\n new_words = self._get_new_words(current_text, i)\n\n new_transformted_texts = []\n for w in new_words:\n new_transformted_texts.append(\n current_text.insert_text_before_word_index(i, w)\n )\n transformed_texts.extend(new_transformted_texts)\n\n return transformed_texts", "def textparse(self,\r\n analysetext,\r\n depth=0,\r\n keys=None,\r\n re_entering=False,\r\n newindex=Index(1)):\r\n if keys is None:\r\n keys = set()\r\n if LEFTNOTE not in analysetext \\\r\n or extract.embedded_extract(analysetext)[2] == 0:\r\n return\r\n #test if it contains embedded text\r\n\r\n## ee = extract.embedded_extract(RIGHTNOTE.join(LEFTNOTE.\r\n##join(analysetext.split(LEFTNOTE)[1:]).split(RIGHTNOTE)[:-1]),eliminate = True)\r\n\r\n ee_temp = extract.embedded_extract(analysetext)\r\n embeddedlist = ee_temp[0]\r\n\r\n if depth-1 in self.pass_key_dict:\r\n\r\n self.pass_key_dict[depth] = self.pass_key_dict[depth-1]\r\n else:\r\n self.pass_key_dict[depth] = [[list(keys)], []]\r\n\r\n emb_len = str(len(embeddedlist))\r\n\r\n for a_temp, phrase in enumerate(embeddedlist):\r\n if a_temp<10 or (a_temp>9 and a_temp<100\r\n and a_temp%10 == 0) or (a_temp>99\r\n and 
a_temp%100==0):\r\n #display counter for embedded notes\r\n print()\r\n print(str(a_temp)+'/'+emb_len)\r\n\r\n\r\n\r\n\r\n\r\n\r\n if extract.embedded_extract(phrase)[2] > 1:\r\n\r\n\r\n if phrase[0] == LEFTNOTE and phrase[-1] == RIGHTNOTE:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n RIGHTNOTE.join(LEFTNOTE.join(phrase.split(LEFTNOTE)[1:])\r\n .split(RIGHTNOTE)[:-1]),\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n else:\r\n newindex = self.textinterpret(\r\n extract.embedded_extract(\r\n phrase,\r\n eliminate=True)[1],\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n newindex = self.textparse(phrase[1:-1],\r\n depth+1,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n\r\n\r\n else:\r\n\r\n newindex = self.textinterpret(phrase,\r\n depth,\r\n re_entering=re_entering,\r\n newindex=newindex)\r\n print()\r\n return newindex", "def convert_to_t5_format(nlp, texts):\n\n inputs = []\n outputs = []\n original_texts = []\n\n for text, doc in zip(texts, nlp.pipe(texts, n_process=-1)):\n\n pairs = set()\n\n for chunk in doc.noun_chunks:\n if chunk.text == text:\n continue\n input_ = text[0 : chunk.start_char] + \"<extra_id_0> \" + text[chunk.end_char + 1 :]\n output = \"<extra_id_0> \" + chunk.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n left_edge_i = token.left_edge.i\n right_edge_i = token.right_edge.i\n chunk_length = right_edge_i - left_edge_i + 1\n if chunk_length / len(doc) > 0.5 or chunk_length > 10: # if chunk is too long, just skip it\n continue\n\n input_ = str(doc[:left_edge_i]) + \" <extra_id_0> \" + str(doc[right_edge_i + 1 :])\n output = \"<extra_id_0> \" + str(doc[left_edge_i : right_edge_i + 1]) + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n if token.pos_ in [\"NOUN\", \"PRON\", \"PROPN\"]: # we don't want to mask parts of noun chunks\n continue\n input_ = str(doc[: token.i]) + \" <extra_id_0> \" + str(doc[token.i + 1 :])\n output = \"<extra_id_0> \" + token.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for (input_, output) in pairs:\n inputs.append(input_)\n outputs.append(output)\n original_texts.append(text)\n\n return inputs, outputs, original_texts", "def convert_pattern_format(text):\n parsed_text = []\n # parse text via Pattern's parser\n pattern_parsed_text = Text(parse(text, relations=True, lemmata=True))\n for sentence in pattern_parsed_text:\n s = Sentence()\n s.string = remove_blanks(sentence.string)\n for word in sentence:\n # Patterns tags for each word in the sentence are stored in a new Word-object\n w = Word()\n w.string = word.string\n w.lemma = word.lemma\n w.index = word.index\n w.tag = word.type\n w.entity = \"\"\n # each word is appended to a Sentence-object\n s.words.append(w)\n # each Sentence-object is appended to an array\n parsed_text.append(s)\n return parsed_text", "def normalization(obj):\n dic = obj.mainfield.para_dict.copy()\n for item in obj.forfield: dic.update(item.para_dict)\n for item in obj.existfield: dic.update(item.para_dict)\n\n global_dic = number_type(dic)\n obj.normal_guards = norm_rep(global_dic, obj.all_sentence)\n\n main_dic = number_type(obj.mainfield.para_dict)\n obj.mainfield.content = norm_rep(main_dic, obj.mainfield.content)\n\n for index in range(len(obj.forfield)):\n obj.forfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # for_dic = number_type(temp_dic)\n 
obj.forfield[index].content = norm_rep(global_dic, obj.forfield[index].content)\n print(global_dic, obj.forfield[index].para_dict)\n obj.forfield[index].para_dict = pair_2_dict(global_dic, obj.forfield[index].para_dict)\n\n for index in range(len(obj.existfield)):\n obj.existfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # exist_dic = number_type(temp_dic)\n obj.existfield[index].content = norm_rep(global_dic, obj.existfield[index].content)\n obj.existfield[index].para_dict = pair_2_dict(global_dic, obj.existfield[index].para_dict)\n\n # change para_dict: {'i':'NODE} -> {'NODE_1', 'NODE'}\n obj.mainfield.para_dict = pair_2_dict(global_dic, obj.mainfield.para_dict)", "def scene_to_text(scenes):\n scene_text_dict = []\n scene_text_list = []\n for i, scene in enumerate(scenes):\n if len(scene['frame_data']) == 0:\n break\n scene_image = Image.fromarray(scene['frame_data'])\n str_text = pytesseract.image_to_string(scene_image)\n #list_text = list(filter(('').__ne__, re.split(\" |\\n|, |. |:|.\\n|\\x0c\", str_text)))\n list_text = list(filter(('').__ne__, re.split(\" |\\n\", str_text)))\n bag_of_word = collections.Counter(list_text)\n scene_text_dict.append(\n {'start': scene['start'], \n 'end': scene['end'], \n 'bag_of_word': dict(bag_of_word)\n })\n scene_text_list.append(list_text)\n return scene_text_dict, scene_text_list", "def reformatList( listOfPaths):\n newList = []\n first = True\n for seg in listOfPaths: \n newList += seg.asSVGCommand(first)\n first = False\n return newList", "def read_documents(file_path: str) -> List[Tuple[str, List[Tuple[str, List[str]]]]]:\n print(f'Reading SciREX documents from {file_path}')\n with open(file_path, 'r') as json_file:\n json_list = list(json_file)\n\n papers = []\n for json_str in json_list:\n papers.append(json.loads(json_str))\n\n def find_index_in_array(index, array):\n for array_index, (start, end) in enumerate(array):\n if end > index:\n return array_index\n\n result = []\n for paper in papers:\n result_sections = []\n\n # Populate the sentences list with section information.\n for index, section in enumerate(paper['sections']):\n # Get the first sentence of the section.\n index = find_index_in_array(section[0], paper['sentences'])\n sentence = paper['sentences'][index]\n # The section name is the first sentence of the section.\n section_name = paper['words'][sentence[0]:sentence[1]]\n\n # Example for the first sentence on a section:\n # [\"section\", \":\", \"Abstract\"]\n # If the first sentence starts with [\"section\", \":\"], we are only interested in the words after that prefix.\n if len(section_name) >= 2 and section_name[1] == \":\":\n section_name_length = len(section_name)\n section_name = section_name[2:]\n else:\n section_name_length = 0\n if index == 0:\n # First section will always be labled as 'Title'\n section_name = ['Title']\n else:\n section_name = []\n\n result_sections.append((\" \".join(section_name), []))\n\n words = paper['words']\n for info in paper['sentences']:\n sentence = words[info[0]:info[1]]\n section_index = find_index_in_array(info[0], paper['sections'])\n\n result_sections[section_index][1].append(\" \".join(sentence))\n\n result.append((str(paper['doc_id']), result_sections))\n\n return result", "def _merge_conllu_subtokens(self, lines: List[str], doc: Doc)-> Doc:\n # identify and process all subtoken spans to prepare attrs for merging\n subtok_spans = []\n for line in lines:\n parts = line.split(\"\\t\")\n id_, word, lemma, pos, tag, morph, head, dep, 
_1, misc = parts\n if \"-\" in id_:\n subtok_start, subtok_end = id_.split(\"-\")\n subtok_span = doc[int(subtok_start) - 1 : int(subtok_end)]\n subtok_spans.append(subtok_span)\n # create merged tag, morph, and lemma values\n tags = []\n morphs = {}\n lemmas = []\n for token in subtok_span:\n tags.append(token.tag_)\n lemmas.append(token.lemma_)\n if token._.merged_morph:\n for feature in token._.merged_morph.split(\"|\"):\n field, values = feature.split(\"=\", 1)\n if field not in morphs:\n morphs[field] = set()\n for value in values.split(\",\"):\n morphs[field].add(value)\n # create merged features for each morph field\n for field, values in morphs.items():\n morphs[field] = field + \"=\" + \",\".join(sorted(values))\n # set the same attrs on all subtok tokens so that whatever head the\n # retokenizer chooses, the final attrs are available on that token\n for token in subtok_span:\n token._.merged_orth = token.orth_\n token._.merged_lemma = \" \".join(lemmas)\n token.tag_ = \"_\".join(tags)\n token._.merged_morph = \"|\".join(sorted(morphs.values()))\n token._.merged_spaceafter = (\n True if subtok_span[-1].whitespace_ else False\n )\n\n with doc.retokenize() as retokenizer:\n for span in subtok_spans:\n retokenizer.merge(span)\n\n return doc", "def process_sample(\n sample: Dict[str, Any],\n relation_vocab: Dict[str, int],\n spacy_model: Any,\n tokenizer: Any,\n) -> Tuple[Optional[Dict[str, Any]], Dict[str, int]]:\n\n processed_sample = {}\n\n if sample['num_pos_raters'] < 2:\n relation = NO_RELATION\n else:\n relation = sample['relation']\n if relation not in relation_vocab:\n relation_vocab[relation] = len(relation_vocab)\n label = relation_vocab[relation]\n processed_sample['target'] = [label]\n\n text = sample['annotated_text']\n\n # Remove subj and obj annotations from text and store position\n def find_span(input_text: str, pattern: Any,\n prefix_len: int) -> Tuple[int, int]:\n \"\"\"Find span corresponding to actual subj or obj strings.\"\"\"\n match = pattern.search(input_text)\n span_start = match.start() + prefix_len + 1\n # We want inclusive spans, hence -2 instead of -1\n span_end = match.end() - 2\n return (span_start, span_end)\n\n def replace_and_adjust(\n input_text: str, match: Any, prefix_len: int,\n inverted_mapping: np.ndarray) -> Tuple[str, np.ndarray]:\n \"\"\"Remove subj/obj annotations and adjust token mapping accordingly.\"\"\"\n\n original_span_start = match.start() + prefix_len + 1\n original_span_end = match.end() - 1\n actual_string = input_text[original_span_start:original_span_end]\n new_text = input_text[:match.start()] + actual_string + input_text[match\n .end():]\n\n # Inverted mapping maps from remaining tokens to positions in original text\n new_inverted_mapping = np.zeros(len(new_text), dtype=np.int32)\n new_inverted_mapping[:match.start()] = inverted_mapping[:match.start()]\n\n new_span_start = match.start()\n new_span_end = match.start() + len(actual_string)\n new_inverted_mapping[new_span_start:new_span_end] = inverted_mapping[\n original_span_start:original_span_end]\n new_inverted_mapping[new_span_end:] = inverted_mapping[original_span_end +\n 1:]\n\n return new_text, new_inverted_mapping\n\n inverted_mapping = np.arange(len(text))\n subj_pattern = re.compile('SUBJ{[^}]+}')\n subj_span = find_span(text, subj_pattern, len('SUBJ'))\n obj_pattern = re.compile('OBJ{[^}]+}')\n obj_span = find_span(text, obj_pattern, len('OBJ'))\n\n # Remove subj/obj annotations from text\n while True:\n subj_match = subj_pattern.search(text)\n if subj_match is 
None:\n break\n text, inverted_mapping = replace_and_adjust(text, subj_match, len('SUBJ'),\n inverted_mapping)\n\n while True:\n obj_match = obj_pattern.search(text)\n if obj_match is None:\n break\n text, inverted_mapping = replace_and_adjust(text, obj_match, len('OBJ'),\n inverted_mapping)\n\n # Adjust spans for removed tokens\n mapping = np.zeros(len(sample['annotated_text']), dtype=np.int32) - 1\n mapping[inverted_mapping] = np.arange(len(inverted_mapping))\n subj_span = (mapping[subj_span[0]], mapping[subj_span[1]])\n assert subj_span[0] != -1 and subj_span[1] != -1\n obj_span = (mapping[obj_span[0]], mapping[obj_span[1]])\n assert obj_span[0] != -1 and obj_span[1] != -1\n\n parsed_text = spacy_model(text)\n\n # We use spacy to parse text, identify noun chunks\n mention_char_spans = []\n mention_char_spans.append(subj_span)\n mention_char_spans.append(obj_span)\n\n def overlaps(first_span: Tuple[int, int], second_span: Tuple[int,\n int]) -> bool:\n\n def point_inside_span(point: int, span: Tuple[int, int]) -> bool:\n return span[0] >= point and point <= span[1]\n\n spans_overlap = (\n point_inside_span(first_span[0], second_span) or\n point_inside_span(first_span[1], second_span) or\n point_inside_span(second_span[0], first_span) or\n point_inside_span(second_span[1], first_span))\n\n return spans_overlap\n\n for chunk in parsed_text.noun_chunks:\n span_start_char = parsed_text[chunk.start].idx\n span_last_token = parsed_text[chunk.end - 1]\n span_end_char = span_last_token.idx + len(span_last_token.text) - 1\n char_span = (span_start_char, span_end_char)\n # Append only if does not overlap with subj or obj spans. In case spacy\n # mention annotation disagrees with tacred annotation, we want to favor\n # tacred.\n\n if not overlaps(char_span, subj_span) and not overlaps(char_span, obj_span):\n mention_char_spans.append(char_span)\n\n # Sort spans by start char\n start_chars = np.array([span[0] for span in mention_char_spans])\n sorted_indices = np.argsort(start_chars)\n sorted_positions = np.zeros_like(start_chars)\n sorted_positions[sorted_indices] = np.arange(len(sorted_positions))\n sorted_spans = [mention_char_spans[idx] for idx in sorted_indices]\n\n # Tokenize and get aligned mention positions\n _, text_ids, text_mask, mention_spans, span_indices = tokenization_utils.tokenize_with_mention_spans(\n tokenizer=tokenizer,\n sentence=text,\n spans=sorted_spans,\n max_length=FLAGS.max_length,\n add_bert_tokens=True,\n allow_truncated_spans=True,\n )\n\n processed_sample['text_ids'] = text_ids\n processed_sample['text_mask'] = text_mask\n\n # Subj and obj are the first elements of mention spans.\n subj_index = sorted_positions[0]\n obj_index = sorted_positions[1]\n\n # Some spans may be dropped by the BERT tokenizer. 
Here we map indices in the\n # original list of spans to the one returned by the tokenizer.\n reverse_span_indices = {\n original_idx: tokenized_idx\n for tokenized_idx, original_idx in enumerate(span_indices)\n }\n\n # Skip if subj or obj dropped.\n if (subj_index not in reverse_span_indices or\n obj_index not in reverse_span_indices):\n return None, relation_vocab\n\n subj_index = reverse_span_indices[subj_index]\n obj_index = reverse_span_indices[obj_index]\n\n # Make sure we don't discard subj or obj\n assert max(subj_index, obj_index) < FLAGS.max_mentions\n\n processed_sample['subject_mention_indices'] = [subj_index]\n processed_sample['object_mention_indices'] = [obj_index]\n\n mention_spans = np.array(mention_spans)\n mention_start_positions = mention_spans[:, 0]\n mention_end_positions = mention_spans[:, 1]\n\n mention_start_positions = mention_start_positions[:FLAGS.max_mentions]\n mention_end_positions = mention_end_positions[:FLAGS.max_mentions]\n\n mention_pad_shape = (0, FLAGS.max_mentions - len(mention_start_positions))\n\n mention_mask = np.ones(len(mention_start_positions), dtype=np.int64)\n mention_mask = np.pad(mention_mask, mention_pad_shape, mode='constant')\n mention_start_positions = np.pad(\n mention_start_positions, mention_pad_shape, mode='constant')\n mention_end_positions = np.pad(\n mention_end_positions, mention_pad_shape, mode='constant')\n\n processed_sample['mention_start_positions'] = mention_start_positions\n processed_sample['mention_end_positions'] = mention_end_positions\n processed_sample['mention_mask'] = mention_mask\n\n return processed_sample, relation_vocab", "def retokenize_object_final(actions_structure, obj_type, list_to_check):\n\n for i, action_dict in enumerate(actions_structure):\n obj_list = []\n doc = copy.copy(action_dict['doc'])\n for j in range(0, len(action_dict['doc'])):\n token = doc[j]\n if token.dep_ in list_to_check:\n if obj_type == 'action_subject' and (token.pos_ == 'DET' or token.text.lower() in ['what', 'who']) and not action_dict['question']:\n obj = replace_relative_subject(token, action_dict)\n else:\n span = doc[doc[j].left_edge.i: doc[j].right_edge.i + 1]\n obj = {'initial_value': span.text,\n 'indexes': (doc[token.i].left_edge.i, doc[token.i].right_edge.i + 1),\n 'replacement_value': span.text, 'main_index': j}\n if obj_type == 'auxiliar_object':\n obj = add_prep_to_auxiliar_object(doc, obj)\n\n if obj is not None:\n obj_list.append(obj)\n actions_structure[i]['context'][obj_type] = obj_list", "def trans(monitext):\n result = ''\n last_line = 'empty'\n\n while monitext:\n # newline character or empty line(s)\n matched = re.match(r'\\n+', monitext, re.M)\n\n if matched:\n result += matched.group()\n if len(matched.group()) > 1:\n last_line = 'empty'\n elif last_line == 'title':\n result += '\\n'\n last_line = 'empty'\n monitext = monitext[matched.end():]\n continue\n\n # code block\n matched = re.match(r'{{{.*?\\n((\\n|.)*?)\\n}}}', monitext, re.M)\n\n if matched:\n body = matched.groups()[0]\n result += '\\n\\t' + '\\n\\t'.join(body.split('\\n'))\n monitext = monitext[matched.end():]\n last_line = 'code'\n continue\n\n # header\n matched = re.match(r'^(=+) (.+) (=+)', monitext)\n\n if matched:\n title = matched.groups()[1]\n level = len(matched.groups()[0])\n\n if last_line != 'empty':\n result += '\\n'\n\n if level < 4:\n underscore = {2 : '=', 3 : '-'}[level] * mbstrlen(title)\n result += title + os.linesep + underscore\n else:\n result += ('#' * level) + \" \" + title\n monitext = monitext[matched.end():]\n\n 
last_line = 'title'\n\n continue\n\n # link\n matched = re.match(r'(.*)\\[([^\\s]+[ \\t]+)?(.+)\\]', monitext)\n\n if matched:\n pre = matched.groups()[0]\n url = matched.groups()[1]\n if url:\n url = url.strip()\n name = matched.groups()[2]\n\n if url:\n replaced = \"%s[%s](%s)\" % (pre, name, url)\n else:\n replaced = \"%s[%s](%s)\" % (pre, name, name)\n\n monitext = monitext[:matched.start()] + replaced\\\n + monitext[matched.end():]\n\n # important\n monitext = re.sub(r'\\'\\'\\'(.*?)\\'\\'\\'', r'**\\1**', monitext)\n\n # italic\n monitext = re.sub(r'\\'\\'(.*?)\\'\\'', r'_\\1_', monitext)\n\n # list\n matched = re.match(r'^(\\s*)\\* (.*)', monitext)\n\n if matched:\n depth = len(matched.groups()[0])\n body = matched.groups()[1]\n result += (depth - 1) * '\\t' + '* ' + body\n monitext = monitext[matched.end():]\n\n last_line = 'others'\n\n try:\n # Go to the next line\n index = monitext.index('\\n')\n result += monitext[:index]\n monitext = monitext[index:]\n except ValueError:\n result += monitext\n break\n\n return result", "def mk_eng_txt_files(self, **rewrite):\n\n\n print \"mk_eng_txt_files: RETRIEVING PAGE_LIST..........\"\n page_list = c_m.l_of_l_read(self.page_list_path)\n\n if len(page_list) < 1: # handling empty page_list case\n print \"mk_eng_txt_files: PAGE LIST IS NOT POPULATED, RUN HTML_File_Maker AND Text_Extractor MODULES FIRST\"\n else: # handling page_list for partially transalted sites\n print \"mk_eng_txt_files: IN CASE PAGE LIST ALREADY HAD SOME ENG_TEXT ENTRIES SETTING INITIAL new_page_list TO LAST KNOWN PAGE_LIST VERSION\"\n self.new_page_list = copy(page_list)\n\n\n\n # iterating throug unique text per page txts\n for data_set in page_list:\n self.p_text_f_name = data_set[2]\n print \"mk_eng_txt_files: TRANSLATING TEXT FROM FILE %s\" % self.p_text_f_name\n\n self.eng_p_text_f_name = \"eng_\" + self.p_text_f_name\n self.eng_p_text_f_path = self.text_eng_folder_path + self.eng_p_text_f_name\n\n self.page_text = c_m.simply_read(self.text_folder_path, self.p_text_f_name)\n\n # if page has less than 10 symbols it is not translated\n if len(self.page_text) < 10:\n print \"mk_eng_txt_files: NOT WORTH TRANSLATING, WRITING AS IS AND SKIPPING...\"\n c_m.simply_write(self.page_text, self.eng_p_text_f_path)\n\n elif len(self.page_text) > self.max_page_length:\n print \"mk_eng_txt_files: PAGE TEXT IS TOO LONG DEVIDING TO PARTS, TRANSLATING AND GETTING BACK FULL PAGE TEXT\"\n text_output = self.get_text_parts(**rewrite)\n\n else: # 10 < len(page_text) < 2000\n\n if rewrite[\"rewrite\"]:\n print \"mk_eng_txt_files: TRANSLATING IN REWRITE MODE\"\n text_output = self.get_text()\n\n\n elif not os.path.exists(self.eng_p_text_f_path):\n print \"mk_eng_txt_files: TRANSLATING IN ONLY ONCE MODE\"\n text_output = self.get_text()\n\n else:\n print \"mk_eng_txt_files: SKIPPING FILE, ALREADY TRANSLATED\"\n # continue\n\n # print \"WRITING TRANSLATED OUTPUT TO FILE: \", self.eng_p_text_f_name\n # c_m.simply_write(text_output, self.text_eng_folder_path, self.eng_p_text_f_name) \n data_set.append(self.eng_p_text_f_name) # updating dataset with eng_text file name\n self.new_page_list.append(data_set) # updating page list with updated entry\n\n\n print \"mk_eng_txt_files: DONE TRANSLATING SITE %s \" % self.domain\n print \"mk_eng_txt_files: UPDATING PAGE LIST WITH ENG TEXT FILE NAMES\"\n c_m.l_of_l_write(self.new_page_list, self.page_list_path)\n print \"mk_eng_txt_files: SITE TRANSLATION FINISHED, CLOSING CHROME WEBDIRVER\"\n self.loaded_driver.quit()", "def split_text(text: str) -> 
List[Dict[str, str]]:\n # split into paragraphs\n lines = text.splitlines()\n groups = common.group_list(lines, lambda a, _: a.strip() == '')\n paras = ['\\n'.join(item) for empty_line, item in groups if not empty_line]\n\n def _fallback(p, type):\n logging.warn(f'Wrong {type} format:\\n' + p)\n cells.append({'type': 'text', 'source': p})\n\n cells = []\n for p in paras:\n lines = p.splitlines() + ['']\n p += '\\n'\n if p.startswith('#'):\n # parse title\n if not _is_mark(lines[1:]):\n _fallback(p, 'title')\n else:\n m = re.match(r'#+ *', lines[0])\n cells.append({\n 'type': 'title',\n 'prefix': m[0],\n 'source': lines[0][m.span()[1]:],\n 'mark': '\\n'.join(lines[1:])})\n elif p.startswith('$$'):\n # parse equations\n m = re.findall(r'\\$\\$', p)\n if len(m) != 2:\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'equation', 'source': p})\n elif p.startswith('!['):\n # parse images\n if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]):\n _fallback(p, 'image')\n else:\n cells.append({'type': 'image', 'source': p})\n elif p.startswith('|'):\n # parse table\n for i, l in enumerate(lines):\n if not l.startswith('|'):\n break\n if not _is_mark(lines[i:]):\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'table', 'source': p})\n else:\n groups = common.group_list(lines, _list)\n for prefix, item in groups:\n if len(prefix.split('__')) == 2:\n prefix = prefix.split('__')[0]\n source = '\\n'.join(item)[len(prefix):]\n if prefix == '':\n cells.append({'type': 'text', 'source': source})\n else:\n cells.append({\n 'type': 'list',\n 'prefix': prefix,\n 'source': source})\n return cells", "def sort(self):\n \n ct=[]\n rt=[]\n wr=[]\n # search for tags that aren't in the right position\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n if c.wa:\n if not self.wa:\n self.wa=[]\n self.wa.extend(c.wa)\n if c.ct:\n newcts=[ct_tag for ct_tag in c.ct if ct_tag.name!=c.name]\n map(self.contigs[i].ct.remove,newcts)\n ct.extend(newcts)\n for j in range(len(c.reads)):\n r = c.reads[j]\n if r.rt:\n newrts=[rt_tag for rt_tag in r.rt if rt_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].rt.remove,newrts)\n rt.extend(newrts)\n if r.wr:\n newwrs=[wr_tag for wr_tag in r.wr if wr_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].wr.remove,newwrs)\n wr.extend(newwrs)\n # now sort them into their proper place\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n for ct_tag in ct:\n if ct_tag.name==c.name:\n if self.contigs[i].ct is None:\n self.contigs[i].ct=[]\n self.contigs[i].ct.append(ct_tag)\n if rt or wr:\n for j in range(len(c.reads)):\n r = c.reads[j]\n for rt_tag in rt:\n if rt_tag.name==r.rd.name:\n if self.contigs[i].reads[j].rt is None:\n self.contigs[i].reads[j].rt=[]\n self.contigs[i].reads[j].rt.append(rt_tag)\n for wr_tag in wr:\n if wr_tag.name==r.rd.name:\n if self.contigs[i].reads[j].wr is None:\n self.contigs[i].reads[j].wr=[]\n self.contigs[i].reads[j].wr.append(wr_tag)", "def generateMapping(text, extensions):\n extensions = filter(lambda e: e != 'markdown.extensions.footnotes', extensions)\n\n text_index, modified = preprocess(text)\n html_modified = markdown(modified, extensions=extensions)\n modified_index = findPlaceholderIndex(html_modified, len(text_index))\n return zip(text_index, modified_index)", "def placeNoteLists(self, text):\n if self.notes:\n o = OrderedDict()\n for label, info in self.notes.items():\n if 'seq' in info:\n i = info['seq']\n info['seq'] = label\n o[i] = info\n else:\n self.unreferencedNotes[label] = info\n\n if o:\n # 
sort o by key\n o = OrderedDict(sorted(o.items(), key=lambda t: t[0]))\n self.notes = o\n text_re = re.compile('<p>notelist(%s)(?:\\:([\\w|%s]))?([\\^!]?)(\\+?)\\.?[\\s]*</p>'\n % (self.c, self.syms), re.U)\n text = text_re.sub(self.fNoteLists, text)\n return text", "def outline_nodes(str_in):\n\n # TASKPAPER REGEX REQUIREMENTS ARE SIMPLER THAN MARKDOWN\n rgx_body = re.compile(r'(\\t*)([^\\t]*.*)$')\n rgx_tp_tsk = re.compile(r'^(\\t*)(\\-\\s.*)$')\n rgx_tp_prj = re.compile(r'^(\\t*)(\\s*)([^-\\s].*\\:)$')\n\n def _read_tags(dct_node):\n \"\"\" Store the key-value pairs and key list\n and return text leaving in-line tags in place\n but pruning off any tags at the end of the line\n \"\"\"\n str_text = dct_node[ATT_TEXT]\n\n bln_mode = False\n str_point = str_text\n\n # and then digest all tags, right to left, eating terminal tags.\n str_s_point = str_point.rstrip()\n i_end = len(str_s_point)\n lst_keys = []\n lst_not_duplicate = []\n rgx_tag = re.compile(RGX_TP_TAG)\n lst_matches = [_ for _ in rgx_tag.finditer(str_s_point)]\n for o_match in lst_matches:\n str_key = o_match.group(2)\n # Valid key assignment ? or a duplicate ?\n if str_key not in lst_keys:\n lst_keys.append(str_key)\n var_value = o_match.group(3)\n if var_value != None: #treat simple keys as boolean flags\n dct_node[ATT_TAGS][str_key] = var_value\n else:\n dct_node[ATT_TAGS][str_key] = ''\n lst_not_duplicate.append(True)\n else:\n lst_not_duplicate.append(False)\n\n # and now shed any string of non-duplicate tags from the end\n for i in reversed(range(len(lst_matches))):\n o_match = lst_matches[i]\n if lst_not_duplicate[i]:\n if i_end == o_match.end():\n i_end = o_match.start()\n else:\n break\n else:\n break\n\n\n # store any keys in textual order,\n lng_keys = len(lst_keys)\n if lng_keys:\n if lng_keys > 1:\n dct_node[ATT_TAG_NAMES] = lst_keys\n else:\n dct_node[ATT_TAG_NAMES] = lst_keys\n # and assign any remaining text\n if bln_mode or lng_keys:\n dct_node[ATT_TEXT] = str_s_point[0:i_end]\n\n\n def _set_tp_node(dct_node, var_type, o_match):\n \"\"\"set TP node properties by reference\"\"\"\n bln_empty = False\n if var_type != TYP_NOTE:\n dct_node[ATT_TYPE] = var_type\n if var_type != TYP_PROJ: # strip prefix\n dct_node[ATT_TEXT] = o_match.group(2)[2:]\n else: # or suffix\n dct_node[ATT_TEXT] = o_match.group(2) + o_match.group(3)[:-1]\n else:\n # str_text = dct_node[ATT_LINE].lstrip()\n dct_node[ATT_TEXT] = dct_node[ATT_TEXT].lstrip()\n if dct_node[ATT_LINE].lstrip() == '':\n dct_node[ATT_TYPE] = TYP_EMPTY\n bln_empty = True\n\n if not bln_empty:\n lng_indent = len(o_match.group(1))\n if lng_indent:\n dct_node[ATT_INDENT] = lng_indent\n\n str_vanilla = TYP_NOTE\n\n\n lst_nodes = [\n {ATT_ID:0, ATT_PARENT: None, ATT_LEVEL:0,\n ATT_CHILD_INDEX: None, ATT_INDENT:None, ATT_TYPE:TYP_ROOT,\n ATT_LINE_INDEX:None, ATT_TEXT_INDEX:None, ATT_TEXT:'',\n ATT_LINE:'', ATT_TAG_NAMES:[], ATT_TAGS:{},\n ATT_CHILN:[], ATT_PATH:[]}\n ] + [\n {ATT_ID:i+1, ATT_TYPE:str_vanilla, ATT_LINE:str_line,\n ATT_LINE_INDEX:i, ATT_TEXT:str_line, ATT_INDENT:0, ATT_TAGS:{},\n ATT_LEVEL:0, ATT_TAG_NAMES:[], ATT_CHILN:[], ATT_PATH:[]}\n for i, str_line in\n enumerate(str_in.splitlines())\n ]\n\n\n # MAIN PARSE LOOP TO DERIVE TYPE, AND OTHER ATTRIBUTES OF EACH NODE\n\n lng_txt = 0\n for dct_node in lst_nodes[1:]:\n # Maintain an index into the text\n # (Note that [ATT_ID] serves as a 1-based index to the lines)\n dct_node[ATT_TEXT_INDEX] = lng_txt\n\n str_point = dct_node[ATT_LINE]\n lng_chars = len(str_point)\n lng_txt += (lng_chars + 1) # splitlines is 
dropping \\n\n\n # IDENTIFY THE INDENT COUNT & NESTING LEVEL\n # Assume Note text until there is counter-evidence\n if lng_chars < 1:\n dct_node[ATT_TYPE] = TYP_EMPTY\n else:\n _read_tags(dct_node)\n str_point = dct_node[ATT_TEXT]\n o_match = rgx_tp_prj.match(str_point)\n\n if o_match != None:\n _set_tp_node(dct_node, TYP_PROJ, o_match)\n else:\n o_match = rgx_tp_tsk.match(str_point)\n if o_match != None:\n _set_tp_node(dct_node, TYP_TASK, o_match)\n else:\n o_match = rgx_body.match(str_point)\n if o_match != None:\n _set_tp_node(dct_node, TYP_NOTE, o_match)\n else:\n print \"Unexpected TP pattern:\" + str_point\n\n\n # Now that we know the provisional type of each node,\n # digest any infixed or postfixed tags\n # DETECT ANY REMAINING EMPTIES BEFORE WE TAKE OUT MODES & TAGS\n if dct_node[ATT_TYPE] != TYP_EMPTY:\n str_line = dct_node[ATT_LINE]\n str_rs_line = str_line.rstrip()\n if str_rs_line == '':\n dct_node[ATT_TEXT] = ''\n if dct_node[ATT_TYPE] == TYP_NOTE:\n dct_node[ATT_TYPE] = TYP_EMPTY\n\n return lst_nodes", "def flatten(self, name, codes, new_name=None, text_key=None):\n if not new_name:\n if '.' in name:\n new_name = '{}_rec'.format(name.split('.')[0])\n else:\n new_name = '{}_rec'.format(name)\n if not text_key: text_key = self.text_key\n label = self._meta['masks'][name]['text'][text_key]\n cats = self.item_texts(name)\n self.add_meta(new_name, 'delimited set', label, cats)\n for x, source in enumerate(self.sources(name), 1):\n self.recode(new_name, {x: {source: codes}}, append=True)\n return None", "def _recursive_content_parse(\n self, content: ContentType, parent_style: dict, ids: list\n ) -> None:\n style = parent_style.copy()\n ids = ids.copy()\n\n if isinstance(content, str):\n content = {'.': [content]}\n elif isinstance(content, (list, tuple)):\n content = {'.': content}\n\n if not isinstance(content, dict):\n raise TypeError(\n 'content must be of type dict, str, list or tuple: {}'\n .format(content)\n )\n\n elements = []\n for key, value in content.items():\n if key.startswith('.'):\n style.update(parse_style_str(key[1:], self.fonts))\n if isinstance(value, (int, float)):\n value = [str(value)]\n elif isinstance(value, str):\n value = [value]\n if not isinstance(value, (list, tuple)):\n raise TypeError(\n 'value of . 
attr must be of type str, list or tuple: {}'\n .format(value)\n )\n elements = value\n break\n\n style.update(process_style(content.get('style'), self.pdf))\n part_var = content.get('var')\n text_part = self._new_text_part(style, ids, part_var)\n text_part['ids'].extend(content.get('ids', []))\n\n if part_var is not None:\n elements = ['0']\n\n label = content.get('label')\n if label is not None:\n text_part['ids'].append('$label:' + label)\n ref = content.get('ref')\n if ref is not None:\n text_part['ids'].append('$ref:' + ref)\n uri = content.get('uri')\n if uri is not None:\n text_part['ids'].append('$uri:' + uri)\n outline = content.get('outline')\n if isinstance(outline, dict):\n text = outline.get('text', ''.join(str(e) for e in elements))\n level = outline.get('level', 1)\n if label is None:\n outline_label = str(uuid4())\n text_part['ids'].append('$label:' + outline_label)\n else:\n outline_label = label\n outline_ = {'text': text, 'level': level, 'label': outline_label}\n text_part['ids'].append('$outline:{}'.format(json.dumps(outline_)))\n\n is_last_string = False\n\n for element in elements:\n if isinstance(element, (int, float)):\n element = str(element)\n if isinstance(element, str):\n if element == '':\n continue\n lines = element.split('\\n')\n if not is_last_string:\n text_part = self._new_text_part(\n style, text_part['ids'], part_var, text_part\n )\n text_part['text'] += lines[0]\n for line in lines[1:]:\n self.content.append({'type': 'br'})\n text_part = self._new_text_part(\n style, text_part['ids'], part_var, text_part\n )\n text_part['text'] += line\n is_last_string = True\n elif isinstance(element, dict):\n self._recursive_content_parse(element, style, text_part['ids'])\n is_last_string = False\n else:\n raise TypeError(\n 'elements must be of type str or dict: {}'.format(element)\n )\n\n if text_part is not None and text_part['text'] == '':\n self.content.remove(text_part)", "def translate_files(root_directory, output_file):\n # type: ()\n\n root_depth = len(root_directory.split(\"/\"))\n\n dirs = [d for d in os.walk(root_directory) if d[0].count(\"/\") == root_depth]\n\n sections = get_sections_data(dirs, root_depth=root_depth)\n\n # generate subsection headers\n for section, subsections in sections.iteritems():\n keys = sorted(subsections.keys(), key=lambda x: subsections[x][0])\n\n text = \"\"\n for subsection in keys:\n text += \"\\n\\\\subsection{\" + subsection + \"}\\n\"\n text += subsections[subsection][1]\n\n sections[section] = text\n\n # generate section headers\n result = \"\"\n for section, section_data in sections.iteritems():\n result += \"\\\\section{\" + section + \"}\\n\" + section_data + \"\\n\\n\\n\"\n\n # write output to file\n with open(output_file, \"w\") as test:\n with open(\"maple2latex/out/primer\") as primer:\n test.write(primer.read() + result + \"\\n\\\\end{document}\\n\")", "def expand(text):\n for i in range(0, len(text)):\n first = text[i-1][3]\n last = text[(i+1) % len(text)][0]\n yield first+text[i]+last", "def assign_columns(dynamic_list, sub_list, column1, column3):\n document_list = []\n # print(sub_list)\n for i, page in enumerate(dynamic_list):\n group1 = []\n group2 = []\n group3 = []\n subtitle = 0\n group_counter = 0\n for group in page:\n if h.check_subs(group, sub_list):\n # print(group['text'])\n subtitle = 1\n elif subtitle and len(group['lines']) < 5 and len(group['lines'][0]['text']) < 35:\n # print(\"secondTime\\t\" + group['text'])\n subtitle += 1\n else:\n if h.get_bbox_length(group['bbox']) <= column3 and i == 
0:\n if subtitle:\n # print(subtitle)\n # print(group)\n for i in range(1, subtitle+1):\n page[group_counter-i]['column'] = 3\n group3.append(page[group_counter-i])\n subtitle = 0\n # if subtitle:\n # print(len(group['text']))\n # page[group_counter-1]['column'] = 3\n # group3.append(page[group_counter-1])\n # subtitle = False\n group['column'] = 3\n group3.append(group)\n elif h.get_bbox_length(group['bbox']) <= column1:\n if subtitle:\n # print(subtitle)\n # print(group)\n for i in range(1, subtitle+1):\n page[group_counter-i]['column'] = 2\n group2.append(page[group_counter-i])\n subtitle = 0\n # if subtitle:\n # page[group_counter-1]['column'] = 2\n # group2.append(page[group_counter-1])\n # subtitle = False\n group['column'] = 2\n group2.append(group)\n else:\n if subtitle:\n for i in range(1, subtitle+1):\n page[group_counter-i]['column'] = 1\n group1.append(page[group_counter-i])\n subtitle = 0\n # if subtitle:\n # page[group_counter-1]['column'] = 1\n # group1.append(page[group_counter-1])\n # subtitle = False\n group['column'] = 1\n group1.append(group)\n group_counter += 1\n page_list = [group1, group2, group3]\n document_list.append(page_list)\n return document_list", "def normalizeTexts(texts):\n fCW = 0\n for item in texts:\n fCW = max(len(item), fCW)\n for counter, item in enumerate(texts):\n texts[counter] = texts[counter].ljust(fCW + 1, '.')\n return (texts, fCW)", "def extractTextWithFullLayout(analyzed_data):\r\n\r\n data = []\r\n for page in analyzed_data:\r\n if not page:\r\n continue\r\n\r\n data.append([])\r\n for lt_obj in page:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n data[-1].append({\r\n 'type': 'text', # Might support more types (e.g. figures) in the future.\r\n 'text': lt_obj.get_text().split(\"\\n\"),\r\n 'layout': {\r\n 'x0': lt_obj.x0,\r\n 'x1': lt_obj.x1,\r\n 'y0': lt_obj.y0,\r\n 'y1': lt_obj.y1\r\n }\r\n })\r\n\r\n return data", "def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)", "def _process_layout(self, layout):\n # Here we just group text into paragraphs\n elements = []\n for lt_obj in layout:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n elements.append(Paragraph(lt_obj.get_text().strip()))\n elif isinstance(lt_obj, LTFigure):\n # Recursive...\n elements.extend(self._process_layout(lt_obj))\n return elements", "def get_data_form_report(language_code,template_type,report):\n substitution_words = [] \n children_dict = report.get_children()\n if (template_type in MULTIPLE_PROPERTIES.keys()):\n for parent, children in children_dict.iteritems():\n dict_aux = {MULTIPLE_PROPERTIES[template_type][0]:parent}\n for child in children:\n if (MULTIPLE_PROPERTIES[template_type][1] not in dict_aux.keys()):\n dict_aux[MULTIPLE_PROPERTIES[template_type][1]] = [unicode(child[language_code])]\n else:\n dict_aux[MULTIPLE_PROPERTIES[template_type][1]].append(unicode(child[language_code]))\n #substitution_words[template_type].append(unicode(child[language_code]))\n substitution_words.append(dict_aux)\n \n return substitution_words", "def _process_adc(texts, 
tab_separated_title, leading_labels, titles=[]):\n documents = []\n corpus_labels = set()\n for i, text in enumerate(texts):\n if text:\n title = \"Document\" + str(i + 1) if titles == [] else titles[i]\n features = {\"contentType\": \"Text\", \"sourceFileLine\": str(i)}\n \n if tab_separated_title:\n #example: title \\t start of text\n text = text.split(\"\\t\")\n title = str(text[0])\n text = \"\\t\".join(text[1:])\n \n if leading_labels:\n #example: !LB1 !Lb2 !LBL \\t start of text\n text = text.split(\"\\t\")\n doc_labels=[]\n for label in [f.strip() for f in text[0].split(\"!\") if f != \"\"]:\n features[label] = \"true\"\n corpus_labels.add(label)\n doc_labels.append(label)\n text = \"\".join(text[1:])\n features[\"Labels\"]=json.dumps(doc_labels)\n \n documents.append(Document(name=title,\n features=features,\n text=str(text),\n annotations=[Annotation(span_start=0,\n span_end=max(0, len(str(text)) - 1),\n type=\"TextBlock\",\n features={})]))\n return documents, list(corpus_labels)", "def extract_names(pages: Iterable[tuple[int, list[str]]]) -> DataT:\n found_first = False\n current_name: dict[str, Any] | None = None\n current_label: str | None = None\n current_lines: list[str] = []\n in_headings = True\n\n def start_label(label: str, line: str) -> None:\n nonlocal current_label, current_lines\n assert current_name is not None\n assert current_label is not None\n if label in current_name:\n if label in (\"Syntype\", \"Type Locality\"):\n label = f\"Syntype {line}\"\n assert (\n label not in current_name\n ), f\"duplicate label {label} in {current_name}\"\n current_name[current_label] = current_lines\n current_label = label\n current_lines = [line]\n\n for page, lines in pages:\n if current_name is not None:\n current_name[\"pages\"].append(page)\n for line in lines:\n if not found_first:\n if line.strip() in (\"TYPE SPECIMENS\", \"SPECIMENS\"):\n found_first = True\n continue\n # ignore family/genus headers\n if re.match(\n (\n r\"^\\s*(Genus|Family|Subfamily|Suborder|Order) [A-Z][a-zA-Z]+\"\n r\" [a-zA-Z\\.’, \\-]+(, \\d{4})?$\"\n ),\n line,\n ):\n in_headings = True\n continue\n # ignore blank lines\n if not line:\n continue\n if in_headings:\n if line.startswith(\" \"):\n continue\n else:\n in_headings = False\n if line.startswith(\" \"):\n current_lines.append(line)\n elif re.match(r\"^[A-Z][A-Z a-z-]+: \", line):\n start_label(line.split(\":\")[0], line)\n elif line.startswith(\"Lectotype as designated\"):\n start_label(\"Lectotype\", line)\n elif line.startswith(\"Neotype as designated\"):\n start_label(\"Neotype\", line)\n elif line.startswith(\n (\n \"This specimen\",\n \"Type \",\n \"No type\",\n \"There are\",\n \"No additional\",\n \"All \",\n \"Subspecies of \",\n \"Neotype designated \",\n \"Padre Island\",\n )\n ):\n start_label(\"comments\", line)\n elif line.startswith(\n (\"Secondary junior\", \"Primary junior\", \"Junior primary\")\n ):\n start_label(\"homonymy\", line)\n elif re.match(r\"^[\\d/]+\\. 
\", line):\n start_label(line.split(\".\")[0], line)\n elif line.startswith(\"USNM\"):\n start_label(line.split(\".\")[0], line)\n elif (\n current_label not in (\"name\", \"verbatim_citation\", \"homonymy\")\n and \":\" not in line\n ):\n # new name\n if current_name is not None:\n assert current_label is not None\n current_name[current_label] = current_lines\n assert any(\n field in current_name\n for field in (\n \"Holotype\",\n \"Type Locality\",\n \"Lectotype\",\n \"Syntype\",\n \"Syntypes\",\n \"No name-bearing status\",\n \"Neotype\",\n )\n ), current_name\n yield current_name\n current_name = {\"pages\": [page]}\n current_label = \"name\"\n current_lines = [line]\n elif current_label == \"name\":\n if re.search(\n r\"\\d|\\b[A-Z][a-z]+\\.|\\baus\\b|\\bDas\\b|\\bPreliminary\\b|\\., \", line\n ):\n start_label(\"verbatim_citation\", line)\n else:\n # probably continuation of the author\n current_lines.append(line)\n elif (\n current_label == \"verbatim_citation\"\n or current_label == \"homonymy\"\n or line.startswith(\"= \")\n ):\n start_label(\"synonymy\", line)\n else:\n assert False, f\"{line!r} with label {current_label}\"\n assert current_label is not None\n assert current_name is not None\n current_name[current_label] = current_lines\n yield current_name", "def process_labels(ctx, tex, chapter):\n headings = ['chapter'] + ['sub'*i + 'section' for i in range(4)]\n reh = r'(' + '|'.join(headings) + r'){(.+?)}'\n environments = ['thm', 'lem', 'exc', 'figure', 'equation']\n ree = r'begin{(' + '|'.join(environments) + r')}'\n rel = r'(\\w+)label{(.+?)}'\n rel2 = r'label{(.+?)}'\n bigone = r'\\\\({})|\\\\({})|\\\\({})|\\\\(caption)|\\\\({})'.format(reh, ree, rel, rel2)\n rx = re.compile(bigone)\n\n sec_ctr = [chapter] + [0]*(len(headings))\n env_ctr = [0]*len(environments)\n blocks = catlist()\n lastlabel = None\n lastidx = 0\n m = rx.search(tex, lastidx)\n while m:\n blocks.append(tex[lastidx:m.start()])\n lastidx = m.start()\n cmd = next_command(tex, lastidx)\n lastidx = cmd.end\n if m.group(2):\n # This is a sectioning command (chapter, subsection,...)\n name = m.group(2)\n i = headings.index(name)\n if i == 0:\n env_ctr = [0]*len(env_ctr)\n sec_ctr[i:] = [sec_ctr[i]+1]+[0]*(len(headings)-i-1)\n number = \".\".join([str(x) for x in sec_ctr[:i+1]])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n title = '{}&emsp;{}'.format(number, cmd.args[0])\n blocks.append(r'\\{}{{{}}}'.format(name, title))\n\n elif m.group(5):\n # This is an environment (thm, lem, ...)\n name = m.group(5)\n lastenv = name # save this for a caption command coming later...\n i = environments.index(name)\n env_ctr[i] += 1\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n if name in ctx.theoremlike_environments:\n nicename = ctx.named_entities[name]\n title = '{}&nbsp;{}'.format(nicename, number)\n blocks.append(r'\\begin{{{}}}[{}]'.format(name, title))\n else:\n blocks.append(r'\\begin{{{}}}'.format(name))\n\n elif m.group(6):\n # This is a labelling command (\\thmlabel, \\seclabel,...)\n label = \"{}:{}\".format(m.group(7), m.group(8))\n ctx.label_map[label] = (ctx.outputfile, lastlabel)\n\n elif m.group(9):\n # This is a caption command\n name = lastenv\n i = environments.index(name)\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n nicename = ctx.named_entities[name]\n title = 
'<span class=\"title\">{}&nbsp;{}</span>'.format(nicename, number)\n text = '{}&emsp;{}'.format(title, cmd.args[0])\n blocks.append(r'\\caption{{{}}}'.format(text))\n\n elif m.group(10):\n # This is a \\label command, probably the target of a pageref\n idd = gen_unique_id()\n blocks.append(\"<a id={}></a>\".format(idd))\n ctx.label_map[m.group(11)] = (ctx.outputfile, idd)\n\n m = rx.search(tex, lastidx)\n blocks.append(tex[lastidx:])\n return \"\".join(blocks)", "def apply_layer(self, text_index):\n footnotes = []\n for label in self.layer.keys():\n if is_contained_in(label, text_index):\n footnotes += [x['footnote_data']\n for x in self.layer[label]\n if 'footnote_data' in x]\n return 'footnotes', sorted(footnotes, key=lambda x: x['ref'])", "def _process_layout(self, layout):\r\n # Here we just group text into paragraphs\r\n elements = []\r\n for lt_obj in layout:\r\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\r\n elements.append(Paragraph(lt_obj.get_text().strip()))\r\n elif isinstance(lt_obj, LTFigure):\r\n # Recursive...\r\n elements.extend(self._process_layout(lt_obj))\r\n return elements", "def reconstitute():\n with open(TEXT_FPATH, 'w') as txt:\n for jfpath in json_fpaths():\n with open(jfpath) as f:\n jstruct = json.load(f)\n\n for recipe in jstruct.keys():\n _reconstitute_recipe(txt, jstruct[recipe])", "def aligned_set(bitext):\n aligned = []\n for d in bitext:\n f_sent = d[\"fr\"] ## foreign sentence\n e_sent = d[\"en\"] ## English sentence\n fr = f_sent.split()\n en = e_sent.split()\n aligned.append(AlignedSent(fr,en))\n return aligned", "def convert_single_example(ex_index, example, label_list, max_seq_length,tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n text_a=example.text_a\n labels_a=[]\n text_a=re.split(\"(<[a-zA-Z]+>[^<>]+</[a-zA-Z]+>)\",text_a)\n tokens_a=[]\n for sub_text in text_a:\n if len(sub_text.strip())<1:\n continue\n elif re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text):\n re_res=re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text)\n slot_name=re_res.group(1)\n slot_value=re_res.group(2)\n slot_value=tokenizer.tokenize(slot_value)\n slot_labels=[]\n for i,s in enumerate(slot_value):\n if i==0:\n slot_labels.append(\"B_\"+slot_name)\n elif re.search(\"^##\",s):\n slot_labels.append(\"x\")\n else:\n slot_labels.append(\"M_\"+slot_name)\n tokens_a.extend(slot_value)\n labels_a.extend(slot_labels)\n else:\n sub_text=tokenizer.tokenize(sub_text)\n sub_labels=['x' if re.search(\"^##\",i) else 'o' for i in sub_text]\n tokens_a.extend(sub_text)\n labels_a.extend(sub_labels)\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n labels=[example.label]\n for label in labels_a:\n labels.append(label)\n labels.append('o')\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n output_mask=[1 if i!='x' else 0 for i in labels]\n label_ids=[label_map[i] for i in labels]\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n segment_ids.append(0)\n input_mask.append(0)\n output_mask.append(0)\n label_ids.append(label_map['<PAD>'])\n assert len(input_ids)==max_seq_length\n assert len(segment_ids)==max_seq_length\n assert len(label_ids)==max_seq_length\n assert len(input_mask)==max_seq_length\n assert len(output_mask)==max_seq_length\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n 
tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(tokens))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"labels: %s\" % \" \".join([str(x) for x in labels]))\n tf.logging.info(\"label_ids: %s\" % \" \".join([str(x) for x in label_ids]))\n tf.logging.info(\"output_mask: %s\" % \" \".join([str(x) for x in output_mask]))\n feature = InputFeatures(\n input_ids=input_ids,\n segment_ids=segment_ids,\n label_ids=label_ids,\n input_mask=input_mask,\n output_mask=output_mask)\n return feature", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def Hydrate(df, txtPath, excludes=True):\n\n def HydrateText(docId, startOffset, endOffset, properties, txtPath, excludes):\n \n # Read in the text for the document (want to think about ways for improving performance)\n docText = ''\n text = ''\n \n # Check if file already has been read (written to tmp space)\n if os.path.exists('/tmp/' + docId):\n with io.open('/tmp/' + docId,'r',encoding='utf-8') as f:\n docText = f.read()\n else:\n try:\n with io.open(txtPath + docId,'r',encoding='utf-8') as f:\n docText = f.read()\n with io.open('/tmp/' + docId,'w',encoding='utf-8') as f:\n f.write(docText)\n except Exception as ex:\n print(ex)\n docText=\"\"\n \n # Return properties if docText was empty or 'text' is already defined in the properties\n if (docText == '') or ((properties != None) and ('text' in properties)):\n return properties\n else:\n if (excludes) and (properties != None) and ('excludes' in properties) and (len(properties['excludes']) > 0):\n excludes = []\n exToks = []\n for excludesEntry in properties['excludes'].split(\"|\"):\n toks = excludesEntry.split(\",\") \n excludes.append((int(toks[0]),toks[1],toks[2],int(toks[3]),int(toks[4])))\n excludes = list(set(excludes))\n for exclude in excludes:\n exToks.append((exclude[3],exclude[4]))\n exToks = list(set(exToks))\n exToks.sort(key=lambda tup: (tup[0], tup[1]))\n curOffset = startOffset\n for exTok in exToks:\n if exTok[0] <= curOffset:\n curOffset = exTok[1]\n else:\n text = text + docText[curOffset:exTok[0]]\n curOffset = exTok[1]\n if curOffset < endOffset:\n text = text + docText[curOffset:endOffset]\n \n else:\n text = docText[startOffset:endOffset]\n \n if properties != None:\n properties['text'] = text\n else:\n properties = {}\n properties['text'] = text\n return properties\n \n HydrateTextUDF = udf(HydrateText,MapType(StringType(),StringType()))\n\n hydratedf = df.sortWithinPartitions('docId') \\\n .withColumn('properties', HydrateTextUDF(col('docId'),col('startOffset'),col('endOffset'),col('properties'),lit(txtPath),lit(excludes)))\n return hydratedf", "def convert_lists(mkd, tab_level=0):\n\t\n\t#list all unordered list codes for current indent level\n\tmd_unordered_list_codes = re.findall(r\"^\\t{\"+str(tab_level)+\"}[\\*\\-\\+] .+(?:\\n^\\t{\"+str(tab_level)+\",}(?:[\\*\\-\\+]|[0-9]+\\.) 
.+)*\", mkd, re.M)\n\tfor md_code in md_unordered_list_codes:\n\t\t\n\t\t#add itemize begin/end block\n\t\ttex_code = \"\\\\begin{itemize}\\n\" + md_code + \"\\n\\end{itemize}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\t\t#convert each element of list for current indent level\n\t\tmd_item_codes = re.findall(r\"^\\t{\"+str(tab_level)+\"}[\\*\\-\\+] .*$\", md_code, re.M)\n\t\tfor md_code in md_item_codes:\n\t\t\titem = re.findall(r\"^\\t{\"+str(tab_level)+\"}[\\*\\-\\+] (.*)$\", md_code, re.M)[0]\n\t\t\ttex_code = \"\\item \" + item\n\t\t\tmkd = mkd.replace(md_code, tex_code)\n\t\n\n\t#ordered list conversion works similar to unordered list conversion\n\tmd_ordered_list_codes = re.findall(r\"^\\t{\"+str(tab_level)+\"}[0-9]+\\. .+(?:\\n^\\t{\"+str(tab_level)+\",}(?:[\\*\\+\\-]|[0-9]+\\.) .+)*\", mkd, re.M)\n\tfor md_code in md_ordered_list_codes:\n\t\ttex_code = \"\\\\begin{enumerate}\\n\" + md_code + \"\\n\\end{enumerate}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\t\tmd_item_codes = re.findall(r\"^\\t{\"+str(tab_level)+\"}[0-9]+\\. .*$\", md_code, re.M)\n\t\tfor md_code in md_item_codes:\n\t\t\titem = re.findall(r\"^\\t{\"+str(tab_level)+\"}[0-9]+\\. (.*)$\", md_code, re.M)[0]\n\t\t\ttex_code = \"\\item \" + item\n\t\t\tmkd = mkd.replace(md_code, tex_code)\n\n\n\tif md_unordered_list_codes or md_ordered_list_codes:\n\t\tmkd = convert_lists(mkd, tab_level+1)\n\t\n\treturn mkd", "def nom_struc_rebuilding(nom_struc):\n\n #init\n nominal_structure = ns = nn = []\n i = 0\n\n while i < len(nom_struc):\n\n if nom_struc[i]._quantifier == 'SOME' and (nom_struc[i].det == ['a'] or nom_struc[i].det == ['an']):\n nom_struc[i]._quantifier = 'ONE'\n\n #The first nominal group not preceded but 'and' if there is\n if nom_struc[i]._conjunction == 'AND' and i > 0:\n nominal_structure = nominal_structure + ['and']\n elif nom_struc[i]._conjunction == 'OR':\n nominal_structure = nominal_structure + ['or']\n elif nom_struc[i]._conjunction == 'BUT':\n nominal_structure = nominal_structure + ['but']\n\n #We recover the nominal group and his complement\n if nom_struc[i]._quantifier == 'SOME' or nom_struc[i]._quantifier == 'ALL' or nom_struc[\n i]._quantifier == 'ANY' or (nom_struc[i]._quantifier == 'DIGIT' and nom_struc[i].det != 'one'):\n #If there is a specific quantifier (plural)\n for n in ResourcePool().plural_nouns:\n if nom_struc[i].noun != [] and n[1] == nom_struc[i].noun[0]:\n nn = [n[0]]\n\n #If it is not a specific plural, we add 's'\n if nom_struc[i].noun != [] and nn == []:\n nn = [nom_struc[i].noun[0] + 's']\n\n #We reconver the other information \n nominal_structure = nominal_structure + nom_struc[i].det\n for z in nom_struc[i].adj:\n nominal_structure = nominal_structure + z[1] + [z[0]]\n nominal_structure = nominal_structure + nn\n\n #Re-init\n nn = []\n\n else:\n #if not plural\n nominal_structure = nominal_structure + nom_struc[i].det\n for z in nom_struc[i].adj:\n nominal_structure = nominal_structure + z[1] + [z[0]]\n nominal_structure = nominal_structure + nom_struc[i].noun\n\n #We recover noun complement\n if nom_struc[i].noun_cmpl:\n nominal_structure = nominal_structure + ['of']\n nominal_structure = nominal_structure + nom_struc_rebuilding(nom_struc[i].noun_cmpl)\n\n #We recover the relative\n for j in nom_struc[i].relative:\n if not j.sn:\n ns = [nom_struc[i]]\n\n nominal_structure = nominal_structure + [j.aim] + sentence_rebuilding.relative(j, ns)\n ns = []\n\n i += 1\n return nominal_structure", "def reformat():\n toolkit.reformat()", "def format_substitutions(subs: 
Union[SubstituteTerm, List[SubstituteTerm]]):\n text = \"\"\n if isinstance(subs, SubstituteTerm):\n term_str = str(subs)\n for line in term_str.split('\\n'):\n text += Markup.escape(line) + Markup('<br />')\n text += Markup('<br />')\n return text\n for term in subs:\n term_str = str(term)\n for line in term_str.split('\\n'):\n text += Markup.escape(line) + Markup('<br />')\n text += Markup('<br />')\n return text", "def surface_segment_data_preparation(word_dictionary: {str, str}):\n X = []\n Y = []\n words = []\n for word in word_dictionary:\n word_list = []\n word_label_list = []\n for i in range(len(word)):\n gram_dict = {}\n gram_arr = []\n\n ### Unigram\n # gram_dict[word[i]] = 1\n gram_dict[\"uni_\" + word[i]] = 1\n gram_arr.append(word[i])\n\n ### BIGRAM\n try:\n tmp = word[i - 1: i + 1]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 2:\n gram_dict[\"bi_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n try:\n tmp = word[i: i + 2]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 2:\n gram_dict[\"bi_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ### TRIGRAM\n try:\n tmp = word[i - 1: i + 2]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 3:\n gram_dict[\"tri_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ## FourGram\n try:\n tmp = word[i - 1: i + 3]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 4:\n gram_dict[\"four_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n try:\n tmp = word[i - 2: i + 2]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 4:\n gram_dict[\"four_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ## FiveGram\n try:\n tmp = word[i - 2: i + 3]\n if tmp:\n # gram_dict[tmp] = 1\n if len(tmp) == 5:\n gram_dict[\"five_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n ## SixGram\n try:\n tmp = word[i - 3: i + 3]\n if tmp:\n if len(tmp) == 6:\n # gram_dict[tmp] = 1\n gram_dict[\"six_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n try:\n tmp = word[i - 2: i + 4]\n if tmp:\n if len(tmp) == 6:\n # gram_dict[tmp] = 1\n gram_dict[\"six_\" + tmp] = 1\n gram_arr.append(tmp)\n except IndexError:\n continue\n\n if word[i] in 'aeiou':\n gram_dict[\"vowel\"] = 1\n else:\n gram_dict[\"const\"] = 1\n\n if word[i].isupper():\n gram_dict[\"upper\"] = 1\n else:\n gram_dict[\"lower\"] = 1\n\n word_list.append(gram_dict)\n word_label_list.append(word_dictionary[word][i])\n\n X.append(word_list)\n Y.append(word_label_list)\n words.append([char for char in word])\n return X, Y, words", "def all_phrases(grammar, root):\n #\n # if root not in grammar:\n # return [[root]]\n #\n # phrases = []\n # for structure in grammar[root]:\n # for fragment in structure:\n # phrases = phrases + all_phrases(grammar,fragment)\n # print(phrases)\n # return phrases\n\n if root not in grammar:\n return [[root]]\n phrases = []\n for structure in grammar[root]:\n phrase_template = []\n for speech_part in structure:\n if speech_part not in grammar:\n if len(phrase_template)>0:\n new_phrase_template = []\n for phrase in phrase_template:\n if type(phrase)==str:\n phrase = [phrase]\n new_phrase_template.append(phrase+[speech_part])\n phrase_template = new_phrase_template\n else:\n phrase_template.append([speech_part])\n else:\n if len(phrase_template)>0:\n new_phrase_template = []\n for phrase in phrase_template:\n if type(phrase)==str:\n phrase = [phrase]\n for fragment in grammar[speech_part]:\n fragmented_bool = False\n for fragmented in 
fragment:\n if fragmented in grammar:\n fragmented_bool = True\n for subfragment in grammar[fragmented]:\n new_phrase_template.append(phrase+subfragment)\n if not fragmented_bool:\n new_phrase_template.append(phrase+fragment)\n phrase_template = new_phrase_template\n else:\n for fragment in grammar[speech_part]:\n if fragment[0] in grammar:\n for subfragment in grammar[fragment[0]]:\n phrase_template.append(subfragment)\n else:\n phrase_template.append(fragment)\n phrases = phrases + phrase_template\n return phrases", "def preprocess(self):\n for texgroup in self.textureGroups.itervalues():\n texgroup.dirty = True", "def __init__(self, annotated_text):\n self.tokens = ['ROOT'] # initially has only root element\n self.spans = [None]\n self.heads = [None] # root has no head element\n self.labels = [None] # root has no head element => no label\n\n span_to_index = {} # maps token spans to indexes\n root_indexes = [] # to store indexes of root elements\n\n # get token spans and values from the Texterra-annotated document\n for i, an in enumerate(annotated_text['annotations']['syntax-relation']):\n span = (an['start'], an['end'])\n self.spans.append(span)\n span_to_index[span] = i + 1\n self.tokens.append(annotated_text['text'][an['start']: an['end']])\n\n # iterate over the document again to set heads and labels\n for i, an in enumerate(annotated_text['annotations']['syntax-relation']):\n if 'parent' in an['value']:\n self.heads.append(span_to_index[(an['value']['parent']['start'], an['value']['parent']['end'])])\n self.labels.append(an['value']['type'])\n else:\n self.heads.append(0)\n self.labels.append('ROOT')\n root_indexes.append(i + 1)\n\n # stores dependency structure of the sentence in dict, with\n # root elements as key and their child elements as value.\n # child elements that have their own children are stored as dicts\n # where they serve as key and their children as value.\n self.tree = {}\n self._visited = [] # stores elements visited during tree's building process\n self.to_string = ''\n\n # iterate over root elements and build their subtrees\n for root_index in root_indexes:\n # get the root's span\n root_span = self.spans[root_index]\n\n # indicate the root as visited\n self._visited.append(root_index)\n\n # build the roots subtree\n sub_tree, sub_tree_string = self._build_tree(root_index)\n sub_tree_key = (root_span[0], root_span[1], self.tokens[root_index], 'ROOT')\n self.tree[sub_tree_key] = sub_tree\n\n # attach the subtrees string to the sentence's parse string\n if len(root_indexes) > 0 and not sub_tree_string.startswith('('):\n format_string = '({0}) '\n else:\n format_string = '{0} '\n self.to_string += format_string.format(sub_tree_string)", "def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n\n # Split in parts\n for line in text.splitlines():\n if line.startswith((\"# \", \"## \", \"### \", \"#### \", \"##### \")):\n # Finish pending lines\n parts.append(\"\\n\".join(lines))\n lines = []\n # Process header\n level = len(line.split(\" \")[0])\n title = line.split(\" \", 1)[1]\n title_short = title.split(\"(\")[0].split(\"<\")[0].strip().replace(\"`\", \"\")\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append(\"\\n\".join(lines))\n\n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + \"\\n\\n\"", "def sort_subgroups(new_document_list):\n 
for page in new_document_list:\n if page[0]:\n page[0][0] = sorted(page[0][0], key=lambda g: g['bbox'][1])\n if page[1]:\n if page[1][0]:\n page[1][0] = sorted(page[1][0], key=lambda g: g['bbox'][1])\n if len(page[1])>1:\n if page[1][1]:\n page[1][1] = sorted(page[1][1], key=lambda g: g['bbox'][1])\n if page[2]:\n if page[2][0]:\n page[2][0] = sorted(page[2][0], key=lambda g: g['bbox'][1])\n if len(page[2])>1:\n if page[2][1]:\n page[2][1] = sorted(page[2][1], key=lambda g: g['bbox'][1])\n if len(page[2])>2:\n if page[2][2]:\n page[2][2] = sorted(page[2][2], key=lambda g: g['bbox'][1])\n return new_document_list", "def merge_articles(docs_folder):\n\n s = \"\"\n \n for doc in os.listdir(docs_folder):\n try:\n with open(docs_folder + doc ,'r') as f:\n\n lines = f.readlines()\n raw_doc = \"\".join(txt for txt in lines)\n left_idx_headline = [ m.end(0) for m in re.finditer(r\"<HEADLINE>\",raw_doc)]\n right_idx_headline = [ m.start(0) for m in re.finditer(r\"</HEADLINE>\",raw_doc)]\n\n left_idx_text = [ m.end(0) for m in re.finditer(r\"<TEXT>\",raw_doc)]\n right_idx_text = [ m.start(0) for m in re.finditer(r\"</TEXT>\",raw_doc)]\n\n raw_headline = raw_doc[left_idx_headline[0]:right_idx_headline[0]]\n raw_text = raw_doc[left_idx_text[0]:right_idx_text[0]]\n\n left_idx_paragraph_headline = [ m.end(0) for m in re.finditer(r\"<P>\",raw_headline)]\n right_idx_paragraph_headline = [ m.start(0) for m in re.finditer(r\"</P>\",raw_headline)]\n\n left_idx_paragraph_text = [ m.end(0) for m in re.finditer(r\"<P>\",raw_text)]\n right_idx_paragraph_text = [ m.start(0) for m in re.finditer(r\"</P>\",raw_text)]\n\n for i in range(len(left_idx_paragraph_headline)):\n s += raw_headline[left_idx_paragraph_headline[i]:right_idx_paragraph_headline[i]-2] + \".\"\n\n for i in range(len(left_idx_paragraph_text)):\n s += raw_text[left_idx_paragraph_text[i]:right_idx_paragraph_text[i]-1]\n except:\n pass\n\n return s", "def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. 
ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr", "def transpose(text, transposer, start = 0, changes = None):\n tokenizer = ly.tokenize.MusicTokenizer()\n tokens = tokenizer.tokens(text)\n \n if changes is None:\n changes = ly.tokenize.ChangeList(text)\n \n class gen(object):\n \"\"\"\n Advanced generator of tokens, discarding whitespace and comments,\n and automatically detecting \\relative blocks and places where a new\n LilyPond parsing context is started, like \\score inside \\markup.\n \n It also handles transposition tasks that are the same in relative\n and absolute environments.\n \"\"\"\n def __init__(self):\n self.inSelection = not start\n \n def __iter__(self):\n return self\n \n def next(self):\n while True:\n token = next(tokens)\n if isinstance(token, (tokenizer.Space, tokenizer.Comment)):\n continue\n elif not self.inSelection and token.pos >= start:\n self.inSelection = True\n # Handle stuff that's the same in 
relative and absolute here\n if token == \"\\\\relative\":\n relative()\n elif isinstance(token, tokenizer.MarkupScore):\n absolute(consume())\n elif isinstance(token, tokenizer.ChordMode):\n chordmode()\n elif token == \"\\\\transposition\":\n next(source) # skip pitch\n elif token == \"\\\\transpose\":\n if self.inSelection:\n for token in next(source), next(source):\n if isinstance(token, tokenizer.Pitch):\n transpose(token)\n else:\n next(source), next(source)\n elif token == \"\\\\key\":\n token = next(source)\n if self.inSelection and isinstance(token, tokenizer.Pitch):\n transpose(token, 0)\n else:\n return token\n \n source = gen()\n \n def consume():\n \"\"\" Consume tokens till the level drops (we exit a construct). \"\"\"\n depth = tokenizer.depth()\n for token in source:\n yield token\n if tokenizer.depth() < depth:\n return\n \n def transpose(token, resetOctave = None):\n \"\"\" Transpose absolute pitch in token, must be tokenizer.Pitch \"\"\"\n p = Pitch.fromToken(token, tokenizer)\n if p:\n transposer.transpose(p)\n if resetOctave is not None:\n p.octave = resetOctave\n changes.replaceToken(token, p.output(tokenizer.language))\n \n def relative():\n \"\"\" Called when \\\\relative is encountered. \"\"\"\n def transposeRelative(token, lastPitch):\n \"\"\"\n Make a new relative pitch from token, if possible.\n Return the last pitch used (absolute, untransposed).\n \"\"\"\n p = Pitch.fromToken(token, tokenizer)\n if not p:\n return lastPitch\n # absolute pitch determined from untransposed pitch of lastPitch\n octaveCheck = p.octaveCheck is not None\n p.absolute(lastPitch)\n if source.inSelection:\n # we may change this pitch. Make it relative against the\n # transposed lastPitch.\n try:\n last = lastPitch.transposed\n except AttributeError:\n last = lastPitch\n # transpose a copy and store that in the transposed\n # attribute of lastPitch. Next time that is used for\n # making the next pitch relative correctly.\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy # store transposed copy in new lastPitch\n new = copy.relative(last)\n if octaveCheck:\n new.octaveCheck = copy.octave\n if relPitchToken:\n # we are allowed to change the pitch after the\n # \\relative command. lastPitch contains this pitch.\n lastPitch.octave += new.octave\n new.octave = 0\n changes.replaceToken(relPitchToken[0], lastPitch.output(tokenizer.language))\n del relPitchToken[:]\n changes.replaceToken(token, new.output(tokenizer.language))\n return p\n\n lastPitch = None\n relPitchToken = [] # we use a list so it can be changed from inside functions\n \n # find the pitch after the \\relative command\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n lastPitch = Pitch.fromToken(token, tokenizer)\n if lastPitch and source.inSelection:\n relPitchToken.append(token)\n token = next(source)\n if not lastPitch:\n lastPitch = Pitch.c1()\n \n # eat stuff like \\new Staff == \"bla\" \\new Voice \\notes etc.\n while True:\n if token in ('\\\\new', '\\\\context'):\n next(source) # skip context type\n token = next(source)\n if token == '=':\n next(source) # skip context name\n token = next(source)\n elif isinstance(token, tokenizer.NoteMode):\n token = next(source)\n else:\n break\n \n # now transpose the relative expression\n if isinstance(token, tokenizer.OpenDelimiter):\n # Handle full music expression { ... } or << ... 
>>\n for token in consume():\n if token == '\\\\octaveCheck':\n token = next(source)\n if isinstance(token, tokenizer.Pitch):\n p = Pitch.fromToken(token, tokenizer)\n if p:\n if source.inSelection:\n copy = p.copy()\n transposer.transpose(copy)\n p.transposed = copy\n changes.replaceToken(token, copy.output(tokenizer.language)) \n lastPitch = p\n del relPitchToken[:]\n elif isinstance(token, tokenizer.OpenChord):\n chord = [lastPitch]\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n lastPitch = chord[:2][-1] # same or first\n break\n elif isinstance(token, tokenizer.Pitch):\n chord.append(transposeRelative(token, chord[-1]))\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.OpenChord):\n # Handle just one chord\n for token in source:\n if isinstance(token, tokenizer.CloseChord):\n break\n elif isinstance(token, tokenizer.Pitch):\n lastPitch = transposeRelative(token, lastPitch)\n elif isinstance(token, tokenizer.Pitch):\n # Handle just one pitch\n transposeRelative(token, lastPitch)\n \n def chordmode():\n \"\"\" Called inside \\\\chordmode or \\\\chords. \"\"\"\n for token in consume():\n if source.inSelection and isinstance(token, tokenizer.Pitch):\n transpose(token, 0)\n \n def absolute(tokens):\n \"\"\" Called when outside a possible \\\\relative environment. \"\"\"\n for token in tokens:\n if source.inSelection and isinstance(token, tokenizer.Pitch):\n transpose(token)\n \n # Do it!\n absolute(source)\n return changes", "def divide_subgroups(document_list):\n new_doc_list = []\n for page in document_list:\n if page[0]:\n page[0] = [page[0]]\n if page[1]:\n # print(page[1])\n subgroup1 = []\n subgroup2 = []\n for group in page[1]:\n if group['bbox'][0] < 40:\n subgroup1.append(group)\n else:\n subgroup2.append(group)\n # print(subgroup1)\n # print(subgroup2)\n if subgroup1:\n if subgroup2:\n col_group2 = [subgroup1, subgroup2]\n else:\n col_group2 = [subgroup1]\n else:\n col_group2 = [subgroup2]\n # print(col_group2)\n else:\n col_group2 = []\n if page[2]:\n # print(page[2])\n subgroup1 = []\n subgroup2 = []\n subgroup3 = []\n for group in page[2]:\n # print(\"here\")\n # print(group)\n if group['bbox'][0] < 20:\n subgroup1.append(group)\n elif group['bbox'][0] < 50:\n subgroup2.append(group)\n else:\n subgroup3.append(group)\n # print(subgroup1)\n # print(subgroup2)\n # print(subgroup3)\n if subgroup1:\n if subgroup2:\n if subgroup3:\n col_group3 = [subgroup1, subgroup2, subgroup3]\n else:\n col_group3 = [subgroup1, subgroup2]\n else:\n if subgroup3:\n col_group3 = [subgroup1, subgroup3]\n else:\n col_group3 = [subgroup1]\n else:\n if subgroup2:\n if subgroup3:\n col_group3 = [subgroup2, subgroup3]\n else:\n col_group3 = [subgroup2]\n else:\n if subgroup3:\n col_group3 = [subgroup3]\n else:\n col_group3 = []\n # print(col_group3)\n else:\n col_group3 = []\n # print(page[0])\n # print(page[1])\n # print(page[2])\n # print(\"\\n\\n\")\n new_doc_list.append([page[0], col_group2, col_group3])\n return new_doc_list", "def preprocess(text):\n text_index = list(range(0, len(text), SEG_SIZE))\n segments = [\n text[i: i + SEG_SIZE] + PLACEHOLDER_TEMPLATE.format(PLACEHOLDER, i//SEG_SIZE)\n for i in text_index\n ]\n text_index.append(len(text))\n return text_index[1:], ''.join(segments)", "def make_trigrams(texts,trigram_mod,bigram_mod):\n return [trigram_mod[bigram_mod[doc]] for doc in texts]", "def _from_etree_to_tree(self, lang='en-US'):\n #clear existing tree\n# for i in 
self.tree.get_children():\n# self.tree.delete(i)\n self.tree.delete(*self.tree.get_children())\n #now insert old tree\n for category in self.trout:\n tagged = category.get('tags')\n if tagged is None:\n tagged = \"('{}',)\".format(category.tag)\n if tagged[-1] == ')':\n inserttext = tagged[2:3].upper() + tagged[3:tagged.find(')')-2]\n else:\n inserttext = tagged[1:2].upper() + tagged[2:-1]\n #messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(lang, inserttext))\n thiscategory = self.tree.insert('', 'end', iid=inserttext.lower(), values=['', ''], \\\n text=LOCALIZED_TEXT[lang][inserttext], tags=\"{}\".format(inserttext.lower()))\n for term in category:\n values = eval(term.get('values'))\n tags = term.get('tags')\n# messagebox.showwarning('_from_etree_to_tree', \"{}, {}\".format(values, tags))\n thisterm = self.tree.insert(thiscategory, 'end')\n self.tree.item(thisterm, tags=term.get('tags'))\n self.tree.item(thisterm, text=term.text)\n self.tree.item(thisterm, values=[str(values[0]), str(values[1])])\n# tags=term.get('tags'))\n for rendering in term:\n thisrendering = self.tree.insert(thisterm, 'end', \\\n text=rendering.text, values=term.get('values'), \\\n tags=rendering.get('tags'))\n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')\n self.tree.update() \n pass", "def analysis_to_subword_dicts(ana):\n return map(pairs_to_dict, chunk_subwords(analysis_to_pairs(ana)))", "def run(self, tree):\r\n self.stashed_nodes = {}\r\n\r\n stack = [tree]\r\n\r\n while stack:\r\n currElement = stack.pop()\r\n insertQueue = []\r\n for child in currElement.getchildren():\r\n if child.text and not isinstance(child.text, util.AtomicString):\r\n text = child.text\r\n child.text = None\r\n lst = self.__processPlaceholders(self.__handleInline(\r\n text), child)\r\n stack += lst\r\n insertQueue.append((child, lst))\r\n if child.tail:\r\n tail = self.__handleInline(child.tail)\r\n dumby = util.etree.Element('d')\r\n tailResult = self.__processPlaceholders(tail, dumby)\r\n if dumby.text:\r\n child.tail = dumby.text\r\n else:\r\n child.tail = None\r\n pos = currElement.getchildren().index(child) + 1\r\n tailResult.reverse()\r\n for newChild in tailResult:\r\n currElement.insert(pos, newChild)\r\n if child.getchildren():\r\n stack.append(child)\r\n\r\n if self.markdown.enable_attributes:\r\n for element, lst in insertQueue:\r\n if element.text:\r\n element.text = \\\r\n inlinepatterns.handleAttributes(element.text, \r\n element)\r\n i = 0\r\n for newChild in lst:\r\n # Processing attributes\r\n if newChild.tail:\r\n newChild.tail = \\\r\n inlinepatterns.handleAttributes(newChild.tail,\r\n element)\r\n if newChild.text:\r\n newChild.text = \\\r\n inlinepatterns.handleAttributes(newChild.text,\r\n newChild)\r\n element.insert(i, newChild)\r\n i += 1\r\n return tree", "def split_full_text(self, full_text, headers_list):\n\n sectioned_text = {}\n indices = {}\n no_abstr = False\n\n for i, hd in enumerate(headers_list):\n #need to replace special regex characters before matching substrings\n if '(' in hd:\n hd = hd.replace('(', '\\(')\n\n if ')' in hd:\n hd = hd.replace(')', '\\)')\n\n if '[' in hd:\n hd = hd.replace('[', '\\[')\n\n if ']' in hd:\n hd = hd.replace(']', '\\]')\n\n if '{' in hd:\n hd = hd.replace('{', '\\{')\n\n if '}' in hd:\n hd = 
hd.replace('}', '\\}')\n\n if '+' in hd:\n hd = hd.replace('+', '\\+')\n\n if '*' in hd:\n hd = hd.replace('*', '\\*')\n\n if ':' in hd:\n hd = hd.replace(':', '\\:')\n\n if i == 0: # meta-data has no substring-matching to do\n\n inds = [m.start() for m in re.finditer(hd, full_text)]\n #Abstract can appear in text, but isn't listed w/ headers\n #Only use first instance\n if len(inds) > 0:\n indices[hd] = inds[0]\n\n else: #if there is no abstract, use figures to remove meta-data\n fig_text = [m.start() for m in re.finditer('Figure', full_text)]\n indices[hd] = fig_text[0]\n no_abstr = True\n\n else:\n inds = [m.start() for m in re.finditer(hd, full_text)]\n #assume final instance of substring match corresponds\n #to the correct header text instance\n indices[hd] = inds[-1]\n\n\n for i, hd in enumerate(headers_list):\n\n if i == 0:\n if no_abstr == True:\n\n #get meta-data, which has no keyword matching\n sectioned_text['Section Headers'] = headers_list\n end_ind = indices[' Abstract ']\n sectioned_text['Meta-data'] = full_text[:end_ind]\n\n #indicate there is no abstract\n start_id = indices[' Abstract ']\n end_id = indices[list(indices.keys())[1]]\n sectioned_text[' Abstract '] = ''\n\n\n if no_abstr == False:\n #get meta-data, which has no keyword matching\n sectioned_text['Section Headers'] = headers_list\n end_ind = indices[' Abstract ']\n sectioned_text['Meta-data'] = full_text[:end_ind]\n\n #get abstract\n start_id = indices[' Abstract ']\n end_id = indices[list(indices.keys())[1]]\n sectioned_text[hd] = full_text[start_id : end_id]\n\n if i > 0 and i < len(headers_list)-1: #all setions but final section\n if i == 1:\n if no_abstr == True:\n start_id = indices[' Abstract ']\n end_id = indices[list(indices.keys())[i+1]]\n sectioned_text[hd] = full_text[start_id:end_id]\n\n else:\n start_id = indices[list(indices.keys())[i]]\n end_id = indices[list(indices.keys())[i+1]]\n sectioned_text[hd] = full_text[start_id:end_id]\n\n else:\n start_id = indices[list(indices.keys())[i]]\n end_id = indices[list(indices.keys())[i+1]]\n sectioned_text[hd] = full_text[start_id:end_id]\n\n if i == len(headers_list) - 1: #final header\n start_id = indices[list(indices.keys())[i]]\n sectioned_text[hd] = full_text[start_id:]\n\n return sectioned_text", "def formatTexts(owned, shared):\n owned_texts = []\n shared_texts = []\n # Catches error if there is no score from the databse search\n try:\n for text in range(len(owned)):\n owned_texts.append(\n {'title': owned[text][0], 'body': owned[text][1], 'score': owned[text][2]})\n for text in range(len(shared)):\n shared_texts.append(\n {'title': shared[text][0], 'body': shared[text][1], 'score': shared[text][2]})\n except:\n for text in range(len(owned)):\n owned_texts.append(\n {'title': owned[text][0], 'body': owned[text][1]})\n for text in range(len(shared)):\n shared_texts.append(\n {'title': shared[text][0], 'body': shared[text][1]})\n # Adds False if the either of the text arrays are empty\n if len(owned_texts) == 0:\n owned_texts.append(False)\n if len(shared_texts) == 0:\n shared_texts.append(False)\n return owned_texts, shared_texts", "def formatted_alignments(self,chosen_a_idxs,bitext,alignments,e_words,f_words):\n output =[]\n output_idxs = []\n for key in chosen_a_idxs.keys():\n temp = []\n temp_idx = []\n idx = chosen_a_idxs[key]\n alignment = alignments[idx]\n for t in alignment:\n temp.append((e_words[t[0]],f_words[t[1]]))\n temp_idx.append((bitext[key][\"en\"].index(e_words[t[0]]),bitext[key][\"fr\"].index(f_words[t[1]])))\n output.append(temp)\n 
output_idxs.append(temp_idx)\n return output,output_idxs", "def setTexts(self, text):\n for i in self.textItems:\n try:\n i.scene().removeItem(i)\n except Exception as e:\n pass\n self.textItems = []\n for t in text:\n item = TextItem(t)\n item.setColor([255, 255, 255])\n self.textItems.append(item)\n item.setParentItem(self)", "def notes_to_annotations(self):\n\n for sub_dir, text_name, file_names in anafora.walk(self.xml_dir, self.xml_regex):\n note_path = os.path.join(self.text_dir, text_name)\n xml_path = os.path.join(self.xml_dir, sub_dir, file_names[0])\n ref_data = anafora.AnaforaData.from_file(xml_path)\n\n # collect (annot_start, annot_end, annot_id) tuples\n add_annotations(self.note2times[note_path], ref_data, 'TIMEX3')\n add_annotations(self.note2times[note_path], ref_data, 'SECTIONTIME')\n add_annotations(self.note2times[note_path], ref_data, 'DOCTIME')\n add_annotations(self.note2events[note_path], ref_data, 'EVENT')\n\n # collect (src spans, targ spans, src id, targ id) tuples\n for rel in ref_data.annotations.select_type('TLINK'):\n src = rel.properties['Source']\n targ = rel.properties['Target']\n label = rel.properties['Type']\n if label == 'CONTAINS':\n src_start, src_end = src.spans[0]\n targ_start, targ_end = targ.spans[0]\n self.note2rels[note_path].append(\n (src_start, src_end, targ_start, targ_end, src.id, targ.id))\n\n # sort relation tuples by src arguments' offsets\n # self.note2rels[note_path].sort(key=lambda t: t[0])", "def transform(self):\n count=1\n assert len(self.list_folder)>=1 ,\"FILES NOT FOUND\"\n for i,folder in enumerate(self.list_folder):\n path=folder\n for j,pdf in enumerate(os.listdir(path)):\n if pdf!= '.DS_Store':\n self.df.loc[count] = [pdf,folder.split('/')[-2], i+1,None,None]\n \n \"\"\" 0- Read Pdf file \"\"\"\n raw = parser.from_file(os.path.join(path,pdf))\n s = raw['content']\n \n \"\"\" 1- Handle linebreaks to optimize TextBlob.sentences results\"\"\"\n s=self.treat_new_line(s)\n \n \"\"\" 2- Divide text by sentences using TextBlob\"\"\"\n blob=TextBlob(s)\n paragraphs = np.array([str(s) for s in blob.sentences],dtype=str)\n self.parser = []\n self.parser_raw=[]\n p=self.text_processor_pdf(paragraphs)\n \n \"\"\"\n 3- Get rid of bad text data:\n Discard sentences with too long word (16 is the 99% quantile in english)\n Discard sentences with too much upper words (CREDENTIALS, Link, TITLE ..)\n \"\"\"\n index_=[i for i,c in enumerate(self.parser) if (True in [len(w)>=16 for w in c.split()] )]\n index_raw=[i for i,c in enumerate(self.parser_raw) if np.sum([w==w.upper() for w in c.split()])>=4]\n index=list(set(index_ + index_raw))\n self.df.loc[count,'paragraphs']=np.delete(np.array(self.parser),index)\n self.df.loc[count,'raw paragraphs']=np.delete(np.array(self.parser_raw),index)\n count+=1\n \n print(\"files from {} succesfully converted \".format(folder))\n \n return self.df", "def __syntax(self, text):\n s = pattern.en.parsetree(text, relations = True, lemmata = True)\n text_chunks = []\n for sentence in s:\n out = \"\"\n for chunk in sentence.chunks:\n out += str(chunk.type)\n text_chunks.append(out)\n text_chunks_out = [\" \".join(text_chunks)]\n return (self.chunk_vectorizer.transform(text_chunks_out),)", "def __processElementText(self, node, subnode, isText=True):\r\n if isText:\r\n text = subnode.text\r\n subnode.text = None\r\n else:\r\n text = subnode.tail\r\n subnode.tail = None\r\n\r\n childResult = self.__processPlaceholders(text, subnode)\r\n\r\n if not isText and node is not subnode:\r\n pos = 
node.getchildren().index(subnode)\r\n node.remove(subnode)\r\n else:\r\n pos = 0\r\n\r\n childResult.reverse()\r\n for newChild in childResult:\r\n node.insert(pos, newChild)", "def preprocess_text(self, seq):\n if self.text_preprocess_fn:\n seq = list(map(self.text_preprocess_fn, seq))\n return seq", "def build_latex(file_list):\n eingabe=[]\n anhang_count=0\n anhaenge=[]\n anhaenge_file=[]\n for file in file_list:\n x=load_file(file)[1]\n eingabe.append(\"\\section{%s}\" %(x[2]))\n eingabe.append(\"\\subsection{Infos}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}lX}\")\n eingabe.append(r\"\\textbf{Datum} & %s\\\\\" %(x[0]))\n eingabe.append(r\"\\textbf{Gremium} & %s\\\\\" %(x[1]))\n eingabe.append(r\"\\textbf{Anatrag/Beschluss wurde} & %s\\\\\" %(x[9]))\n x[11]=x[11].replace(\" \",\"\")\n kw=x[11].split(\",\")\n for i in range(0,len(kw)):\n if i==0:\n eingabe.append(r\"\\textbf{Keyword:} & %s\\\\\" %(kw[i]))\n else:\n eingabe.append(r\" & %s\\\\\" %(kw[i]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[6],x[7],x[8]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\subsection{Antrags/Beschlusstext}\")\n line_text=len(eingabe)\n eingabe.append(x[3])\n eingabe.append(\"\\subsection{Begründung}\")\n eingabe.append(x[4])\n if x[23]==\"Ja\" and x[24]!=\"\":\n delta=7\n anzahl=int((len(x)-23)/delta)\n if anzahl==1:\n eingabe.append(\"\\subsection{Änderungsantrag}\")\n eingabe.append(\"\\subsubsection*{Vorschlag}\")\n eingabe.append(x[24])\n eingabe.append(\"\\subsubsection*{Begründung}\")\n eingabe.append(x[25]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26],x[27],x[28]))\n eingabe.append(r\"\\multicolumn{2}{@{}l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29]))\n eingabe.append(\"\\\\end{tabularx}\")\n else:\n eingabe.append(\"\\subsection{Änderungsanträge}\")\n for i in range(0,anzahl):\n eingabe.append(\"\\subsubsection{Änderungsvorschlag %s}\" %(i+1))\n eingabe.append(\"\\\\paragraph*{Vorschlag}\")\n eingabe.append(x[24+(delta*i)])\n eingabe.append(\"\\\\paragraph*{Begründung}\")\n eingabe.append(x[25+(delta*i)]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26+(delta*i)],x[27+(delta*i)],x[28+(delta*i)]))\n eingabe.append(r\"\\multicolumn{2}{@{}l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29+(delta*i)]))\n eingabe.append(\"\\\\end{tabularx}\")\n if x[10]!=\"\":\n anhang=x[10].split(\",\")\n bennenung=x[11].split(\",\")\n eingabe[line_text]=eingabe[line_text]+\"\\\\\\\\ \\n Dieser Antrag enthält %s Anhänge: \" %(len(anhang))\n for i in range(0,len(anhang)):\n anhang_count=anhang_count+1\n anhaenge.append(\"\\section{%s - %s} \\label{An:%s}\" % (x[2],bennenung[i],str(anhang_count)))\n anhaenge.append(\"\\includepdf[pages=-]{%s}\" %(anhang[i]))\n anhaenge_file.append(anhang[i])\n if i!=len(anhang)-1:\n eingabe[line_text]=eingabe[line_text]+\"\\\\nameref{An:%s}, \" % 
(str(anhang_count))\n else:\n eingabe[line_text]=eingabe[line_text]+\"\\\\nameref{An:%s} \" % (str(anhang_count)) \n \n eingabe.append(\"\\\\newpage\") \n eingabe.append(\"\\\\appendix\") \n eingabe.append(\"\\\\pagenumbering{Roman}\") \n ausgabe=\"\"\n for i in range(0,len(eingabe)):\n ausgabe=ausgabe+eingabe[i]+\"\\n\"\n \n for i in range(0,len(anhaenge)):\n ausgabe=ausgabe+anhaenge[i]+\"\\n\"\n \n return ausgabe,anhaenge_file", "def tidy_text(self, text: List[str],\n ) -> List[str]:\n \n if self.case_fold:\n text = list(map(lambda t: t.lower(), text))\n \n if self.special_syms:\n text = list(map(self.handle_special_symbols, text))\n \n text = list(map(self.handle_kw_phrases, text))\n text = list(map(self.depluralise_keywords, text))\n\n if self.tokenise:\n text = list(map(word_tokenize, text))\n \n if self.stem:\n text = list(map(self.stemming, text))\n \n if self.lemmatise:\n text = list(map(self.lemmatisation, text))\n \n if self.del_stop_words:\n stop_words = subsample_frequent_words(text, set(self.target_words))\n text = list(map(\n lambda t: self.handle_stop_words(t, stop_words), text\n ))\n \n return text", "def __PerformSubstitutions(self, text):\n\n for substitution in self.substitutions:\n pattern, replacement = self.SplitValue(substitution)\n text = re.compile(pattern,re.M).sub(replacement, text)\n return text", "def gt_multi_txt(path, bboxes): \n \n W, H = Image.open(path).size\n\n lines_out=[]\n for obj_info in bboxes:\n label = 0 #obj_info['name']\n xmin, ymin, xmax, ymax = obj_info['bbox']\n\n cx = '%.3f' % np.clip(((xmax+xmin)/2)/W,0,1)\n cy = '%.3f' % np.clip(((ymax+ymin)/2)/H,0,1)\n w = '%.3f' % np.clip((xmax-xmin)/W ,0,1)\n h = '%.3f' % np.clip((ymax-ymin)/H ,0,1)\n\n lines_out.append(' '.join([str(label),cx,cy,w,h,'\\n']))\n\n return lines_out", "def restore_item_texts(self, arrays=None):\n if not arrays: arrays = self.masks()\n for a in arrays:\n sources = self.sources(a)\n for tk, ed in product(self.valid_tks, [None, 'x', 'y']):\n if (any(self.text(i, True, tk, ed)==self.text(i, False, tk, ed)\n for i in sources) and self.text(a, text_key=tk, axis_edit=ed)):\n rename_items = {self.item_no(i): self.text(i, True, tk, ed)\n for i in sources if self.text(i, True, tk, ed)}\n self.set_item_texts(a, rename_items, tk, ed)\n elif not any(self.text(i, True, tk, ed) in self.text(i, False, tk, ed)\n for i in sources if self.text(i, False, tk, ed)) and self.text(a, text_key=tk, axis_edit=ed):\n rename_items = {self.item_no(i): self.text(i, True, tk, ed)\n for i in sources if self.text(i, True, tk, ed)}\n self.set_item_texts(a, rename_items, tk, ed)\n return None", "def textree(self):\n\t\ttextree = '\\\\begin{dependency}[theme=simple]\\n\\\\begin{deptext}[column sep=.5cm, row sep=.1ex]\\n'\n\t\tsentence = self.reconstruct_sentence()\n\t\ts = '\\\\&'.join(sentence[1:])+'\\\\\\\\\\n'\n\t\tn = '\\\\&'.join(map(str,range(len(sentence)))) + '\\\\\\\\\\n'\n\t\ttextree = textree + s + n +'\\\\end{deptext}\\n'\n\t\ttextree = textree + '\\\\deproot{%s}{}\\n' % str(self.head_pos)\n\t\tfor head in self.deps:\n\t\t\tfor dependent in self.deps[head]:\n\t\t\t\ttextree = textree + '\\\\depedge{%s}{%s}{%s}\\n' % (head, dependent[0], dependent[1])\n\t\ttextree = textree + '\\\\end{dependency}'\n\t\treturn textree", "def separate_body_and_ref(TexRoot, build_and_clean=True, recursive=0):\n _STRING_TO_BE_ADDED = \"\"\"\n\\\\newpage\n\\\\AtBeginShipout{%\n\\\\AtBeginShipoutDiscard\n}\n \"\"\"\n _BIB_STYLE_CMD = '\\\\bibliographystyle{'\n _BIB_FILE_CMD = '\\\\bibliography{'\n _BODY_START_CMD = 
'\\\\begin{document}'\n _BODY_END_CMD = '\\\\end{document}'\n _BBL_INSERT_CMD = '\\\\input{{{bbl}.bbl}}'\n _MAKE_TITLE_CMD = '\\\\maketitle'\n\n filename, file_extension = os.path.splitext(TexRoot)\n body_file = filename + \"-body\" + file_extension\n ref_file = filename + \"-ref\" + file_extension\n\n # create file objects for body file and reference file\n body_fp = open(body_file, 'w')\n ref_fp = open(ref_file, 'w')\n\n flag = False\n in_header = True\n with open(TexRoot, 'r') as fp:\n for line in fp:\n # remove leading blank space\n line = line.strip()\n if (not flag) and (line.startswith(_BIB_STYLE_CMD) or line.startswith(_BIB_FILE_CMD)):\n if not in_header:\n # sometimes bib style is put in the header\n body_fp.write(_STRING_TO_BE_ADDED + os.linesep)\n flag = True\n\n if line.startswith(_BODY_START_CMD):\n in_header = False\n\n if in_header and (not line.startswith(_MAKE_TITLE_CMD)):\n ref_fp.write(line + os.linesep)\n\n if line.startswith(_BIB_STYLE_CMD):\n ref_fp.write(line + os.linesep)\n\n body_fp.write(line + os.linesep)\n if line.startswith(_BIB_FILE_CMD):\n bib_files = re.findall(r'\\\\bibliography{(.*)}', line)\n if len(bib_files) == 0:\n print \"No bib file is found\"\n exit(1)\n\n # bbl_lists = get_bib(bib_files[0])\n ref_fp.write(_BODY_START_CMD + os.linesep)\n ref_fp.write(_BBL_INSERT_CMD.format(bbl=os.path.splitext(body_file)[0]) + os.linesep)\n # for bbl in bbl_lists:\n # ref_fp.write(_BBL_INSERT_CMD.format(bbl=bbl) + os.linesep)\n ref_fp.write(_BODY_END_CMD + os.linesep)\n\n body_fp.close()\n ref_fp.close()\n\n if build_and_clean:\n build([body_file, ref_file])\n clean(recursive)", "def parse_obj(lt_objs,content):\n\n # loop over the object list\n\n\n for obj in lt_objs:\n\n # if it's a textbox, print text and location\n if isinstance(obj, pdfminer.layout.LTRect):\n content[0].append(int(obj.x0))\n content[0].append(int(obj.x1))\n content[1].append(int(obj.y1))\n content[1].append(int(obj.y0))", "def create_clean_content(self):\n clean_content = []\n searched_unique_single_word = []\n un_searched_unique_single_word = []\n searched_pair_word = []\n searched_unique_single_word_synonym = []\n\n for i in range(self.subhead.shape[0]):\n clean_content.append(\n self.filter_stop_words(self.subhead.Col7[i], self.stop_words)\n )\n searched_unique_single_word.append(\n self.get_searched_unique_single_word(\n self.subhead.Col7[i], self.stop_words\n )\n )\n un_searched_unique_single_word.append(\n self.get_un_searched_single_word(self.subhead.Col7[i], self.stop_words)\n )\n searched_pair_word.append(\n self.get_searched_paired_word(self.subhead.Col7[i], self.stop_words)\n )\n searched_unique_single_word_synonym.append(\n self.get_searched_single_word_synonym(\n self.subhead.Col7[i], self.stop_words\n )\n )\n\n self.subhead[\"clean_content\"] = clean_content\n self.subhead[\"searched_unique_single_word\"] = searched_unique_single_word\n self.subhead[\"un_searched_unique_single_word\"] = un_searched_unique_single_word\n self.subhead[\"searched_pair_word\"] = searched_pair_word\n self.subhead[\n \"searched_unique_single_word_synonym\"\n ] = searched_unique_single_word_synonym", "def render_text_surfaces(self):\n self.images = [] # The text surfaces.\n line_width = 0\n line = []\n space_width = self.font.size(' ')[0]\n\n # Put the words one after the other into a list if they still\n # fit on the same line, otherwise render the line and append\n # the resulting surface to the self.images list.\n for word in self.text:\n line_width += self.font.size(word)[0] + space_width\n # Render a 
line if the line width is greater than the rect width.\n if line_width > self.rect.w:\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)\n line = []\n line_width = self.font.size(word)[0] + space_width\n\n line.append(word)\n\n # Need to render the last line as well.\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)", "def _get_and_build_text_structure(self):\n return Text_structure(self.filename, self)", "def _spacyfy(\n matches: List[List[Optional[Tuple[str, str]]]], pattern: List[Dict[str, Any]]\n) -> List[List[Dict[str, Any]]]:\n new_patterns = []\n if matches:\n for match in matches:\n new_pattern = deepcopy(pattern)\n for i, token in enumerate(match):\n if token:\n del new_pattern[i][token[0]]\n new_pattern[i][\"TEXT\"] = token[1]\n new_patterns.append(new_pattern)\n return new_patterns", "def reconstruct_object_multiple(path):\n if len(path) == 0:\n return '%s'\n elif len(path) == 2:\n return '{{%s: [{recons}]}}'.format(recons=reconstruct_object_multiple(path[1:]))\n else:\n # The indexed query on `path` below is the means by which we recurse\n # Every iteration pushes it closer to a length of 0 and, thus, bottoming out\n # This function differs from the singular reconstruction in that the final object\n # gets wrapped in a list (when length is 2, there should be a key and a value left)\n return '{{%s: {recons}}}'.format(recons=reconstruct_object_multiple(path[1:]))", "def preprocess(doc_in, doc_out):\n def output(text, doc_id):\n doc_out.write(doc_id + \"\\n\")\n doc_out.write(text.replace(\"\\n\", \" \") + \"\\n\\n\")\n\n def filter_text(t):\n filtered_out = [\"<P>\", \"</P>\"]\n r = t\n for f in filtered_out:\n r = r.replace(f, \" \")\n return r\n\n\n doc_id = None\n reading_text = False\n text = \"\"\n for line in doc_in:\n if(str_text_start in line):\n if(reading_text):\n warning(\"Found \" + str_text_start + \" in text\")\n if(not doc_id):\n warning(\"Reading text without knowing id\")\n continue\n reading_text = True\n continue\n if((str_text_stop in line) and reading_text):\n output(text, doc_id)\n text = \"\"\n reading_text = False\n doc_id = None\n doc_id_match = pat_doc_no.match(line)\n if(doc_id_match):\n doc_id = doc_id_match.group(1)\n if(reading_text):\n warning(\"Found doc id in text\")\n continue\n if(reading_text):\n text = text + filter_text(line)", "def load2TexAS(data):\n # State global variable\n global cache_stanza, cache_spacy, cache_udpipe, cache_trankit\n\n # Collect the data\n string = data['text']\n lang = data['lang']\n packages = data['packages']\n\n hash_string = hashlib.sha1(string.encode()).hexdigest()\n\n final_HTML = \"\"\n message_HTML = \"<div class=\\'message\\'>\"\n isMessage = False\n header_input = []\n log_row = [datetime.now().strftime('%Y-%m-%d %H:%M:%S'), lang]\n\n if \"stanza\" in packages:\n # Initialize the TexAS document\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n\n ## If cache is full, reload the cache.\n if cache.count(cache_stanza) > 100:\n cache.write(cache_stanza, \"stanza\")\n cache_stanza = cache.load(\"stanza\")\n \n ## Check text whether is already in cache\n if hash_string in cache_stanza[lang].keys():\n tokens, end_pos, lemma, pos, nlpWordsList, hasCompoundWords, cache_stanza = cache.read(\"stanza\", cache_stanza, lang, string) #The output cache_stanza has 'count' been updated.\n else:\n tokens, end_pos, lemma, pos, nlpWordsList, 
hasCompoundWords, cache_stanza = cache.add(\"stanza\", cache_stanza, lang, string, get_services_stanza)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"stanza\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"stanza\" + \"-\" + lang)\n mydoc.setSentenceList(end_pos)\n\n if hasCompoundWords:\n mydoc.addTokenView( \"WORDS\", nlpWordsList )\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n if hasCompoundWords:\n myTabView.showView(\"WORDS\")\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"Stanza\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>Stanza</div> <br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"stanza\")\n \n else:\n log_row.append(\"\")\n\n if \"spacy\" in packages:\n # SpaCy does not support Arabic and Russian\n if lang == 'ara' or lang == 'rus':\n message_HTML += \"SpaCy does not support Arabic or Russian. <br>\"\n isMessage = True\n\n else:\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n \n ## If cache is full, reload the cache.\n if cache.count(cache_spacy) > 100:\n cache.write(cache_spacy, \"spacy\")\n cache_spacy = cache.load(\"spacy\")\n \n ## Check text whether is already in cache\n if hash_string in cache_spacy[lang].keys():\n tokens, end_pos, lemma, pos, cache_spacy = cache.read(\"spacy\", cache_spacy, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_spacy = cache.add(\"spacy\", cache_spacy, lang, string, get_services_spacy)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"spacy\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"spacy\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"SpaCy\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>\" + \"SpaCy\" + \"</div><br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"spacy\")\n \n else:\n log_row.append(\"\")\n\n if \"udpipe\" in packages: \n ## If cache is full, reload the cache.\n if cache.count(cache_udpipe) > 100:\n cache.write(cache_udpipe, \"udpipe\")\n cache_udpipe = cache.load(\"udpipe\")\n \n ## Check text whether is already in cache\n if hash_string in cache_udpipe[lang].keys():\n tokens, end_pos, lemma, pos, cache_udpipe = cache.read(\"udpipe\", cache_udpipe, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_udpipe = cache.add(\"udpipe\", cache_udpipe, lang, string, get_services_udpipe)\n \n string_udpipe = \" \".join(tokens)\n\n # Initialize the TexAS document\n mydoc = tx.Document(string_udpipe)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n\n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"udpipe\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"udpipe\" + \"-\" + lang )\n 
mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"UDpipe\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>UDpipe</div> <br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"udpipe\")\n \n else:\n log_row.append(\"\")\n \n if \"trankit\" in packages:\n # trankit temporarily only support english\n if lang == 'eng':\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n \n ## If cache is full, reload the cache.\n if cache.count(cache_trankit) > 100:\n cache.write(cache_trankit, \"trankit\")\n cache_trankit = cache.load(\"trankit\")\n \n ## Check text whether is already in cache\n if hash_string in cache_trankit[lang].keys():\n tokens, end_pos, lemma, pos, cache_trankit = cache.read(\"trankit\", cache_trankit, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_trankit = cache.add(\"trankit\", cache_trankit, lang, string, get_services_trankit)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"spacy\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"spacy\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"Trankit\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>\" + \"Trankit\" + \"</div><br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"trankit\")\n\n else:\n message_HTML += \"Trankit temporarily only supports English. 
<br>\"\n isMessage = True \n \n else:\n log_row.append(\"\")\n\n message_HTML += \"</div>\"\n if isMessage:\n return message_HTML + get_header_table(header_input) + \"<br><br>\" + final_HTML\n\n writeLog(log_row)\n return get_header_table(header_input) + \"<br><br>\" + final_HTML", "def get_word_structure(word, roots_data_frame, suffixes_data_frame):\n\n result = find_roots(word, roots_data_frame['Root word regex'])\n\n root_starts = [x[0] for x in result]\n root_ends = [x[1] for x in result]\n root_indices = [x[2] for x in result]\n root_words = roots_data_frame.iloc[root_indices, 0].values\n root_meanings = roots_data_frame.iloc[root_indices, 1].values\n\n word_fragments = []\n prev_end = 0\n for start, end, meaning in zip(root_starts, root_ends, root_meanings):\n diff = start - prev_end\n if diff > 0:\n word_end = prev_end + diff\n word_fragments.append( ('none', word[prev_end:word_end], '', prev_end, word_end) )\n word_fragments.append( ('styled', word[start:end], meaning, start, end) )\n prev_end = end\n\n word_end = len(word)\n if prev_end < word_end:\n # Find suffix\n result = find_suffix(word[prev_end:word_end], suffixes_data_frame['Suffix regex'])\n word_fragments_suffix = []\n if len(result) > 0:\n suffix_start, suffix_end, suffix_regex_i = result[0]\n suffix_starts = [x[0] for x in result]\n suffix_ends = [x[1] for x in result]\n suffix_indices = [x[2] for x in result]\n suffix_words = suffixes_data_frame.iloc[suffix_indices, 0].as_matrix()\n suffix_meanings = suffixes_data_frame.iloc[suffix_indices, 1].as_matrix()\n for start, end, meaning in zip(suffix_starts, suffix_ends, suffix_meanings):\n start += prev_end\n end += prev_end\n word_fragments_suffix.append(\n ('styled', word[start:end], meaning, start, end) )\n word_end = start # Move word end boundary to suffix start\n\n if prev_end < word_end:\n word_fragments.append( ('none', word[prev_end:word_end], '', prev_end, word_end) )\n\n word_fragments.extend(word_fragments_suffix)\n\n # pprint(word_fragments)\n return word_fragments", "def getRtf(self):\n self.pieces = []\n for node in self.root.findall(\"MiscellaneousDocumentText\"):\n for child in node:\n if child.tag == \"Para\":\n self.__addPara(child)\n elif child.tag in (\"ItemizedList\", \"OrderedList\"):\n self.__addList(child, child.tag)\n return \"\".join(self.pieces)", "def render_parallel(children):\r\n if len(children) == 1:\r\n return children[0]\r\n\r\n children_latex = [k.latex for k in children if k.latex != \"||\"]\r\n latex = r\"\\|\".join(children_latex)\r\n tall = any(k.tall for k in children)\r\n return LatexRendered(latex, tall=tall)", "def merge_words(w, z):\n\n list_w = list(w)\n list_z = list(z)\n\n return recursive_build_list(\"\", list_w, list_z)", "def _preprocess(self, txt_seq):\n input = []\n for token in txt_seq.split():\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n input.append(self.word2id[\"<END>\"])\n input = torch.LongTensor(input)\n return input", "def iter_items(items):\n for it in items:\n text = nlp(it[\"text\"].lower())\n\n # Stop word removal\n token_list = [\n token.lemma_\n for token in text\n if not token.is_stop and not token.is_punct\n ]\n\n it[\"text\"] = \" \".join(token_list)\n\n children_items = it.get(\"content\", {}).get(\"items\")\n if children_items:\n iter_items(children_items)", "def surface_labelled_data_preparation(word_dictionary: {str, str}):\n X = []\n Y = []\n words = []\n\n for word in word_dictionary:\n segments = word.split('-')\n labels = word_dictionary[word].split('-')\n segment_features = 
[]\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n words.append(segments)\n\n X.append(segment_features)\n Y.append(labels)\n words.append(word)\n\n return X, Y, words" ]
[ "0.5540645", "0.5538486", "0.55300415", "0.55045766", "0.54806244", "0.545654", "0.543825", "0.5400212", "0.53588676", "0.53223515", "0.53047854", "0.5286626", "0.52593404", "0.5253145", "0.52510726", "0.5232758", "0.5221223", "0.521921", "0.521023", "0.52065337", "0.5153433", "0.5148956", "0.513898", "0.51175636", "0.51048815", "0.51046187", "0.50930244", "0.508939", "0.50889575", "0.5087909", "0.5086575", "0.5084244", "0.5082297", "0.507265", "0.504284", "0.5041099", "0.50381225", "0.50367045", "0.5031065", "0.5028929", "0.5022349", "0.5015388", "0.500909", "0.49793053", "0.49770945", "0.49745715", "0.4973663", "0.49615845", "0.496101", "0.49578974", "0.49569866", "0.4954266", "0.4939848", "0.49387324", "0.49342328", "0.49313897", "0.49285784", "0.49246058", "0.49241424", "0.49215734", "0.49167117", "0.49166995", "0.4909334", "0.49027574", "0.4902298", "0.48995182", "0.48991957", "0.48952714", "0.48942637", "0.48870748", "0.48851857", "0.488414", "0.48754483", "0.48707375", "0.48696047", "0.48693797", "0.48639104", "0.4863279", "0.48514256", "0.48457557", "0.48433965", "0.4836478", "0.48279408", "0.4823712", "0.4822568", "0.48211467", "0.4813287", "0.48129296", "0.48119855", "0.48119274", "0.4807729", "0.48035604", "0.4796692", "0.47949794", "0.47938877", "0.47927326", "0.4777643", "0.4771506", "0.47700998", "0.47683817" ]
0.6977978
0
Converts a dvi, which potentially has multiple slides, into a directory full of enumerated pngs corresponding with these slides. Returns a list of PIL Image objects for these images sorted as they where in the dvi
def dvi_to_svg(dvi_file, regen_if_exists=False): result = dvi_file.replace(".dvi", ".svg") if not os.path.exists(result): commands = [ "dvisvgm", dvi_file, "-n", "-v", "0", "-o", result, ">", get_null() ] os.system(" ".join(commands)) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_all_dicom_images(self, verbose=True):\n if verbose: print(\"Loading dicom files ... This may take a moment.\")\n\n path = self.get_path_to_dicom_files()\n fnames = [fname for fname in os.listdir(path)\n if fname.endswith('.dcm') and not fname.startswith(\".\")]\n images = []\n for fname in fnames:\n image = dicom.dcmread(os.path.join(path,fname))\n\n seid = str(image.SeriesInstanceUID).strip()\n stid = str(image.StudyInstanceUID).strip()\n\n if seid == self.series_instance_uid and\\\n stid == self.study_instance_uid:\n images.append(image)\n\n # ##############################################\n # Clean multiple z scans.\n #\n # Some scans contain multiple slices with the same `z` coordinate \n # from the `ImagePositionPatient` tag.\n # The arbitrary choice to take the slice with lesser \n # `InstanceNumber` tag is made.\n # This takes some work to accomplish...\n zs = [float(img.ImagePositionPatient[-1]) for img in images]\n inums = [float(img.InstanceNumber) for img in images]\n inds = list(range(len(zs)))\n while np.unique(zs).shape[0] != len(inds):\n for i in inds:\n for j in inds:\n if i!=j and zs[i] == zs[j]:\n k = i if inums[i] > inums[j] else j\n inds.pop(inds.index(k))\n\n # Prune the duplicates found in the loops above.\n zs = [zs[i] for i in range(len(zs)) if i in inds]\n images = [images[i] for i in range(len(images)) if i in inds]\n\n # Sort everything by (now unique) ImagePositionPatient z coordinate.\n sort_inds = np.argsort(zs)\n images = [images[s] for s in sort_inds]\n # End multiple z clean.\n # ##############################################\n\n return images", "def get_sorted_img_list():\n dirPath=settings.BASE_DIR\n imgdir=\"/pttWeb/static/topicmodel\"\n fileID=glob.glob(dirPath+imgdir+\"/*.png\")\n fileID=[i.replace('/home/stream/Documents/minimum_django/pttWeb/static/','') for i in fileID]\n fileID=[Week_Image(i) for i in fileID]\n fileID.sort(key=lambda x: x.date, reverse=True)\n #translate . 
to / since javascript parsing date has some issue!\n fileID=[(i.filename,date_trans_z(i.date.strftime(\"%Y.%m.%d\"))) for i in fileID]\n return fileID", "def getimgs():", "def get_imgs(paths_list: list) -> list:\n \n imgs_list = [Image.open(project_path + data_path + paths_list[i]) for i in range(len(paths_list))]\n \n return imgs_list", "def images_for_denoising():\r\n return list_images(relpath('image_dataset/train'), True)", "def get_images(path_list):\n images = []\n labels = []\n names = []\n i = 0\n for path in path_list:\n for fruit_dir_path in glob.glob(path):\n fruit_label = fruit_dir_path.split(\"/\")[-1]\n for image_path in glob.glob(os.path.join(fruit_dir_path, \"*.jpg\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n\n image = cv2.resize(image, (45, 45))\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n images.append(image)\n names.append(fruit_label)\n labels.append(i)\n i += 1\n\n images = np.array(images)\n print(images.shape)\n # add a new dimension here\n with np.nditer(images, op_flags=['readwrite']) as it:\n for x in it:\n x = np.expand_dims(x, axis=0)\n labels = np.array(labels)\n return images, labels, i", "def get_images(self):\n \n images = []\n for order in self.order_lst:\n o_items = order.get_items()\n images.append(o_items.get_image())\n \n return images", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def tile_slides(slides_filepaths, desired_tile_with, desired_overlap, desired_magnification):\n containing_folders = []\n for slide_filepath in slides_filepaths:\n containing_folders.append(tile_slide(slide_filepath, desired_tile_with, desired_overlap, desired_magnification))\n return containing_folders", "def get_imlist(path):\n\treturn [os.path.join( path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def to_pillow(id:int, samples:list=None):\n\t\t\tsamples = listify(samples)\n\t\t\tfiles = Dataset.Image.get_image_files(id, samples=samples)\n\t\t\timages = [f.Image.to_pillow(f.id) for f in files]\n\t\t\treturn images", "def listImages(name_list, path, as_grey=True):\n\timage_list = []\n\n\tfor name in name_list:\n\t\timage = io.imread(path + \"/\" + name + \".jpg\", as_grey=as_grey)\n\t\tif as_grey is True:\n\t\t\timage = resize(image, (200, 200))\n\t\timage_list.append(image)\n\n\treturn image_list", "def _printout_images_info(design_path):\r\n _max_pic_number = 8\r\n images = dict()\r\n for foo in os.listdir(design_path):\r\n abs_foo = os.path.join(design_path, foo)\r\n if os.path.isfile(abs_foo):\r\n continue\r\n if foo.endswith(\"Images\"):\r\n images.setdefault(foo, list())\r\n for bar in os.listdir(abs_foo):\r\n if bar.endswith(\".png\"):\r\n images[foo].append(bar)\r\n if images:\r\n for k, v in list(images.items()):\r\n v.sort(key=sort_by_num, reverse=True)\r\n nine_images = dict()\r\n images_number = 0\r\n for i in range(0, 10):\r\n if images_number > _max_pic_number:\r\n break\r\n for k, v in list(images.items()):\r\n nine_images.setdefault(k, list())\r\n try:\r\n nine_images[k].append(v[i])\r\n images_number += 1\r\n if images_number > _max_pic_number:\r\n break\r\n except IndexError:\r\n continue\r\n say_it(\"\")\r\n say_it(\"Images Number: {}\".format(images_number))\r\n ii = 1\r\n for kk, vv in list(nine_images.items()):\r\n for foo in vv:\r\n say_it(\"-PNG{}: {}/{}\".format(ii, kk, foo))\r\n ii += 1", "def get_imlist_png(path):\n \n return [os.path.join(path,f) for f in os.listdir(path) if 
f.endswith('.png')]", "def get_imlist(path):\n return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def get_imlist2(path):\n return [\n os.path.join(path, f) for f in os.listdir(path) if f.endswith('.ppm')\n ]", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def div_imgs(dir_path: str, holdout: int) -> None:\n VH_REGEX = re.compile(r'(.*)_([0-9]+).vh.tif')\n\n for file in os.listdir(dir_path):\n m = re.match(VH_REGEX, file)\n if not m:\n continue\n\n pre, num = m.groups()\n vv = f'{pre}_{num}.vv.tif'\n mask = f'{pre}_{num}.mask.tif'\n\n if not os.path.isfile(os.path.join(dir_path, mask)):\n print(f\"Tile: {file} is missing a mask {mask}!\")\n\n if not os.path.isfile(os.path.join(dir_path, vv)):\n print(f\"Tile: {file} is missing a mask {vv}!\")\n\n test_or_train = 'train' if random.random() > holdout else 'test'\n\n folder = os.path.join(dir_path, test_or_train)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n\n os.rename(\n os.path.join(dir_path, file), os.path.join(folder, file)\n )\n os.rename(\n os.path.join(dir_path, vv),\n os.path.join(folder, vv)\n )\n os.rename(\n os.path.join(dir_path, mask),\n os.path.join(folder, mask)\n )", "def get_imlist(path):\n return [\n os.path.join(path, f) for f in os.listdir(path) if f.endswith('.bmp')\n ]", "def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X,y", "def load_images(tags_pict):\n img_data_list = []\n for p in tags_pict.index :\n img_path = tags_pict.full_path[p]\n img = load_img(img_path, target_size= inputShape)\n x = img_to_array(img)\n x = np.expand_dims(img, axis=0)\n # pre-process the image using the appropriate function based on the\n # model that has been loaded (i.e., mean subtraction, scaling, etc.)\n x = preprocess_input(x)\n img_data_list.append(x)\n img_data = np.array(img_data_list)\n img_data=np.rollaxis(img_data,1,0)\n img_data=img_data[0]\n return(img_data)", "def prepare_image_list(path, image_name, i_range):\n 
DisplayNetwork.display_green(\"[INFO] Loading the images to train the network\")\n positives = []\n file_list = commands.getoutput('ls ./' + path + '/*.jpg | xargs -n 1 basename').split(\"\\n\")\n\n for i in i_range:\n image_name = path + '/' + file_list[i]\n DisplayNetwork.display_yellow(\"[INFO] Loading image\" + image_name)\n\n image_matrix = ImageFunctions.turnImageToGray(image_name) # Load image as gray\n reshaped = np.reshape(image_matrix, 361) # makes 19x19 matrix to 1x361 vector\n positives.append(reshaped.tolist())\n\n return np.array(positives)", "def make_imgs_list(self, imgs_dir, imgs_list):\n \n empty_list = []\n \n for img in imgs_list:\n \n img_dir = imgs_dir + '/' + img\n \n empty_list.append(img_dir)\n \n return empty_list", "def _locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images", "def make_image_list(self):\n image_list = [self.spritesheet_dict['closed'],\n self.spritesheet_dict['opened']]\n\n return image_list", "def load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def makeSlideShow(slides):\n from copy import deepcopy\n slidesCopy = deepcopy(slides)\n slideShowOrder = []\n while slidesCopy:\n nextSlide = findNextSlide(slidesCopy[len(slidesCopy) - 1], slidesCopy)\n slideShowOrder.append(nextSlide)\n slidesCopy.remove(slidesCopy[len(slidesCopy) - 2])\n \n return slideShowOrder", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def load_images(path):\n images = []\n images_names = []\n \n for file_name in os.listdir(path):\n image_name = file_name\n images_names.append(image_name)\n images_names = sorted(images_names) #use sort to insure linux file sys behaves\n print(images_names) #check for proper order\n\n for file_name in images_names:\n image = pygame.image.load(path + os.sep + file_name).convert()\n images.append(image)\n return images", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. 
This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def process(directory):\n files = []\n\n options = [\"Load\", \"Create\"]\n choice = options[int(ui.prompt(options=options))]\n\n for item in os.listdir(directory):\n if os.path.isfile(os.path.join(directory, item)):\n filename = os.path.join(directory, item)\n if choice == \"Load\" and item.endswith(\".png\"):\n files.append(filename)\n elif choice == \"Create\" and item.endswith(\".file\"):\n files.append(filename)\n\n filenames, pageNames = imagePages(files, choice)\n \n targets = [name.split('/')[-1][:5] for name in filenames]\n return pageNames, targets, filenames", "def get_images(self):\n return [self.get_image(i) for i in range(0, self.number_sprites - 1)]", "def main(vis_dirs, outdir):\n assert len(vis_dirs) == 4\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n for i, filename in enumerate(tqdm(os.listdir(vis_dirs[-1]))):\n # if i % 100 == 0:\n # print(i)\n\n files = [os.path.join(vis_dir, filename) for vis_dir in vis_dirs]\n outimg = os.path.join(outdir, filename)\n merge_four_images(files, outimg)\n\n print (\"Finished! Result dir is %s\" % outdir)", "def convert_list_images(self, listcv2):\n indice = 0\n listimg = []\n for imgcv2 in listcv2:\n img = cv2.cvtColor(imgcv2, cv2.COLOR_BGR2RGB)\n im_pil = Image.fromarray(img)\n #cv2.imshow('window', imgcv2)\n #cv2.waitKey(1) \n #sleep(tempo[indice])\n listimg.append(im_pil.convert('RGB'))\n indice += 1\n return listimg", "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def get_images(path):\n\n # Cast path to absolute path\n absolute = abspath(path)\n\n img_lis = [] # Holds images in a folder\n file_lis = get_files(absolute)\n\n # Now get the images within file list\n img_lis = [f for f in file_lis if is_filetype(f)]\n\n return img_lis", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def get_images(self):\n \n return self.img_lst", "def read_images(path_list, img_height, img_width):\n\n out = []\n for img_path in path_list:\n # Below line converts to a PIL Python Image Library format\n img = image.load_img(img_path, target_size=(img_height, img_width))\n x = image.img_to_array(img) # Convert a PIL image to a numpy array\n x = np.expand_dims(x, axis=0) # (1, dim1, dim2, channels) Type: float32\n out.append(x.astype('uint8'))\n return np.vstack(out) # Stack the images one below the other 
(rows)", "def get_data_images(path):\n\n return sorted(\n [os.path.join(root, filename) for root, dirnames, filenames in os.walk(path) for filename in\n filenames if\n filename.endswith('.jpg') and os.path.getsize(os.path.join(root, filename)) > 0]\n )", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X_test = []\n y_test = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y_test.append(label)\n X_test.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X_test,y_test", "def scanDir(dcmdir):\n\n if not enabled():\n raise RuntimeError('dcm2niix is not available or is too old')\n\n dcmdir = op.abspath(dcmdir)\n cmd = f'{dcm2niix()} -b o -ba n -f %s -o . \"{dcmdir}\"'\n series = []\n\n with tempdir.tempdir() as td:\n\n with open(os.devnull, 'wb') as devnull:\n sp.call(shlex.split(cmd), stdout=devnull, stderr=devnull)\n\n files = glob.glob(op.join(td, '*.json'))\n\n if len(files) == 0:\n return []\n\n for fn in files:\n with open(fn, 'rt') as f:\n meta = json.load(f)\n meta['DicomDir'] = dcmdir\n # SeriesDescription is not\n # guaranteed to be present\n if 'SeriesDescription' not in meta:\n meta['SeriesDescription'] = meta['SeriesNumber']\n series.append(meta)\n\n # sort by series number\n def key(s):\n return s.get('SeriesNumber', sys.maxsize)\n\n series = list(sorted(series, key=key))\n\n return series", "def load_images(self,im_paths,imlist,im_index):\n\n\t\timlist_arr = []\n\t\tj = 0\n\t\tfor im_path in im_paths:\n\t\t\tim = None\n\n\t\t\ttry:\n\t\t\t\tim = Image.open(im_path)\n\t\t\t\t#im = imread(im_path)\n\t\t\t\t#print im.shape\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\t\t\n\t\t\tif im != None:\n\t\t\t\ttry:\n\t\t\t\t\tim_aux = np.array(im,dtype=theano.config.floatX)\n\t\t\t\t\tim_converted = True\n\t\t\t\texcept TypeError, e:\n\t\t\t\t\tim_converted = False\n\t\t\t\t\tprint e\n\t\t\t\t\n\t\t\t\tif im_converted == True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif im_aux.shape[2] == 4:\n\t\t\t\t\t\t\tbackground = Image.new(\"RGB\", im.size, (255, 255, 255))\n\t\t\t\t\t\t\tbackground.paste(im, mask=im.split()[3]) # 3 is the alpha channel\n\t\t\t\t\t\t\tim = background\n\t\t\t\t\t\t\tim_aux = np.array(background,dtype=theano.config.floatX)\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\n\t\t\t\t\t\tif im_aux.shape[2] == 3:\n\t\t\t\t\t\t\tbn_parsed = os.path.basename(im_path).split(\"_\")\n\t\t\t\t\t\t\tim_id = int(bn_parsed[0])\n\t\t\t\t\t\t\t#print im_id\n\t\t\t\t\t\t\t#Ignore potential duplicates\n\t\t\t\t\t\t\t#if im_id not in self.im_index:\n\t\t\t\t\t\t\tif im_id not in im_index:\n\t\t\t\t\t\t\t\tim_aux = self.scale_and_crop_img(im)\n\t\t\t\t\t\t\t\t# This is for multiprocessing\n\t\t\t\t\t\t\t\tim_index.append(im_id)\n\t\t\t\t\t\t\t\timlist.append(np.asarray(im_aux))\n\n\t\t\t\t\t\t\t\t# Uncomment this if you are not using multiprocessing\n\t\t\t\t\t\t\t\t# self.im_index.append(im_id)\n\t\t\t\t\t\t\t\t# self.imlist.append(np.asarray(im_aux))\n\t\t\t\t\t\t\t\t#self.imlist.append(im_aux)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \"invalid image: {} size:{}\".format(im.filename, 
im_aux.shape)\n\t\t\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t#raise e\n\t\t\t\t\t\tprint e\n\t\n\t\t\t# if self.verbose:\n\t\t\t# \tsys.stdout.write(\"\\r Process: {0}/{1}\".format(j, len(im_paths)))\n\t\t\t# \tsys.stdout.flush()\n\n\t\t\tj += 1", "def read_vanhateren_images (n_imgs=5):\n folder_name = r'D:\\VanHateren\\vanhateren_imc' # change this to point to the directory which holds the van hateren data\n # files = listdir(folder_name)\n onlyfiles = [ f for f in listdir(folder_name) if isfile(join(folder_name,f)) ]\n imgs = []\n for i in range(n_imgs):\n filename = join(folder_name, onlyfiles[i])\n with open(filename, 'rb') as handle:\n s = handle.read()\n arr = array.array('H', s)\n arr.byteswap()\n img_i = np.array(arr, dtype='uint16').reshape(1024, 1536)\n imgs.append(img_i) \n return imgs\n #pylab.imshow(img)\n #pylab.show()", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def list_images(bin_lid):\n bin_url = DATA_NAMESPACE + bin_lid + '.json'\n logging.info('listing images for %s' % bin_lid)\n ds = json.loads(urllib.urlopen(bin_url).read())\n for d in ds:\n yield d['imagename']", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def read_paths(path):\n images = [[] for _ in range(2)]\n for dirname, dirnames, _ in os.walk(path):\n for subdirname in dirnames:\n filepath = os.path.join(dirname, subdirname)\n for filename in os.listdir(filepath):\n try:\n imgpath = str(os.path.join(filepath, filename))\n images[0].append(imgpath)\n limit = re.findall('[0-9]+', filename)\n images[1].append(limit[0])\n except IOError as err:\n print(\"I/O error\")\n except:\n print(\"I/O error 2\")\n raise\n return images", "def read_images(path, sz=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n try:\n im = Image.open(os.path.join(subject_path, filename))\n im = im.convert(\"L\")\n # resize to given size (if given)\n if (sz is not None):\n im = im.resize(sz, Image.ANTIALIAS)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print 
\"Unexpected error:\", sys.exc_info()[0]\n raise\n c = c+1\n return [X,y]", "def images(self):\n return self._primary_images + self._unique_images + self._floating_images", "def load_from_folder(path):\n images = []\n files = os.listdir(path)\n files.sort()\n for file in tqdm(files):\n images.append(io.imread(path + file))\n return images", "def _convert_path_list_to_images_and_labels(self, path_list, is_one_shot_task):\n number_of_pairs = int(len(path_list) / 2)\n pairs_of_images = [np.zeros(\n (number_of_pairs, self.image_height, self.image_height, 1)) for i in range(2)]\n labels = np.zeros((number_of_pairs, 1))\n\n for pair in range(number_of_pairs):\n image = Image.open(path_list[pair * 2])\n image = np.asarray(image).astype(np.float64)\n image = image / image.std() - image.mean()\n\n pairs_of_images[0][pair, :, :, 0] = image\n image = Image.open(path_list[pair * 2 + 1])\n image = np.asarray(image).astype(np.float64)\n image = image / image.std() - image.mean()\n\n pairs_of_images[1][pair, :, :, 0] = image\n if not is_one_shot_task:\n if (pair + 1) % 2 == 0:\n labels[pair] = 0\n else:\n labels[pair] = 1\n\n else:\n if pair == 0:\n labels[pair] = 1\n else:\n labels[pair] = 0\n\n if not is_one_shot_task:\n random_permutation = np.random.permutation(number_of_pairs)\n labels = labels[random_permutation]\n pairs_of_images[0][:, :, :,\n :] = pairs_of_images[0][random_permutation, :, :, :]\n pairs_of_images[1][:, :, :,\n :] = pairs_of_images[1][random_permutation, :, :, :]\n\n return pairs_of_images, labels", "def make_int_object_list(self):\n from libtbx import easy_pickle as ep\n\n if self.params.cctbx.selection.select_only.grid_search_path == None:\n int_dir = misc.set_base_dir('integration', True)\n else:\n int_dir = self.params.cctbx.selection.select_only.grid_search_path\n\n img_objects = []\n\n # Inspect integration folder for image objects\n for root, dirs, files in os.walk(int_dir):\n for filename in files:\n found_file = os.path.join(root, filename)\n if found_file.endswith(('int')):\n obj = ep.load(found_file)\n img_objects.append(obj)\n\n # Pick a randomized subset of images\n if self.params.advanced.random_sample.flag_on and \\\n self.params.advanced.random_sample.number < len(img_objects):\n gs_img_objects = self.select_random_subset(img_objects)\n else:\n gs_img_objects = img_objects\n\n return gs_img_objects", "def environmentImages(dirPath):\n images = []\n for f in os.listdir(dirPath):\n if os.path.isfile(os.path.join(dirPath, f)):\n name, ext = os.path.splitext(f)\n if ext.lower().replace(\".\", \"\") in [\"hdr\", \"exr\", \"rad\", \"tif\", \"tiff\"]:\n images.append(f)\n return sorted(images)", "def read_imgs(path):\n dirs = os.listdir(path)\n imgs = []\n for fn in dirs:\n img_path = path + '/' + fn\n img = cv2.imread(img_path, 1)\n img = np.float32(cv2.resize(img, (224, 224))) / 255\n imgs.append(img)\n imgs = np.array(imgs)\n return imgs", "def get_images(directory=None):\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def videos_to_images(dir_path: str, load_segmantation_masks: bool = False) -> None:\n 
os.makedirs(dir_path + \"_processed\", exist_ok=True)\n videos = [v for v in os.listdir(dir_path) if re.match(\"video_[0-9]+.npy\", v)]\n for video_path in videos:\n video = load_video(os.path.join(dir_path, video_path))\n for i in range(len(video)):\n im = Image.fromarray(video[i].astype(np.uint8))\n im.save(dir_path + f'_processed/{video_path[:-4]}_{i}.png')\n if load_segmantation_masks:\n seg_mask = load_seg_masks_from_video(os.path.join(dir_path, video_path[:-4] + segmentation_mask_sfx + \".npy\"))\n for i in range(len(video)):\n np.save(dir_path + f'_processed/{video_path[:-4]}_{i}_seg_mask', seg_mask[i])", "def sort(self):\n\n img_files = os.listdir(self.path)\n\n img_list = {}\n\n for img_file in img_files:\n filename = os.path.join(self.path, img_file)\n\n try:\n img = Image.open(filename)\n except:\n continue\n\n print \"Analyzing %s\" % img_file\n\n points = self.points(img.size[0], img.size[1])\n key = \"\"\n for point in points:\n\n # Get the average color for each point\n ave_points = self.diamond_points(point[0], point[1])\n red = 0\n green = 0\n blue = 0\n for ave_point in ave_points:\n try:\n rgb = img.getpixel(ave_point)\n red += rgb[0]\n green += rgb[1]\n blue += rgb[2]\n except IndexError:\n pass\n red /= len(ave_points)\n green /= len(ave_points)\n blue /= len(ave_points)\n\n # Bitdepths:\n # 12 bit - 4096 colors, range 0-F, divide by 16\n # 9 bit - 512 colors, range 0-7, divide by 32\n # 6 bit - 64 colors, range 0-3, divide by 64\n # 3 bit - 8 colors, range 0-1, divide by 128\n\n if self.num_colors == 8:\n div = 128\n elif self.num_colors == 64:\n div = 64\n elif self.num_colors == 512:\n div = 32\n elif self.num_colors == 4096:\n div = 16\n else:\n self.usage()\n\n # Lower the bitdepth\n red = int(red / div)\n green = int(green / div)\n blue = int(blue / div)\n\n # Add to the key\n key += \"%x%x%x\" % (red, green, blue)\n\n # Add the key if needed\n if key not in img_list:\n img_list[key] = []\n\n # Add the file to the list\n img_list[key].append(img_file)\n\n # Go through and rename the files, based on the img_list dictionary\n # and the prefix\n num = 1\n for img in sorted(img_list.iterkeys()):\n for filename in sorted(img_list[img]):\n name, ext = os.path.splitext(filename)\n new_filename = \"%s%04d%s\" % (self.prefix, num, ext)\n full_filename = os.path.join(self.path, filename)\n full_new_filename = os.path.join(self.path, new_filename)\n if os.path.isfile(full_new_filename):\n print \"File %s exists - aborting!\" % full_new_filename\n return\n\n os.rename(full_filename, full_new_filename)\n print \"Renamed %s to %s.\" % (filename, new_filename)\n num += 1", "def get_images(directory=None):\r\n \r\n if directory == None:\r\n directory = os.getcwd() # Use working directory if unspecified\r\n \r\n image_list = [] # Initialize aggregaotrs\r\n file_list = []\r\n \r\n directory_list = os.listdir(directory) # Get list of files\r\n for entry in directory_list:\r\n if len(file_list)<2:\r\n absolute_filename = os.path.join(directory, entry)\r\n try:\r\n image = PIL.Image.open(absolute_filename)\r\n file_list += [entry]\r\n image_list += [image]\r\n except IOError:\r\n pass # do nothing with errors tying to open non-images\r\n return image_list, file_list", "def load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], 
im.size[0], 1))\n return data", "def buildImages(files, targets, type):\n images = []\n for file in files:\n targets.append(file)\n with open(file, \"rb\") as f:\n if type == \"Byte\":\n images.append(bytePlot(list(f.read())))\n elif type == \"Markov\":\n images.append(markovPlot(list(f.read())))\n elif type == \"Hilbert\":\n images.append(hilbertPlot(list(f.read())))\n smp.imsave(\"{}.png\".format(file), images[-1])\n return images, targets", "def get_images_from_sheet(sheet, width, height):\n sprites = []\n dest_rect = pygame.Rect((0, 0), (width, height))\n total = sheet.get_width() / width\n i = 0\n while i < total:\n image = pygame.Surface((width, height))\n image.blit(sheet, (0,0), ((width * i), 0, width, height))\n image.set_colorkey((0,0,0))\n sprites.append(image)\n i += 1\n\n return sprites", "def getimagelist(folder):\n imagefolder = Path(folder) \n imagelist = imagefolder.glob(\"**/*.png\") \n return list(imagelist)", "def imagePages(files, choice):\n options = [\"Byte\", \"Markov\", \"Hilbert\"]\n type = options[int(ui.prompt(\"Choose a visualization type\", options))]\n\n targets = []\n pageNames = []\n pageSize = 100\n pages = range(math.ceil(len(files)/pageSize))\n for page in pb.progressbar(pages):\n # print(\"\\nPage {}/{}\".format(page+1, len(pages)))\n gc.collect() # Garbage collect\n\n images = []\n start = page*pageSize\n if choice == \"Create\":\n images, targets = buildImages(files[start:start+pageSize], targets, type)\n elif choice == \"Load\":\n images, targets = loadImages(files[start:start+pageSize], targets)\n pageNames.append(\"./pages/images_page{}.npy\".format(page))\n np.save(pageNames[-1], images)\n return targets, pageNames", "def list_images(self):\n raise NotImplementedError()", "def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)", "def add_slides(file_name):\n path = f'{input_decks}/{file_name}.pptx'\n sld = total_slides(path)+1\n lis = list(range(1, sld))\n return lis", "def read_images(path, fileNameFilter=FileNameFilter(\"None\"), sz=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n if fileNameFilter(filename):\n try:\n im = Image.open(os.path.join(subject_path, filename))\n im = im.convert(\"L\")\n # resize to given size (if given)\n if (sz is not None):\n im = im.resize(sz, Image.ANTIALIAS)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise \n c = c+1\n return [X,y]", "def load_images(image_name_to_label):\n images = []\n labels = []\n\n image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)\n\n # Remove directories\n image_names.remove(\"COVID-19\")\n image_names.remove(\"Normal\")\n image_names.remove(\"ViralPneumonia\")\n\n # Load images from specific image directories (COVID-19, normal, viral pneumonia)\n def load_directory(directory):\n notifier.send(\" Loading from directory: \" + directory + \"...\")\n directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory\n directory_image_names = os.listdir(directory_path)\n for i, image_name in enumerate(directory_image_names):\n base_image_name = get_base_image_name(image_name)\n query_name = directory + \"/\" + base_image_name\n query_name = query_name.lower().replace(\" \", \"\")\n if query_name in 
image_name_to_label:\n print(f\" {i / len(directory_image_names) * 100}% - [{image_name}]\")\n image_path = directory_path + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[query_name])\n load_directory(\"COVID-19\")\n load_directory(\"Normal\")\n load_directory(\"ViralPneumonia\")\n\n # Load images from default directory\n if LOAD_ALL_IMAGES:\n notifier.send(\" Loading from directory: default...\")\n for i, image_name in enumerate(image_names):\n base_image_name = get_base_image_name(image_name)\n if base_image_name in image_name_to_label:\n print(f\" {i / len(image_names) * 100}% - [{image_name}]\")\n image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[base_image_name])\n\n return images, labels", "def splitImages(self):\n imgs = self.img_list\n frames = self.frame_number.value()\n grps = []\n for i in range(0, len(imgs), frames):\n grps.append(imgs[i:i + frames])\n\n return grps", "def extract_dl(driving_log_path):\r\n entries = []\r\n with open(driving_log_path) as csv_file:\r\n reader = csv.reader(csv_file)\r\n for entry in reader:\r\n entries.append(entry)\r\n empty_lists = [[] for i in range(7)]\r\n center_images, left_images, right_images, steerings, throttles, brakes, speeds = empty_lists\r\n for entry in entries:\r\n center_image_path, left_image_path, right_image_path = (entry[0], entry[1], entry[2])\r\n steering = float(entry[3])\r\n throttle = float(entry[4])\r\n brake = float(entry[5])\r\n speed = float(entry[6])\r\n center_image = cv2.imread(center_image_path)\r\n left_image = cv2.imread(left_image_path)\r\n right_image = cv2.imread(right_image_path)\r\n center_images.append(center_image)\r\n left_images.append(left_image)\r\n right_images.append(right_image)\r\n steerings.append(steering)\r\n throttles.append(throttle)\r\n brakes.append(brake)\r\n speeds.append(speed)\r\n return center_images, left_images, right_images, steerings, throttles, brakes, speeds", "def __get_sorted_file_list(self):\n d = self.__view.CurrentImgDir\n list = os.listdir(d)\n if self.__view.SortType == constant.THUMB_SORT_FILENAME:\n # Sort by Name\n list.sort()\n if self.__view.SortType == 2:\n # Sort by Size\n list.sort(lambda a, b: int(os.stat(os.path.join(d,a))[stat.ST_SIZE] - os.stat(os.path.join(d,b))[stat.ST_SIZE])) \n return list", "def imagesAt(self, rects, colorKey = None):\n return [self.image_at(rect, colorKey) for rect in rects]", "def get_lists_in_dir(dir_path):\n image_list = []\n\n for filename in glob.glob(dir_path + '/*.jpg'):\n image_list.append(filename)\n return image_list", "def images(self):\n return self.gameimage_set.all()", "def get_lst_images(file_path):\n return [i for i in os.listdir(file_path) if i != '.DS_Store']", "def get_imgs(path):\n imlist = {}\n for each in glob(path + \"*\"):\n word = each.split(\"/\")[-1]\n imlist[word] = []\n for imagefile in glob(path+word+\"/*\"):\n im = cv2.imread(imagefile, 0)\n imlist[word].append(im)\n\n return imlist", "def readImages(respository,*rescale):\n record = []\n onlyfiles = [f for f in listdir(respository) if isfile(join(respository, f))]\n for image in onlyfiles:\n record = record+[readImage(join(respository, image),[0,1,2],rescale)]\n return record\n pass", "def get_images(files, verbose=False):\n documents = defaultdict(dict)\n\n for fname in sorted(files):\n short_name = fname.split('/')[-1]\n if verbose:\n print(\"\\nExtracting\", short_name)\n 
if fname.endswith('.pdf'):\n document = fitz.open(fname)\n for page_num in range(len(document)):\n for im_num, im in enumerate(document.getPageImageList(page_num)):\n x_ref = im[0]\n pix = fitz.Pixmap(document, x_ref)\n im_name = \"{}_{}\".format(page_num, im_num)\n if verbose:\n print(im_name, end=', ')\n documents[short_name][im_name] = bytes_to_cv2img(pix.getPNGData())\n document.close()\n elif fname.endswith('.docx'):\n with zipfile.ZipFile(fname, 'r') as zf:\n for f in zf.namelist():\n if any(f.endswith(ext) for ext in ('.png', '.jpg')):\n if verbose:\n print(\"\\t\", f)\n documents[short_name][f] = bytes_to_cv2img(zf.read(f))\n return documents", "def returnImages(input_data):\r\n if type(input_data) is list:\r\n return [returnImages(element) for element in input_data]\r\n else:\r\n return cv.imread(input_data, 0)", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def numpy_to_pil(images):\n if images.ndim == 3:\n images = images[None, ...]\n images = (images * 255).round().astype(\"uint8\")\n if images.shape[-1] == 1:\n # special case for grayscale (single channel) images\n pil_images = [Image.fromarray(image.squeeze(), mode=\"L\") for image in images]\n else:\n pil_images = [Image.fromarray(image) for image in images]\n\n return pil_images", "def load_scans(pathDicom):\n reader = sitk.ImageSeriesReader()\n filenamesDICOM = reader.GetGDCMSeriesFileNames(pathDicom)\n reader.SetFileNames(filenamesDICOM)\n img = reader.Execute()\n return img", "def getGrouppedRawImages():\n imagesGlob = ['**/*_timestamped.jpg', '**/*_timestamped.JPG']\n images = func.reduce(operator.add, [[path for path in path.Path(\n '.').glob(glob)] for glob in imagesGlob], [])\n labelled = sorted([{\n 'label': image.parent.parent.name,\n 'time': image.parent.name,\n 'path': image\n } for image in images], key=lambda label: label['label'])\n return iter.groupby(labelled, key=lambda label: label['label'])", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)", "def get_existing_images(directory):\n validate_directory(directory)\n directory += '/'\n try:\n return listdir(directory)\n except:\n mkdir(directory)\n return []", "def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # 
'{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn = []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts", "def MergeClipData(clip_im_dir):\r\n\r\n image_list_dict = {'image_id':[]} \r\n\r\n clip_img = os.listdir(clip_im_dir)\r\n\r\n\r\n for folder_clip in clip_img :\r\n\r\n clip_path = os.path.join(clip_im_dir, folder_clip)\r\n # clip_path = clip_im_dir + '/' + folder_clip\r\n clip_list = os.listdir(clip_path)\r\n\r\n \r\n 
for folder in clip_list:\r\n\r\n images_path = os.path.join(clip_path, folder)\r\n # images_path = clip_path + '/' + folder\r\n image_list = os.listdir(images_path)\r\n\r\n image_list_dict['image_id'] += [os.path.join(images_path, name) for name in image_list]\r\n\r\n return image_list_dict", "def images_at(self, rects, colorkey = None):\n return [self.image_at(rect, colorkey) for rect in rects]", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def create_paths_to_saved_rotated_images_crops(qps_int, tuple_tags, path_to_directory_root):\n paths_to_saved_rotated_images_crops = []\n for qp in qps_int:\n paths_temp_qps = []\n path_to_directory_tag = os.path.join(path_to_directory_root,\n 'qp_{}'.format(qp),\n tuple_tags[0])\n \n # If the directory containing the saved luminance images\n # does not exist, it is created.\n if not os.path.isdir(path_to_directory_tag):\n os.makedirs(path_to_directory_tag)\n \n # `len(tuple_tags[1])` is equal to the number luminance images.\n for i in range(len(tuple_tags[1])):\n paths_temp_qps.append(\n [\n [os.path.join(path_to_directory_tag, '{}_reconstruction.png'.format(tuple_tags[1][i]))],\n [os.path.join(path_to_directory_tag, '{}_crop0.png'.format(tuple_tags[1][i]))],\n [os.path.join(path_to_directory_tag, '{}_crop1.png'.format(tuple_tags[1][i]))]\n ]\n )\n paths_to_saved_rotated_images_crops.append(paths_temp_qps)\n return paths_to_saved_rotated_images_crops", "def get_images_list(self):\n return self.image_filenames_list", "def move_images_and_list(path, final_path):\n #Lists all created folders\n directories = os.listdir(path)\n #Array that stores the path to each image\n lists = []\n #This variable will be used to give a unique name to each image\n tot_images = 0\n #Creates the path where will be stored all files\n if not os.path.exists(final_path):\n os.mkdir(final_path)\n #Iterates over each folder\n for ph in directories:\n #Iterates over each line of the generated file images.lst\n for img in open(os.path.join(path, ph, \"images.lst\")).readlines():\n \"\"\"Images are stored with a name, how many objects have and\n where it is, like this '01_0252_0067_0139_0222.jpg 1 252 67 139 222'\n so these five lines under changes the first part before '_', because\n in some cases, the command opencv_createsamples creates a same name\n to different positive images, this ensures a different name to each\n image\"\"\"\n split_space = img.split()\n split_underscore = split_space[0].split(\"_\")\n split_underscore[0] = str(tot_images)\n join_underscore = \"_\".join(split_underscore)\n join_space = \" \".join([join_underscore, *split_space[1:]])\n #Appends the new image's name to the list\n lists.append(join_space)\n #Moves each image in the folder to the final path, with a new name\n move(os.path.join(path, ph, split_space[0]),\n os.path.join(final_path, join_space.split()[0]))\n tot_images += 1\n #Writes a file withe the name of all images in the folder\n with open(os.path.join(final_path, \"images.lst\"), \"w+\") as f:\n for i in lists:\n f.write(\"\".join([i, '\\n']))\n #Removes the temporary path\n 
rmtree(os.path.abspath(path))\n #Name of the created file\n return \"images.lst\"", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']" ]
[ "0.61587155", "0.5961282", "0.58819103", "0.58328515", "0.5788983", "0.5764814", "0.57471657", "0.573866", "0.5700897", "0.56667006", "0.5630598", "0.5601801", "0.5583045", "0.5580133", "0.55675536", "0.5564644", "0.5564586", "0.55578834", "0.5556173", "0.554764", "0.5540422", "0.5530811", "0.550367", "0.5480338", "0.5474774", "0.54747653", "0.5470889", "0.5465934", "0.5463811", "0.54492635", "0.54460025", "0.5432528", "0.54291505", "0.5421859", "0.5407273", "0.5405555", "0.5404673", "0.53944075", "0.5388721", "0.5382508", "0.53819054", "0.53568286", "0.5355062", "0.534644", "0.533494", "0.53337646", "0.5332984", "0.5324515", "0.5317777", "0.5315587", "0.52975976", "0.5279578", "0.526937", "0.5268991", "0.5263055", "0.5256004", "0.52533835", "0.5249565", "0.52438325", "0.524269", "0.5242186", "0.5238219", "0.523348", "0.523009", "0.5225709", "0.5220999", "0.5220334", "0.5202062", "0.51992214", "0.5194176", "0.51938885", "0.518141", "0.5168151", "0.5163184", "0.5158634", "0.51566684", "0.5156527", "0.5152396", "0.5146553", "0.51456016", "0.51436794", "0.5140303", "0.51401925", "0.51393634", "0.5137714", "0.5137311", "0.5130569", "0.5127477", "0.51261294", "0.51235646", "0.51226604", "0.5120702", "0.51188874", "0.5115591", "0.51119184", "0.5108249", "0.51047546", "0.5095701", "0.50952137", "0.5094398", "0.50923586" ]
0.0
-1