Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def _on_report(self, report, connection_id):
    self._logger.info('Received report: %s', str(report))
    self._trigger_callback('on_report', connection_id, report)
    return False
Callback function called when a report has been processed. Args: report (IOTileReport): The report object connection_id (int): The connection id related to this report Returns: - True to indicate that IOTileReportParser should also keep a copy of the report or False to indicate it should delete it.
codesearchnet
def stateful_ops(self):
    self._create_definition_if_needed()
    return self._stateful_ops
Returns the list of stateful ops in function definition. Returns: A list of (op.name, op.type) pairs.
github-repos
def apply_middleware(*middlewares): def inner(create_store_): def create_wrapper(reducer, enhancer=None): store = create_store_(reducer, enhancer) dispatch = store['dispatch'] middleware_api = { 'get_state': store['get_state'], 'dispatch': lambda action: dispatch(action), } chain = [mw(middleware_api) for mw in middlewares] dispatch = compose(*chain)(store['dispatch']) return extend(store, {'dispatch': dispatch}) return create_wrapper return inner
creates an enhancer function composed of middleware Args: *middlewares: list of middleware functions to apply Returns: an enhancer for subsequent calls to create_store()
juraj-google-style
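A minimal sketch of using the enhancer above, assuming a redux-style create_store as in Python ports of Redux; the logger middleware and the wiring at the end are illustrative only:

def logger_middleware(middleware_api):
    # middleware shape: api -> next_dispatch -> action -> result
    def wrap(next_dispatch):
        def dispatch(action):
            print('dispatching', action)
            result = next_dispatch(action)
            print('state after', middleware_api['get_state']())
            return result
        return dispatch
    return wrap

# hypothetical wiring, with create_store and reducer from the surrounding library:
# store = apply_middleware(logger_middleware)(create_store)(reducer)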
def save_spectre_plot(self, filename='spectre.pdf', img_format='pdf', sigma=0.05, step=0.01):
    d, plt = self.get_spectre_plot(sigma, step)
    plt.savefig(filename, format=img_format)
Save matplotlib plot of the spectre to a file. Args: filename: Filename to write to. img_format: Image format to use. Defaults to pdf. sigma: Full width at half maximum in eV for normal functions. step: bin interval in eV.
codesearchnet
def union_of_bboxes(height, width, bboxes, erosion_rate=0.0, to_int=False): (x1, y1) = (width, height) (x2, y2) = (0, 0) for b in bboxes: (w, h) = ((b[2] - b[0]), (b[3] - b[1])) (lim_x1, lim_y1) = ((b[0] + (erosion_rate * w)), (b[1] + (erosion_rate * h))) (lim_x2, lim_y2) = ((b[2] - (erosion_rate * w)), (b[3] - (erosion_rate * h))) (x1, y1) = (np.min([x1, lim_x1]), np.min([y1, lim_y1])) (x2, y2) = (np.max([x2, lim_x2]), np.max([y2, lim_y2])) return (x1, y1, x2, y2)
Calculate union of bounding boxes. Args: height (float): Height of image or space. width (float): Width of image or space. bboxes (list): List-like of bounding boxes. Format is `[x_min, y_min, x_max, y_max]`. erosion_rate (float): How much each bounding box can be shrunk, useful for erosive cropping. Set this in the range [0, 1]. 0 is not erosive at all; 1.0 can make any bbox lose its volume entirely.
codesearchnet
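A small worked example of the union computed above with erosion_rate=0 (the box values are arbitrary):

import numpy as np

boxes = np.array([[10, 20, 50, 60], [30, 40, 80, 90]], dtype=float)  # [x_min, y_min, x_max, y_max]
x1, y1 = boxes[:, 0].min(), boxes[:, 1].min()
x2, y2 = boxes[:, 2].max(), boxes[:, 3].max()
print((x1, y1, x2, y2))  # (10.0, 20.0, 80.0, 90.0), what union_of_bboxes returns for erosion_rate=0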
def ConvertStringToFilename(name): return re.sub( r"\W", lambda x: "%%%02X" % ord(x.group(0)), name, flags=re.UNICODE).rstrip("/")
Converts a unicode string to a filesystem-safe filename. For maximum compatibility we escape all chars which are not alphanumeric (in the unicode sense). Args: name: a unicode string that is part of a subject. Returns: A safe filename with escaped special chars.
juraj-google-style
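A quick illustration of the escaping rule above; the sample subject string is made up:

import re

def convert(name):
    # same substitution as ConvertStringToFilename
    return re.sub(r"\W", lambda x: "%%%02X" % ord(x.group(0)), name, flags=re.UNICODE).rstrip("/")

print(convert("aff4:/C.1234/fs os"))  # aff4%3A%2FC%2E1234%2Ffs%20os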
def add(self, name, value, bitmask=DEFMASK): _add_enum_member(self._eid, name, value, bitmask)
Add an enum member Args: name: Name of the member value: value of the member bitmask: bitmask. Only use if enum is a bitfield.
juraj-google-style
def setMaximum(self, maximum):
    if not isinstance(maximum, int):
        raise TypeError('Argument is not of type int or long')
    self._maximum = maximum
setter to _maximum. Args: maximum (int or long): new _maximum value
codesearchnet
def Readdir(self, path, fh=None):
    if self.DataRefreshRequired(path):
        self._RunAndWaitForVFSFileUpdate(path)
    return super(GRRFuse, self).Readdir(path, fh=None)
Updates the directory listing from the client. Args: path: The path to the directory to update. Client is inferred from this. fh: A file handler. Not used. Returns: A list of filenames.
juraj-google-style
def load_weights_from_hdf5_group(f, model):
    if 'keras_version' in f.attrs:
        original_keras_version = f.attrs['keras_version']
        if hasattr(original_keras_version, 'decode'):
            original_keras_version = original_keras_version.decode('utf8')
    else:
        original_keras_version = '1'
    if 'backend' in f.attrs:
        original_backend = f.attrs['backend']
        if hasattr(original_backend, 'decode'):
            original_backend = original_backend.decode('utf8')
    else:
        original_backend = None
    filtered_layers = []
    for layer in model.layers:
        weights = _legacy_weights(layer)
        if weights:
            filtered_layers.append(layer)
    layer_names = load_attributes_from_hdf5_group(f, 'layer_names')
    filtered_layer_names = []
    for name in layer_names:
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, 'weight_names')
        if weight_names:
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(filtered_layers):
        raise ValueError(f'Layer count mismatch when loading weights from file. Model expected {len(filtered_layers)} layers, found {len(layer_names)} saved layers.')
    for k, name in enumerate(layer_names):
        g = f[name]
        layer = filtered_layers[k]
        symbolic_weights = _legacy_weights(layer)
        weight_values = load_subset_weights_from_hdf5_group(g)
        if len(weight_values) != len(symbolic_weights):
            # error message reconstructed from context; it was truncated in this copy
            raise ValueError(f'Weight count mismatch for layer #{k} (named {layer.name} in the current model, {name} in the save file). Layer expects {len(symbolic_weights)} weight(s). Received {len(weight_values)} saved weight(s)')
        _set_weights(layer, symbolic_weights, weight_values, name=f'layer #{k} (named {layer.name})')
    if 'top_level_model_weights' in f:
        symbolic_weights = list((v for v in model._trainable_variables + model._non_trainable_variables if v in model.weights))
        weight_values = load_subset_weights_from_hdf5_group(f['top_level_model_weights'])
        if len(weight_values) != len(symbolic_weights):
            raise ValueError(f'Weight count mismatch for top-level weights when loading weights from file. Model expects {len(symbolic_weights)} top-level weight(s). Received {len(weight_values)} saved top-level weight(s)')
        _set_weights(model, symbolic_weights, weight_values, name='top-level model')
Implements topological (order-based) weight loading. Args: f: A pointer to a HDF5 group. model: Model instance. Raises: ValueError: in case of mismatch between provided layers and weights file.
github-repos
def build_subresource_uri(self, resource_id_or_uri=None, subresource_id_or_uri=None, subresource_path=''):
    if subresource_id_or_uri and "/" in subresource_id_or_uri:
        return subresource_id_or_uri
    else:
        if not resource_id_or_uri:
            raise exceptions.HPOneViewValueError(RESOURCE_ID_OR_URI_REQUIRED)
        resource_uri = self.build_uri(resource_id_or_uri)
        uri = "{}/{}/{}".format(resource_uri, subresource_path, str(subresource_id_or_uri or ''))
        uri = uri.replace("//", "/")  # reconstructed: collapse duplicate slashes (the line was cut off in this copy)
        if uri.endswith("/"):
            uri = uri[:-1]
        return uri
Helps to build a URI with a resource path and its sub-resource path. Args: resource_id_or_uri: ID/URI of the main resource. subresource_id_or_uri: ID/URI of the sub-resource. subresource_path: Sub-resource path to be appended to the URI. Returns: The built URI.
juraj-google-style
def GetNTFSFileEntryByPathSpec(self, path_spec):
    location = getattr(path_spec, 'location', None)
    mft_attribute = getattr(path_spec, 'mft_attribute', None)
    mft_entry = getattr(path_spec, 'mft_entry', None)
    if mft_attribute is not None and mft_entry is not None:
        fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry)
    elif location is not None:
        fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location)
    else:
        raise errors.PathSpecError(
            'Path specification missing location and MFT entry.')
    return fsntfs_file_entry
Retrieves the NTFS file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: pyfsntfs.file_entry: NTFS file entry. Raises: PathSpecError: if the path specification is missing location and MFT entry.
juraj-google-style
def list_documents(self, limit=None):
    limit_str = ''
    if limit:
        try:
            limit_str = 'LIMIT {}'.format(int(limit))
        except (TypeError, ValueError):
            pass
    query = 'SELECT identifier FROM identifier_index ' + limit_str
    for row in self.backend.library.database.connection.execute(query).fetchall():
        yield row['identifier']
Generates vids of all indexed identifiers. Args: limit (int, optional): If not empty, the maximum number of results to return Generates: str: vid of the document.
codesearchnet
def _get_oxm_field_int(self):
    if self.oxm_class == OxmClass.OFPXMC_OPENFLOW_BASIC:
        return OxmOfbMatchField(self.oxm_field).value
    elif not isinstance(self.oxm_field, int) or self.oxm_field > 127:
        # f-prefix added so the offending value is interpolated into the message
        raise ValueError(f'oxm_field above 127: "{self.oxm_field}".')
    return self.oxm_field
Return a valid integer value for oxm_field. Used while packing. Returns: int: valid oxm_field value. Raises: ValueError: If :attribute:`oxm_field` is bigger than 7 bits or should be :class:`OxmOfbMatchField` and the enum has no such value.
codesearchnet
def multiprocess_mapping(func, iterable):
    if os.name == 'nt':
        return list(map(func, iterable))
    try:
        p = multiprocessing.Pool()
        return_data = list(p.imap(func, iterable))
        p.close()
        p.join()
        return return_data
    except OSError:
        return list(map(func, iterable))
Multiprocess mapping the given function on the given iterable. This only works in Linux and Mac systems since Windows has no forking capability. On Windows we fall back on single processing. Also, if we reach memory limits we fall back on single cpu processing. Args: func (func): the function to apply iterable (iterable): the iterable with the elements we want to apply the function on
juraj-google-style
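A minimal usage sketch for the mapping helper above; the square function is just an illustration:

def square(x):
    return x * x

# multiprocess_mapping(square, range(8)) -> [0, 1, 4, 9, 16, 25, 36, 49]
# (plain map() is used on Windows or when the pool cannot be created)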
def read_scan(self): def floatList(l): ' return a list of float from a list of string ' return [float(v) for v in l] scan_patt = re.compile('^\\sSummary of the potential surface scan:') optscan_patt = re.compile('^\\sSummary of Optimized Potential Surface Scan') data = {'energies': list(), 'coords': dict()} with zopen(self.filename, 'r') as f: line = f.readline() while (line != ''): if optscan_patt.match(line): f.readline() line = f.readline() endScan = False while (not endScan): data['energies'] += floatList(float_patt.findall(line)) line = f.readline() while (not re.search('(^\\s+(\\d+)|^\\s-+)', line)): icname = line.split()[0].strip() if (icname in data['coords']): data['coords'][icname] += floatList(float_patt.findall(line)) else: data['coords'][icname] = floatList(float_patt.findall(line)) line = f.readline() if re.search('^\\s-+', line): endScan = True else: line = f.readline() elif scan_patt.match(line): line = f.readline() data['coords'] = {icname: list() for icname in line.split()[1:(- 1)]} f.readline() line = f.readline() while (not re.search('^\\s-+', line)): values = floatList(line.split()) data['energies'].append(values[(- 1)]) for (i, icname) in enumerate(data['coords']): data['coords'][icname].append(values[(i + 1)]) line = f.readline() else: line = f.readline() return data
Read a potential energy surface from a gaussian scan calculation. Returns: A dict: {"energies": [ values ], "coords": {"d1": [ values ], "A2", [ values ], ... }} "energies" are the energies of all points of the potential energy surface. "coords" are the internal coordinates used to compute the potential energy surface and the internal coordinates optimized, labelled by their name as defined in the calculation.
codesearchnet
def _mark_maybe_missing_members(self, values): values = list(values) seen = set() while values: v = values.pop(0) if v not in seen: seen.add(v) if isinstance(v, abstract.SimpleValue): v.maybe_missing_members = True for child in v.instance_type_parameters.values(): values.extend(child.data)
Set maybe_missing_members to True on these values and their type params. Args: values: A list of BaseValue objects. On every instance among the values, recursively set maybe_missing_members to True on the instance and its type parameters.
github-repos
def _get_job_metadata(provider, user_id, job_name, script, task_ids, user_project, unique_job_id):
    create_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())
    user_id = user_id or dsub_util.get_os_user()
    job_metadata = provider.prepare_job_metadata(script.name, job_name, user_id, create_time)
    if unique_job_id:
        job_metadata['job-id'] = uuid.uuid4().hex
    job_metadata['create-time'] = create_time
    job_metadata['script'] = script
    job_metadata['user-project'] = user_project
    if task_ids:
        job_metadata['task-ids'] = dsub_util.compact_interval_string(list(task_ids))
    return job_metadata
Allow provider to extract job-specific metadata from command-line args. Args: provider: job service provider user_id: user submitting the job job_name: name for the job script: the script to run task_ids: a set of the task-ids for all tasks in the job user_project: name of the project to be billed for the request unique_job_id: generate a unique job id Returns: A dictionary of job-specific metadata (such as job id, name, etc.)
codesearchnet
def register_site(self): if self.oxd_id: logger.info('Client is already registered. ID: %s', self.oxd_id) return self.oxd_id params = {'authorization_redirect_uri': self.authorization_redirect_uri, 'oxd_rp_programming_language': 'python'} for op in self.opt_params: if self.config.get('client', op): params[op] = self.config.get('client', op) for olp in self.opt_list_params: if self.config.get('client', olp): params[olp] = self.config.get('client', olp).split(',') logger.debug('Sending command `register_site` with params %s', params) response = self.msgr.request('register_site', **params) logger.debug('Received response: %s', response) if (response['status'] == 'error'): raise OxdServerError(response['data']) self.oxd_id = response['data']['oxd_id'] self.config.set('oxd', 'id', self.oxd_id) logger.info('Site registration successful. Oxd ID: %s', self.oxd_id) return self.oxd_id
Function to register the site and generate a unique ID for the site Returns: **string:** The ID of the site (also called client id) if the registration is successful Raises: **OxdServerError:** If the site registration fails.
codesearchnet
def get_morph_files(directory):
    lsdir = (os.path.join(directory, m) for m in os.listdir(directory))
    return list(filter(_is_morphology_file, lsdir))
Get a list of all morphology files in a directory Returns: list with all files with extensions '.swc', '.h5' or '.asc' (case insensitive)
codesearchnet
def log(cls, event=None, actor=None, data=None):
    from cloud_inquisitor.log import auditlog
    auditlog(event=event, actor=actor, data=data)
Generate and insert a new event Args: event (str): Action performed actor (str): Actor (user or subsystem) triggering the event data (dict): Any extra data necessary for describing the event Returns: `None`
juraj-google-style
def basis_state(str_state, num):
    n = int(str_state, 2)
    if num >= len(str_state):
        state = np.zeros((1 << num), dtype=complex)
        state[n] = 1
        return state
    else:
        raise QiskitError('size of bitstring is greater than num.')
Return a basis state ndarray. Args: str_state (string): a string representing the state. num (int): the number of qubits Returns: ndarray: a quantum state of size 2**num with the given basis state set. Raises: QiskitError: if the dimensions are wrong
codesearchnet
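A worked example of the basis-state construction above for basis_state('10', 2), using plain numpy:

import numpy as np

str_state, num = '10', 2
n = int(str_state, 2)                  # 2
state = np.zeros(1 << num, dtype=complex)
state[n] = 1
print(state)                           # [0.+0.j 0.+0.j 1.+0.j 0.+0.j]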
def from_tuples(year_month_day_tuples, validate=True):
    years, months, days = [], [], []
    for t in year_month_day_tuples:
        years.append(t[0])
        months.append(t[1])
        days.append(t[2])
    years = tf.constant(years, dtype=tf.int32)
    months = tf.constant(months, dtype=tf.int32)
    days = tf.constant(days, dtype=tf.int32)
    return from_year_month_day(years, months, days, validate)
Creates DateTensor from a sequence of year-month-day Tuples. Args: year_month_day_tuples: Sequence of (year, month, day) Tuples. Months are 1-based; constants from Months enum can be used instead of ints. Days are also 1-based. validate: Whether to validate the dates. Returns: DateTensor object. #### Example ```python date_tensor = tff.datetime.dates_from_tuples([(2015, 4, 15), (2017, 12, 30)]) ```
github-repos
def GetArtifactsForCollection(os_name, artifact_list):
    artifact_arranger = ArtifactArranger(os_name, artifact_list)
    artifact_names = artifact_arranger.GetArtifactsInProperOrder()
    return artifact_names
Wrapper for the ArtifactArranger. Extend the artifact list by dependencies and sort the artifacts to resolve the dependencies. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names. Returns: A list of artifacts such that if they are collected in the given order their dependencies are resolved.
juraj-google-style
def get_variable(self, feature_column, name):
    if name in self._cols_to_vars_map[feature_column]:
        return self._cols_to_vars_map[feature_column][name]
    raise ValueError('Variable does not exist.')
Returns an existing variable. Args: feature_column: A `FeatureColumn` object this variable corresponds to. name: variable name.
github-repos
def get_vms(self, vm_names=None): if not vm_names: return self._vms.copy() missing_vms = [] vms = {} for name in vm_names: try: vms[name] = self._vms[name] except KeyError: missing_vms.append(name) if missing_vms: raise utils.LagoUserException( 'The following vms do not exist: \n{}'.format( '\n'.join(missing_vms) ) ) return vms
Returns the vm objects associated with vm_names if vm_names is None, return all the vms in the prefix Args: vm_names (list of str): The names of the requested vms Returns dict: Which contains the requested vm objects indexed by name Raises: utils.LagoUserException: If a vm name doesn't exist
juraj-google-style
def pack_tangents(tensors): return TangentInfo(*pywrap_tfe.TFE_Py_PackJVPs(tensors))
Packs forward accumulator state into a TangentInfo tuple. Args: tensors: A flat list of Tensors to pack forward accumulator state for. Returns: A tuple of (indices, tangents): indices: A sequence of sequences of two-element tuples. Each forward accumulator is represented as a sequence of tuples with (primal_index, jvp_index). Both integers index into the concatenated `tensors + jvps` array. tangents: A flat list of Tensors. Best interpreted as a sequence to be appended to `tensors`.
github-repos
def _SetupDatabase(host=None, port=None, user=None, password=None, database=None, client_key_path=None, client_cert_path=None, ca_cert_path=None): with contextlib.closing( _Connect( host=host, port=port, user=user, password=password, database=None, client_key_path=client_key_path, client_cert_path=client_cert_path, ca_cert_path=ca_cert_path)) as conn: with contextlib.closing(conn.cursor()) as cursor: try: cursor.execute(CREATE_DATABASE_QUERY.format(database)) except MySQLdb.MySQLError as e: if e.args[0] != mysql_error_constants.DB_CREATE_EXISTS: raise cursor.execute("USE {}".format(database)) _CheckCollation(cursor) def _MigrationConnect(): return _Connect( host=host, port=port, user=user, password=password, database=database, client_key_path=client_key_path, client_cert_path=client_cert_path, ca_cert_path=ca_cert_path) mysql_migration.ProcessMigrations(_MigrationConnect, config.CONFIG["Mysql.migrations_dir"])
Connect to the given MySQL host and create a utf8mb4_unicode_ci database. Args: host: The hostname to connect to. port: The port to connect to. user: The username to connect as. password: The password to connect with. database: The database name to create. client_key_path: The path of the client private key file. client_cert_path: The path of the client public key certificate file. ca_cert_path: The path of the Certificate Authority (CA) certificate file.
juraj-google-style
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids (torch.Tensor): the token ids. padding_idx (int): id of the padding token. past_key_values_length (int): offset added to the positions of non-padding tokens. Returns: torch.Tensor: the position ids.
github-repos
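A small worked example of the position-id logic above, with padding_idx=1 as in RoBERTa-style models (the token ids are arbitrary):

import torch

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])   # 1 is the pad id
padding_idx = 1
mask = input_ids.ne(padding_idx).int()
position_ids = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
print(position_ids)   # [[2, 3, 4, 5, 1, 1]]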
def serial_wire_viewer(jlink_serial, device): buf = StringIO.StringIO() jlink = pylink.JLink(log=buf.write, detailed_log=buf.write) jlink.open(serial_no=jlink_serial) jlink.set_tif(pylink.enums.JLinkInterfaces.SWD) jlink.connect(device, verbose=True) jlink.coresight_configure() jlink.set_reset_strategy(pylink.enums.JLinkResetStrategyCortexM3.RESETPIN) jlink.reset() jlink.halt() sys.stdout.write('Serial Wire Viewer\n') sys.stdout.write('Press Ctrl-C to Exit\n') sys.stdout.write('Reading data from port 0:\n\n') jlink.reset(ms=10, halt=False) try: while True: if jlink.register_read(0x0) != 0x05: continue offset = jlink.register_read(0x1) handle, ptr, num_bytes = jlink.memory_read32(offset, 3) read = ''.join(map(chr, jlink.memory_read8(ptr, num_bytes))) if num_bytes == 0: time.sleep(1) continue jlink.register_write(0x0, 0) jlink.step(thumb=True) jlink.restart(2, skip_breakpoints=True) sys.stdout.write(read) sys.stdout.flush() except KeyboardInterrupt: pass sys.stdout.write('\n') return 0
Implements a Serial Wire Viewer (SWV). A Serial Wire Viewer (SWV) allows us implement real-time logging of output from a connected device over Serial Wire Output (SWO). Args: jlink_serial (str): the J-Link serial number device (str): the target CPU Returns: Always returns ``0``. Raises: JLinkException: on error
juraj-google-style
def depricated_name(newmethod):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)
            warnings.warn(
                "Function {} is deprecated, please use {} instead.".format(func.__name__, newmethod),
                category=DeprecationWarning,
                stacklevel=2
            )
            warnings.simplefilter('default', DeprecationWarning)
            return func(*args, **kwargs)
        return wrapper
    return decorator
Decorator for warning the user of deprecated functions before use. Args: newmethod (str): Name of the method to use instead.
juraj-google-style
def CheckCommaSpacing(filename, clean_lines, linenum, error):
    raw = clean_lines.lines_without_raw_strings
    line = clean_lines.elided[linenum]
    if (Search(',[^,\\s]', ReplaceAll('\\boperator\\s*,\\s*\\(', 'F(', line)) and
            Search(',[^,\\s]', raw[linenum])):
        error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,')
    if Search(';[^\\s};\\\\)/]', line):
        error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;')
Checks for horizontal spacing near commas and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def match_global_phase(a: np.ndarray, b: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]: if (a.shape != b.shape): return (a, b) k = max(np.ndindex(*a.shape), key=(lambda t: abs(b[t]))) def dephase(v): r = np.real(v) i = np.imag(v) if (i == 0): return ((- 1) if (r < 0) else 1) if (r == 0): return (1j if (i < 0) else (- 1j)) return np.exp(((- 1j) * np.arctan2(i, r))) return ((a * dephase(a[k])), (b * dephase(b[k])))
Phases the given matrices so that they agree on the phase of one entry. To maximize precision, the position with the largest entry from one of the matrices is used when attempting to compute the phase difference between the two matrices. Args: a: A numpy array. b: Another numpy array. Returns: A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.
codesearchnet
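A quick numerical check of the phase-matching behaviour described above, assuming numpy; the matrices are arbitrary examples and the exponential dephasing is equivalent to the dephase() helper for nonzero entries:

import numpy as np

a = np.array([[1.0, 1j], [0.0, 1.0]])
b = np.exp(1j * 0.7) * a                                  # same matrix up to a global phase
k = max(np.ndindex(*a.shape), key=lambda t: abs(b[t]))    # position of b's largest entry
print(np.allclose(a * np.exp(-1j * np.angle(a[k])),
                  b * np.exp(-1j * np.angle(b[k]))))      # True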
def _set_checkpoint_initializer(variable, ckpt_file, tensor_name, slice_spec, name='checkpoint_initializer'): base_type = variable.dtype.base_dtype with ops.device(variable.device), ops.device('/cpu:0'): restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0] names_to_saveables = saveable_object_util.op_list_to_dict([variable]) saveable_objects = [] for name, op in names_to_saveables.items(): for s in saveable_object_util.saveable_objects_for_op(op, name): saveable_objects.append(s) assert len(saveable_objects) == 1 init_op = saveable_objects[0].restore([restore_op], restored_shapes=None) variable._initializer_op = init_op restore_op.set_shape(variable.shape) variable._initial_value = restore_op
Overrides given variable's initialization op. Sets variable initializer to assign op that initializes variable from tensor's value in the checkpoint. Args: variable: `tf.Variable` object. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. slice_spec: Slice specification for loading partitioned tensors. name: Name of the operation.
github-repos
def save(self, items):
    rows = []
    indx = self.indx
    size = 0
    tick = s_common.now()
    for item in items:
        byts = s_msgpack.en(item)
        size += len(byts)
        lkey = s_common.int64en(indx)
        indx += 1
        rows.append((lkey, byts))
    self.slab.putmulti(rows, append=True, db=self.db)
    took = s_common.now() - tick
    origindx = self.indx
    self.indx = indx
    return {'indx': indx, 'size': size, 'count': len(items), 'time': tick, 'took': took}
    return origindx  # unreachable: the dict above is always returned first
Save a series of items to a sequence. Args: items (tuple): The series of items to save into the sequence. Returns: The index of the first item
juraj-google-style
def healthy_services(self, role=None): try: query = self.rr.table(self.table) if role: query = query.get_all(role, index='role') query = query.filter( lambda svc: r.now().sub(svc["last_heartbeat"]) < svc["ttl"] ).order_by("load") result = query.run() return result except r.ReqlNonExistenceError: return []
Look up healthy services in the registry. A service is considered healthy if its 'last_heartbeat' was less than 'ttl' seconds ago Args: role (str, optional): role name Returns: If `role` is supplied, returns list of healthy services for the given role, otherwise returns list of all healthy services. May return an empty list.
juraj-google-style
def parse_uniprot_txt_file(infile): uniprot_metadata_dict = {} metadata = old_parse_uniprot_txt_file(infile) metadata_keys = list(metadata.keys()) if metadata_keys: metadata_key = metadata_keys[0] else: return uniprot_metadata_dict uniprot_metadata_dict['seq_len'] = len(str(metadata[metadata_key]['sequence'])) uniprot_metadata_dict['reviewed'] = metadata[metadata_key]['is_reviewed'] uniprot_metadata_dict['seq_version'] = metadata[metadata_key]['sequence_version'] uniprot_metadata_dict['entry_version'] = metadata[metadata_key]['entry_version'] if 'gene' in metadata[metadata_key]: uniprot_metadata_dict['gene_name'] = metadata[metadata_key]['gene'] if 'description' in metadata[metadata_key]: uniprot_metadata_dict['description'] = metadata[metadata_key]['description'] if 'refseq' in metadata[metadata_key]: uniprot_metadata_dict['refseq'] = metadata[metadata_key]['refseq'] if 'kegg' in metadata[metadata_key]: uniprot_metadata_dict['kegg'] = metadata[metadata_key]['kegg'] if 'ec' in metadata[metadata_key]: uniprot_metadata_dict['ec_number'] = metadata[metadata_key]['ec'] if 'pfam' in metadata[metadata_key]: uniprot_metadata_dict['pfam'] = metadata[metadata_key]['pfam'] if 'pdbs' in metadata[metadata_key]: uniprot_metadata_dict['pdbs'] = list(set(metadata[metadata_key]['pdbs'])) return uniprot_metadata_dict
Parse a raw UniProt metadata file and return a dictionary. Args: infile: Path to metadata file Returns: dict: Metadata dictionary
juraj-google-style
def build_twisted_request(self, method, url, extra_headers={}, body_producer=None, full_url=False): uri = (url if full_url else self._url(url)) raw_headers = self.get_headers() if extra_headers: raw_headers.update(extra_headers) headers = http_headers.Headers() for header in raw_headers: headers.addRawHeader(header, raw_headers[header]) agent = client.Agent(reactor) request = agent.request(method, uri, headers, body_producer) return (reactor, request)
Build a request for twisted Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None url (str): Destination URL (full, or relative) Kwargs: extra_headers (dict): Headers (override default connection headers, if any) body_producer (:class:`twisted.web.iweb.IBodyProducer`): Object producing request body full_url (bool): If False, URL is relative Returns: tuple. Tuple with two elements: reactor, and request
codesearchnet
def assert_keys_exist(self, caller, *keys):
    assert keys, '*keys parameter must be specified.'
    for key in keys:
        self.assert_key_exists(key, caller)
Assert that context contains keys. Args: keys: validates that these keys exist in context caller: string. Calling function or module name - this is used to construct error messages Raises: KeyNotInContextError: When a key doesn't exist in context.
codesearchnet
def read_parquet(path, engine="auto", columns=None, **kwargs): return DataFrame( query_compiler=BaseFactory.read_parquet( path=path, columns=columns, engine=engine, **kwargs ) )
Load a parquet object from the file path, returning a DataFrame. Args: path: The filepath of the parquet file. We only support local files for now. engine: This argument doesn't do anything for now. kwargs: Pass into parquet's read_pandas function.
juraj-google-style
def get_neighbors_of_site_with_index(struct, n, approach='min_dist', delta=0.1, cutoff=10.0): if (approach == 'min_dist'): return MinimumDistanceNN(tol=delta, cutoff=cutoff).get_nn(struct, n) elif (approach == 'voronoi'): return VoronoiNN(tol=delta, cutoff=cutoff).get_nn(struct, n) elif (approach == 'min_OKeeffe'): return MinimumOKeeffeNN(tol=delta, cutoff=cutoff).get_nn(struct, n) elif (approach == 'min_VIRE'): return MinimumVIRENN(tol=delta, cutoff=cutoff).get_nn(struct, n) else: raise RuntimeError('unsupported neighbor-finding method ({}).'.format(approach))
Returns the neighbors of a given site using a specific neighbor-finding method. Args: struct (Structure): input structure. n (int): index of site in Structure object for which motif type is to be determined. approach (str): type of neighbor-finding approach, where "min_dist" will use the MinimumDistanceNN class, "voronoi" the VoronoiNN class, "min_OKeeffe" the MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class. delta (float): tolerance involved in neighbor finding. cutoff (float): (large) radius to find tentative neighbors. Returns: neighbor sites.
codesearchnet
def validate(self, read_tuple_name): if (reg_lrn.match(read_tuple_name) is None): self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_read_tuple_name_structure', message="'{}' is not matched".format(reg_lrn)) else: parts = read_tuple_name.split('__') if (reg_prefix_part.match(parts[0]) is None): self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_prefix_part', message="'{}' is not matched".format(reg_prefix_part)) if (reg_id_part.match(parts[1]) is None): self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_id_part', message="'{}' is not matched".format(reg_id_part)) if (reg_segmental_part.match(parts[2]) is None): self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_segmental_part', message="'{}' is not matched".format(reg_segmental_part)) if (reg_suffix_part.match(parts[3]) is None): self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_suffix_part', message="'{}' is not matched".format(reg_suffix_part)) if (not self.rnf_profile.check(read_tuple_name)): self.report_error(read_tuple_name=read_tuple_name, error_name='wrong_profile', message='Read has a wrong profile (wrong widths). It should be: {} but it is: {}.'.format(self.rnf_profile, rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name)), warning=True)
Check RNF validity of a read tuple. Args: read_tuple_name (str): Read tuple name to be checked.
codesearchnet
def get_associated_uplink_groups(self): uri = '{}/associatedUplinkGroups'.format(self.data['uri']) return self._helper.do_get(uri)
Gets the uplink sets which are using an Ethernet network. Returns: list: URIs of the associated uplink sets.
codesearchnet
def _delocalize_logging_command(self, logging_path, user_project): logging_prefix = os.path.splitext(logging_path.uri)[0] if logging_path.file_provider == job_model.P_LOCAL: mkdir_cmd = 'mkdir -p "%s"\n' % os.path.dirname(logging_prefix) cp_cmd = 'cp' elif logging_path.file_provider == job_model.P_GCS: mkdir_cmd = '' if user_project: cp_cmd = 'gsutil -u {} -mq cp'.format(user_project) else: cp_cmd = 'gsutil -mq cp' else: assert False copy_logs_cmd = textwrap.dedent().format( cp_cmd=cp_cmd, prefix=logging_prefix) body = textwrap.dedent().format( mkdir_cmd=mkdir_cmd, copy_logs_cmd=copy_logs_cmd) return body
Returns a command to delocalize logs. Args: logging_path: location of log files. user_project: name of the project to be billed for the request. Returns: eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12'
juraj-google-style
def _gen_indicator_method(self, name, custom_class, value_count): method_name = name.replace(' ', '_').lower() def method_1(value1, xid, **kwargs): indicator_obj = custom_class(value1, xid, **kwargs) return self._indicator(indicator_obj) def method_2(value1, value2, xid, **kwargs): indicator_obj = custom_class(value1, value2, xid, **kwargs) return self._indicator(indicator_obj) def method_3(value1, value2, value3, xid, **kwargs): indicator_obj = custom_class(value1, value2, value3, xid, **kwargs) return self._indicator(indicator_obj) method = locals()['method_{}'.format(value_count)] setattr(self, method_name, method)
Dynamically generate custom Indicator methods. Args: name (str): The name of the method. custom_class (object): The class to add. value_count (int): The number of value parameters to support.
juraj-google-style
def _determine_trace_and_create_report(self, graph, ops_in_exec_path, graph_summary_tag): self._check_trace_files() graph_order = tensor_tracer_report.sort_tensors_and_ops(graph) tensor_trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION) report_handler = tensor_tracer_report.TTReportHandle() traced_tensors = self._determine_and_instrument_traced_tensors(graph_order, ops_in_exec_path, tensor_trace_points, report_handler) logging.info('TensorTracer is tracing %d tensors.', len(traced_tensors)) if traced_tensors and tensor_tracer_flags.TT_CHECK_FILTER.value: raise RuntimeError('Verify ops being traced by tensor tracer.') tensor_trace_order = tensor_tracer_report.TensorTraceOrder(graph_order, traced_tensors) num_signatures = self._num_signature_dimensions() if num_signatures and self._use_tensor_values_cache(): if self._use_temp_cache(): self._create_temp_cache(len(traced_tensors), num_signatures, graph) else: self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph, [len(traced_tensors), num_signatures]) if self._parameters.trace_mode in tensor_tracer_flags.TRACE_MODE_HISTORY: self._create_or_get_tensor_history_values_cache(_TT_SUMMARY_TAG, graph, [len(traced_tensors), num_signatures]) if self._parameters.trace_mode in (tensor_tracer_flags.TRACE_MODE_SUMMARY, tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY): self._report_proto = report_handler.create_report_proto(self._tt_config, self._parameters, tensor_trace_order, tensor_trace_points, self._signature_types()) if self._parameters.use_fingerprint_subdir: self._parameters.trace_dir = os.path.join(self._parameters.trace_dir, self._report_proto.fingerprint) logging.info('TensorTracer updating trace_dir to %s', self._parameters.trace_dir) self._report_proto_path = report_handler.report_proto_path(self._parameters.trace_dir, graph_summary_tag) if self._parameters.report_file_path != _SKIP_REPORT_FILE: report_handler.write_report_proto(self._report_proto_path, self._report_proto, self._parameters) elif self._parameters.trace_mode not in tensor_tracer_flags.TRACE_MODE_HISTORY: report_handler.create_report(self._tt_config, self._parameters, tensor_trace_order, tensor_trace_points) return tensor_trace_order
Work needs to be done prior to TPU or CPU tracing. Args: graph: tf.graph ops_in_exec_path: Set of operations in the execution path. graph_summary_tag: the summary tag name for the given graph. Returns: An instance of tensor_tracer_report.TensorTraceOrder, containing list of tensors to be traced with their topological order information. Raises: RuntimeError: If opname filtering is incorrectly set.
github-repos
def slice_inputs(self, indices_dataset, inputs): flat_inputs = nest.flatten(inputs) def dynamic_shape_like(t): shape = list(t.shape) shape[0] = None return tuple(shape) flat_dtypes = [inp.dtype for inp in flat_inputs] contiguous = True if self._shuffle and self._shuffle != 'batch': contiguous = False def grab_batch(indices): def py_method(ind): def slice_array(data): return training_utils.slice_arrays(data, ind.numpy(), contiguous=contiguous) return [slice_array(inp) for inp in flat_inputs] flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes) for v, original_inp in zip(flat_out, flat_inputs): v.set_shape(dynamic_shape_like(original_inp)) return nest.pack_sequence_as(inputs, flat_out) dataset = indices_dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE) return dataset
Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. Returns: A Dataset of input batches matching the batch indices.
github-repos
def convert_variables_to_constants(sess, input_graph_def, output_node_names, variable_names_whitelist=None, variable_names_blacklist=None): ret = convert_variables_to_constants_from_session_graph(session=sess, graph_def=input_graph_def, output_node_names=output_node_names, variable_names_allowlist=variable_names_whitelist, variable_names_denylist=variable_names_blacklist) return ret
Replaces all the variables in a graph with constants of the same values. If you have a trained graph containing Variable ops, it can be convenient to convert them all to Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. Args: sess: Active TensorFlow session containing the variables. input_graph_def: GraphDef object holding the network. output_node_names: List of name strings for the result nodes of the graph. variable_names_whitelist: The set of variable names to convert (by default, all variables are converted). variable_names_blacklist: The set of variable names to omit converting to constants. Returns: GraphDef containing a simplified version of the original. Raises: RuntimeError: if a DT_RESOURCE op is found whose ancestor Variables are both denylisted AND whitelisted for freezing.
github-repos
def dfa(self, ttab: TransitionTable, init: int = 0) -> int:
    state = init
    while True:
        disp = ttab[state]
        ch = self.peek()
        state = disp.get(ch, disp[""])()
        if state < 0:
            return state
        self.offset += 1
Run a DFA and return the final (negative) state. Args: ttab: Transition table (with possible side-effects). init: Initial state. Raises: EndOfInput: If past the end of `self.input`.
juraj-google-style
def waitForEvent(self, event_name, predicate, timeout=None): if timeout is None: timeout = self.default_timeout_sec deadline = time.perf_counter() + timeout while time.perf_counter() <= deadline: single_rpc_timeout = deadline - time.perf_counter() if single_rpc_timeout < 0: break single_rpc_timeout = min(single_rpc_timeout, self.rpc_max_timeout_sec) try: event = self.waitAndGet(event_name, single_rpc_timeout) except errors.CallbackHandlerTimeoutError: break if predicate(event): return event raise errors.CallbackHandlerTimeoutError(self._device, f'Timed out after {timeout}s waiting for an "{event_name}" event that satisfies the predicate "{predicate.__name__}".')
Waits for an event of the specific name that satisfies the predicate. This call will block until the expected event has been received or time out. The predicate function defines the condition the event is expected to satisfy. It takes an event and returns True if the condition is satisfied, False otherwise. Note all events of the same name that are received but don't satisfy the predicate will be discarded and not be available for further consumption. Args: event_name: str, the name of the event to wait for. predicate: function, a function that takes an event (dictionary) and returns a bool. timeout: float, the number of seconds to wait before giving up. If None, it will be set to self.default_timeout_sec. Returns: dictionary, the event that satisfies the predicate if received. Raises: errors.CallbackHandlerTimeoutError: raised if no event that satisfies the predicate is received after timeout seconds.
github-repos
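An illustrative predicate for the wait described above; the event name and payload shape are assumptions, not part of the library's API:

def is_status_ready(event):
    # event is the dictionary delivered by the callback handler
    return event.get('data', {}).get('status') == 'READY'

# handler.waitForEvent('onStatusChanged', is_status_ready, timeout=30.0)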
def check(self, dsm, simplicity_factor=2, **kwargs): economy_of_mechanism = False message = '' data = dsm.data categories = dsm.categories dsm_size = dsm.size[0] if not categories: categories = ['appmodule'] * dsm_size dependency_number = 0 for i in range(0, dsm_size): for j in range(0, dsm_size): if (categories[i] not in ('framework', 'corelib') and categories[j] not in ('framework', 'corelib') and data[i][j] > 0): dependency_number += 1 if dependency_number < dsm_size * simplicity_factor: economy_of_mechanism = True else: message = ' '.join([ 'Number of dependencies (%s)' % dependency_number, '> number of rows (%s)' % dsm_size, '* simplicity factor (%s) = %s' % ( simplicity_factor, dsm_size * simplicity_factor)]) return economy_of_mechanism, message
Check economy of mechanism. As first abstraction, number of dependencies between two modules < 2 * the number of modules (dependencies to the framework are NOT considered). Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. simplicity_factor (int): simplicity factor. Returns: bool: True if economic, else False
juraj-google-style
def configure_profile(msg_type, profile_name, data, auth):
    with jsonconfig.Config('messages', indent=4) as cfg:
        write_data(msg_type, profile_name, data, cfg)
        write_auth(msg_type, profile_name, auth, cfg)
        print('[+] Configuration entry for <' + profile_name + '> created.')
        print('[+] Configuration file location: ' + cfg.filename)
Create the profile entry. Args: :msg_type: (str) message type to create config entry. :profile_name: (str) name of the profile entry :data: (dict) dict values for the 'settings' :auth: (dict) auth parameters
codesearchnet
def post_process_segmentation(self, outputs: 'MaskFormerForInstanceSegmentationOutput', target_size: Optional[Tuple[int, int]]=None) -> 'torch.Tensor': warnings.warn('`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use `post_process_instance_segmentation`', FutureWarning) class_queries_logits = outputs.class_queries_logits masks_queries_logits = outputs.masks_queries_logits if target_size is not None: masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=target_size, mode='bilinear', align_corners=False) masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs) return segmentation
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. target_size (`Tuple[int, int]`, *optional*): If set, the `masks_queries_logits` will be resized to `target_size`. Returns: `torch.Tensor`: A tensor of shape (`batch_size, num_class_labels, height, width`).
github-repos
def find_in_coord_list(coord_list, coord, atol=1e-08):
    if len(coord_list) == 0:
        return []
    diff = np.array(coord_list) - np.array(coord)[None, :]
    return np.where(np.all(np.abs(diff) < atol, axis=1))[0]
Find the indices of matches of a particular coord in a coord_list. Args: coord_list: List of coords to test coord: Specific coordinates atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and array. Returns: Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
codesearchnet
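A small worked example of the coordinate matching above (fractional coordinates chosen arbitrarily):

import numpy as np

coord_list = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5], [0.0, 0.0, 0.0]]
coord = [0.0, 0.0, 0.0]
diff = np.array(coord_list) - np.array(coord)[None, :]
print(np.where(np.all(np.abs(diff) < 1e-8, axis=1))[0])  # [0 2]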
def aerosol_optical_depth(self, value=0.999):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float for field `aerosol_optical_depth`'.format(value))
    self._aerosol_optical_depth = value
Corresponds to IDD Field `aerosol_optical_depth` Args: value (float): value for IDD Field `aerosol_optical_depth` Unit: thousandths Missing value: 0.999 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def GetSectionByIndex(self, section_index):
    if not self._is_parsed:
        self._Parse()
        self._is_parsed = True
    if section_index < 0 or section_index >= len(self._sections):
        return None
    return self._sections[section_index]
Retrieves a specific section based on the index. Args: section_index (int): index of the section. Returns: VolumeExtent: a volume extent or None if not available.
codesearchnet
def every_other(x, name=None):
    with tf.name_scope(name, 'every_other', [x]) as scope:
        x = tf.convert_to_tensor(x, name='x')
        return tf.reshape(
            tf.slice(tf.reshape(x, [-1, 2]), [0, 0], [-1, 1]),
            [-1],
            name=scope)
Drops every other value from the tensor and returns a 1D tensor. This is useful if you are running multiple inputs through a model tower before splitting them and you want to line it up with some other data. Args: x: the target tensor. name: the name for this op, defaults to every_other Returns: A tensorflow op.
juraj-google-style
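A quick check of the drop-every-other behaviour described above, assuming TensorFlow 2 in eager mode (the name_scope plumbing is omitted):

import tensorflow as tf

x = tf.constant([1, 2, 3, 4, 5, 6])
kept = tf.reshape(tf.slice(tf.reshape(x, [-1, 2]), [0, 0], [-1, 1]), [-1])
print(kept.numpy())   # [1 3 5]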
def make_seeds(self, count=1):
    alg = self.algorithm
    if alg in (a.value for a in random_ops_util.Algorithm):
        keys = self._make_int64_keys(shape=[count])
        zeros = array_ops.zeros_like(keys)
        return array_ops_stack.stack([keys, zeros])
    else:
        raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))
Generates seeds for stateless random ops. For example: ```python seeds = get_global_generator().make_seeds(count=10) for i in range(10): seed = seeds[:, i] numbers = stateless_random_normal(shape=[2, 3], seed=seed) ... ``` Args: count: the number of seed pairs (note that stateless random ops need a pair of seeds to invoke). Returns: A tensor of shape [2, count] and dtype int64.
github-repos
def parallel(processor_list: Sequence[PartProcessor]) -> PartProcessor:
    if not processor_list:
        raise ValueError('processor_list is empty')
    return _ParallelPartProcessor(processor_list)
Create a sequence of part processors to be run in parallel. Args: processor_list: list of part processors. Returns: A processor consisting of the parallel run of all the processors in the list. The execution is sequential from the first processor to the last but parts are processed concurrently overall.
github-repos
def get_metrics_namespace(self) -> str: return 'RunInference'
Returns: A namespace for metrics collected by the RunInference transform.
github-repos
def product_category(request, category_id): PRODUCTS_FORM_PREFIX = 'products' VOUCHERS_FORM_PREFIX = 'vouchers' v = _handle_voucher(request, VOUCHERS_FORM_PREFIX) (voucher_form, voucher_handled) = v category_id = int(category_id) category = inventory.Category.objects.get(pk=category_id) with BatchController.batch(request.user): products = ProductController.available_products(request.user, category=category) if (not products): messages.warning(request, ('There are no products available from category: ' + category.name)) return redirect('dashboard') p = _handle_products(request, category, products, PRODUCTS_FORM_PREFIX) (products_form, discounts, products_handled) = p if (request.POST and (not voucher_handled) and (not products_form.errors)): if products_form.has_changed(): messages.success(request, 'Your reservations have been updated.') return redirect(review) data = {'category': category, 'discounts': discounts, 'form': products_form, 'voucher_form': voucher_form} return render(request, 'registrasion/product_category.html', data)
Form for selecting products from an individual product category. Arguments: category_id (castable to int): The id of the category to display. Returns: redirect or render: If the form has been successfully submitted, redirect to ``dashboard``. Otherwise, render ``registrasion/product_category.html`` with data:: { "category": category, # An inventory.Category for # category_id "discounts": discounts, # A list of # DiscountAndQuantity "form": products_form, # A form for selecting # products "voucher_form": voucher_form, # A form for entering a # voucher code }
codesearchnet
def generate_defect_structure(self, supercell=(1, 1, 1)): defect_structure = self.bulk_structure.copy() defect_structure.make_supercell(supercell) defect_properties = self.site.properties.copy() if ('velocities' in self.bulk_structure.site_properties) and \ 'velocities' not in defect_properties: if all( vel == self.bulk_structure.site_properties['velocities'][0] for vel in self.bulk_structure.site_properties['velocities']): defect_properties['velocities'] = self.bulk_structure.site_properties['velocities'][0] else: raise ValueError("No velocity property specified for defect site and " "bulk_structure velocities are not homogeneous. Please specify this " "property within the initialized defect_site object.") site_properties_for_fake_struct = {prop: [val] for prop,val in defect_properties.items()} struct_for_defect_site = Structure( self.bulk_structure.copy().lattice, [self.site.specie], [self.site.frac_coords], to_unit_cell=True, site_properties = site_properties_for_fake_struct) struct_for_defect_site.make_supercell(supercell) defect_site = struct_for_defect_site[0] poss_deflist = sorted( defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True), key=lambda x: x[1]) defindex = poss_deflist[0][2] subsite = defect_structure.pop(defindex) defect_structure.append(self.site.specie.symbol, subsite.coords, coords_are_cartesian=True, properties = defect_site.properties) defect_structure.set_charge(self.charge) return defect_structure
Returns Defective Substitution structure, decorated with charge Args: supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
juraj-google-style
def export(self, top=True):
    out = []
    if top:
        out.append(self._internal_name)
    out.append(self._to_str(self.holiday_name))
    out.append(self._to_str(self.holiday_day))
    return ",".join(out)
Exports object to its string representation. Args: top (bool): if True appends `internal_name` before values. All non-list objects should be exported with top=True; all list objects that are embedded as fields in other objects should be exported with top=False. Returns: str: The object's string representation.
juraj-google-style
def on_connection_state_change(self, event_type, callback):
    listeners = self._connection_state_listeners.get(event_type, [])
    listeners.append(callback)
    self._connection_state_listeners[event_type] = listeners
Register a callback for a specific connection state change. Register a callback to be triggered when the connection changes to the specified state, signified by a ConnectionEvent. The callback must be a coroutine. Args: event_type (ConnectionEvent): the connection event to listen for callback (coroutine): a coroutine to call on the event occurrence
codesearchnet
def get_log(self):
    log_path = self.meta_data['logs_resource']
    conn = Qubole.agent()
    r = conn.get_raw(log_path)
    return r.text
Fetches log for the command represented by this object Returns: The log as a string
codesearchnet
def reinforce_grid(self):
    for grid_district in self.mv_grid_districts():
        grid_district.mv_grid.reinforce_grid()
        for lv_load_area in grid_district.lv_load_areas():
            if not lv_load_area.is_aggregated:
                for lv_grid_district in lv_load_area.lv_grid_districts():
                    lv_grid_district.lv_grid.reinforce_grid()
Performs grid reinforcement measures for all MV and LV grids Args: Returns:
juraj-google-style
def setup_data_stream(self, connection_factory: Callable[([tuple], Connection)], data_stream_factory: Callable[([Connection], DataStream)]=DataStream) -> DataStream: (yield from self._control_stream.write_command(Command('TYPE', 'I'))) reply = (yield from self._control_stream.read_reply()) self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply) address = (yield from self.passive_mode()) connection = (yield from connection_factory(address)) connection.reset() (yield from connection.connect()) data_stream = data_stream_factory(connection) return data_stream
Create and setup a data stream. This function will set up passive and binary mode and handle connecting to the data connection. Args: connection_factory: A coroutine callback that returns a connection data_stream_factory: A callback that returns a data stream Coroutine. Returns: DataStream
codesearchnet
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: if input_data_format is None: input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4)) data_format = input_data_format if data_format is None else data_format if 'longest_edge' in size: size = get_resize_output_image_size(image, resolution_max_side=size['longest_edge'], input_data_format=input_data_format) elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("size must be a dictionary with key 'longest_edge' or 'height' and 'width'.") image_mode = None if image.ndim == 2 or image.shape[-1] == 1: image_mode = 'P' image = to_pil_image(image, image_mode=image_mode, input_data_format=input_data_format) resized_image = image.resize((size[1], size[0]), resample=resample) resized_image = np.array(resized_image) resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image resized_image = to_channel_dimension_format(resized_image, data_format, input_channel_dim=ChannelDimension.LAST) return resized_image
Resize an image. The longest edge of the image is resized to size["longest_edge"], with the shortest edge resized to keep the input aspect ratio. Can also be used with size["height"] and size["width"]. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred.
github-repos
def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=(- 0.05), half_kpts_first_relax=False, auto_continue=False): incar_update = {'ISTART': 1} if ediffg: incar_update['EDIFFG'] = ediffg settings_overide_1 = None settings_overide_2 = [{'dict': 'INCAR', 'action': {'_set': incar_update}}, {'file': 'CONTCAR', 'action': {'_file_copy': {'dest': 'POSCAR'}}}] if (half_kpts_first_relax and os.path.exists('KPOINTS') and os.path.exists('POSCAR')): kpts = Kpoints.from_file('KPOINTS') orig_kpts_dict = kpts.as_dict() kpts.kpts = np.round(np.maximum((np.array(kpts.kpts) / 2), 1)).astype(int).tolist() low_kpts_dict = kpts.as_dict() settings_overide_1 = [{'dict': 'KPOINTS', 'action': {'_set': low_kpts_dict}}] settings_overide_2.append({'dict': 'KPOINTS', 'action': {'_set': orig_kpts_dict}}) return [VaspJob(vasp_cmd, final=False, suffix='.relax1', auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_1), VaspJob(vasp_cmd, final=True, backup=False, suffix='.relax2', auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_2)]
Returns a list of two jobs corresponding to an AFLOW style double relaxation run. Args: vasp_cmd (str): Command to run vasp as a list of args. For example, if you are using mpirun, it can be something like ["mpirun", "pvasp.5.2.11"] auto_npar (bool): Whether to automatically tune NPAR to be sqrt( number of cores) as recommended by VASP for DFT calculations. Generally, this results in significant speedups. Defaults to True. Set to False for HF, GW and RPA calculations. ediffg (float): Force convergence criteria for subsequent runs ( ignored for the initial run.) half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. Returns: List of two jobs corresponding to an AFLOW style run.
codesearchnet
def get_sketch(self, sketch_id):
    resource_url = '{0:s}/sketches/{1:d}/'.format(self.api_base_url, sketch_id)
    response = self.session.get(resource_url)
    response_dict = response.json()
    try:
        response_dict['objects']
    except KeyError:
        raise ValueError('Sketch does not exist or you have no access')
    return response_dict
Get information on the specified sketch. Args: sketch_id (int): ID of sketch Returns: dict: Dictionary of sketch information Raises: ValueError: Sketch is inaccessible
juraj-google-style
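A short usage sketch; `api_client` stands in for an instance of the API-client class that defines get_sketch() (construction omitted, so it is an assumption):

api_client = ...  # assumed Timesketch API client instance

try:
    sketch = api_client.get_sketch(42)
    print(sorted(sketch.keys()))
except ValueError as err:
    print("Could not fetch sketch:", err)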
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected, use_gpu): total_size_1 = 1 total_size_2 = 1 for s in tensor_in_sizes: total_size_1 *= s for s in filter_in_sizes: total_size_2 *= s x1 = [f * 1.0 for f in range(1, total_size_1 + 1)] x2 = [f * 1.0 for f in range(1, total_size_2 + 1)] with self.cached_session(use_gpu=use_gpu) as sess: t1 = constant_op.constant(x1, shape=tensor_in_sizes) t1.set_shape(tensor_in_sizes) t2 = constant_op.constant(x2, shape=filter_in_sizes) conv = nn_ops.depthwise_conv2d_native(t1, t2, strides=[1, stride, stride, 1], padding=padding) value = self.evaluate(conv) tf_logging.info('value = %r', value) self.assertArrayNear(expected, np.ravel(value), 1e-05) self.assertShapeEqual(value, conv)
Verifies the output values of the depthwise convolution function. Args: tensor_in_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols, input_depth, depth_multiplier]. stride: Stride. padding: Padding type. expected: An array containing the expected operation outputs. use_gpu: Whether to use GPU.
github-repos
class TrainState(train_state.TrainState): logits_fn: Callable = struct.field(pytree_node=False) loss_fn: Callable = struct.field(pytree_node=False)
Train state with an Optax optimizer. The two functions below differ depending on whether the task is classification or regression. Args: logits_fn: Applied to last layer to obtain the logits. loss_fn: Function to compute the loss.
github-repos
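A sketch of building this train state for a classification task; `model` and `params` are assumed to be an initialised flax.linen module and its parameters, and the loss shown is a plain softmax cross-entropy rather than anything mandated by the class above:

import jax
import jax.numpy as jnp
import optax

def cross_entropy_loss(logits, labels):
    one_hot = jax.nn.one_hot(labels, logits.shape[-1])
    return -jnp.mean(jnp.sum(one_hot * jax.nn.log_softmax(logits), axis=-1))

state = TrainState.create(
    apply_fn=model.__call__,               # assumed flax.linen Module
    params=params,                         # assumed initialised parameters
    tx=optax.adamw(learning_rate=3e-5),
    logits_fn=lambda outputs: outputs[0],  # e.g. first element holds the logits
    loss_fn=cross_entropy_loss,
)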
def filter_genes_and_strains(self, remove_genes_not_in_reference_model=True, remove_strains_with_no_orthology=True, remove_strains_with_no_differences=False, custom_keep_strains=None, custom_keep_genes=None):
    if (len(self.df_orthology_matrix) == 0):
        raise RuntimeError('Empty orthology matrix, please calculate first!')
    reference_strain_gene_ids = [x.id for x in self.reference_gempro.genes]
    initial_num_genes = len(reference_strain_gene_ids)
    initial_num_strains = len(self.strain_ids)
    to_remove_genes = []
    if custom_keep_genes:
        to_remove_genes.extend([x for x in reference_strain_gene_ids if (x not in custom_keep_genes)])
    if remove_genes_not_in_reference_model:
        to_remove_genes.extend([x for x in reference_strain_gene_ids if (x not in self.df_orthology_matrix.index.tolist())])
    to_remove_genes = list(set(to_remove_genes))
    if self.reference_gempro.model:
        cobra.manipulation.delete_model_genes(self.reference_gempro.model, to_remove_genes)
    else:
        for g_id in to_remove_genes:
            self.reference_gempro.genes.get_by_id(g_id).functional = False
    new_gene_subset = [x.id for x in self.reference_gempro.functional_genes]
    tmp_new_orthology_matrix = self.df_orthology_matrix[self.df_orthology_matrix.index.isin(new_gene_subset)]
    if (custom_keep_strains or remove_strains_with_no_orthology or remove_strains_with_no_differences):
        # Iterate over a copy so entries can be removed from self.strain_ids safely
        for strain_id in list(self.strain_ids):
            if custom_keep_strains:
                if (strain_id not in custom_keep_strains):
                    self.strain_ids.remove(strain_id)
                    continue
            if remove_strains_with_no_orthology:
                if (strain_id not in tmp_new_orthology_matrix.columns):
                    self.strain_ids.remove(strain_id)
                    log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_id))
                    continue
                elif tmp_new_orthology_matrix[strain_id].isnull().all():
                    self.strain_ids.remove(strain_id)
                    log.info('{}: no orthologous genes found for this strain, removed from analysis.'.format(strain_id))
                    continue
            if remove_strains_with_no_differences:
                not_in_strain = tmp_new_orthology_matrix[pd.isnull(tmp_new_orthology_matrix[strain_id])][strain_id].index.tolist()
                if (len(not_in_strain) == 0):
                    self.strain_ids.remove(strain_id)
                    log.info('{}: strain has no differences from the base, removed from analysis.'.format(strain_id))
                    continue
    log.info('{} genes to be analyzed, originally {}'.format(len(self.reference_gempro.functional_genes), initial_num_genes))
    log.info('{} strains to be analyzed, originally {}'.format(len(self.strain_ids), initial_num_strains))
Filters the analysis by keeping a subset of strains or genes based on certain criteria. Args: remove_genes_not_in_reference_model (bool): Remove genes from reference model not in orthology matrix remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model. Default is False because since orthology is found using a PID cutoff, all genes may be present but differences may be on the sequence level. custom_keep_genes (list): List of gene IDs to keep in analysis custom_keep_strains (list): List of strain IDs to keep in analysis
codesearchnet
def replace_batch_norm(model): for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = DabDetrFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device('meta'): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module)
Recursively replace all `torch.nn.BatchNorm2d` with `DabDetrFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model
github-repos
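A small self-contained check of the helper above on a toy module; only DabDetrFrozenBatchNorm2d from the surrounding file is assumed to be in scope:

from torch import nn

backbone = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(),
)
replace_batch_norm(backbone)       # swaps BatchNorm2d for the frozen variant in place
print(type(backbone[1]).__name__)  # -> DabDetrFrozenBatchNorm2d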
def get_event(self, event_name, event_history=None): if (event_history is None): event_history = (event_name + '_history') return self._db.rpoplpush(event_name, event_history)
Get an event from the database. Gets an event from the named event list removing the event and adding it to the event history. Args: event_name (str): Event list key. event_history (str, optional): Event history list. Returns: str: string representation of the event object
codesearchnet
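Usage sketch; `events_db` stands in for an instance of the class above whose _db attribute is a Redis client (construction omitted, and the list names are illustrative):

events_db = ...  # assumed instance backed by a Redis connection

# Pop the oldest event and archive it in 'sbi_events_history'
event = events_db.get_event("sbi_events", event_history="sbi_events_history")
if event is not None:
    print("handling event:", event)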
def download_models(self, uniprot_acc, outdir='', force_rerun=False): downloaded = [] subset = self.get_models(uniprot_acc) for entry in subset: ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to']) outfile = op.join(outdir, (ident + '.pdb')) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): response = requests.get(entry['url']) if (response.status_code == 404): log.error('{}: 404 returned, no model available.'.format(ident)) else: with open(outfile, 'w') as f: f.write(response.text) log.debug('{}: downloaded homology model'.format(ident)) downloaded.append(outfile) else: downloaded.append(outfile) return downloaded
Download all models available for a UniProt accession number. Args: uniprot_acc (str): UniProt ACC/ID outdir (str): Path to output directory, uses working directory if not set force_rerun (bool): Force a redownload the models if they already exist Returns: list: Paths to the downloaded models
codesearchnet
def set_interface(self, vrf_name, interface, default=False, disable=False): cmds = [('interface %s' % interface)] cmds.append(self.command_builder('vrf forwarding', value=vrf_name, default=default, disable=disable)) return self.configure(cmds)
Adds a VRF to an interface

Notes:
    Requires interface to be in routed mode. Must apply ip address
    after VRF has been applied. This feature can also be accessed
    through the interfaces api.

Args:
    vrf_name (str): The VRF name to configure
    interface (str): The interface to add the VRF to
    default (bool): Set interface VRF forwarding to default
    disable (bool): Negate interface VRF forwarding

Returns:
    True if the operation was successful otherwise False
codesearchnet
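A hedged example of driving this through pyeapi; the connection profile name and interface are illustrative, and node.api('vrfs') is assumed to return the API instance that defines set_interface():

import pyeapi

node = pyeapi.connect_to("veos01")   # profile from ~/.eapi.conf (assumed)
vrfs = node.api("vrfs")

if vrfs.set_interface("blue", "Ethernet2"):
    print("VRF 'blue' applied to Ethernet2")

# Remove the binding again
vrfs.set_interface("blue", "Ethernet2", disable=True)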
def get_sid_string(principal): if (principal is None): principal = 'NULL SID' try: return win32security.ConvertSidToStringSid(principal) except TypeError: principal = get_sid(principal) try: return win32security.ConvertSidToStringSid(principal) except pywintypes.error: log.exception('Invalid principal %s', principal) raise CommandExecutionError('Invalid principal {0}'.format(principal))
Converts a PySID object to a string SID.

Args:
    principal (PySID): The principal to look up the SID for. Must be a
        PySID object.

Returns:
    str: A string SID

Usage:

.. code-block:: python

    # Get a PySID object
    py_sid = salt.utils.win_dacl.get_sid('jsnuffy')

    # Get the string version of the SID
    salt.utils.win_dacl.get_sid_string(py_sid)
codesearchnet
def create_s3_event(app_name, env, region, bucket, triggers): session = boto3.Session(profile_name=env, region_name=region) s3_client = session.client('s3') lambda_alias_arn = get_lambda_alias_arn(app_name, env, region) LOG.debug("Lambda ARN for lambda function %s is %s.", app_name, lambda_alias_arn) LOG.debug("Creating S3 events for bucket %s", bucket) principal = 's3.amazonaws.com' statement_id = "{}_s3_{}".format(app_name, bucket).replace('.', '') source_arn = "arn:aws:s3:::{}".format(bucket) add_lambda_permissions( function=lambda_alias_arn, env=env, region=region, principal=principal, statement_id=statement_id, source_arn=source_arn) template_kwargs = {"lambda_arn": lambda_alias_arn, "triggers": triggers} config = get_template(template_file='infrastructure/lambda/s3_event.json.j2', **template_kwargs) s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=json.loads(config)) LOG.info("Created lambda %s S3 event on bucket %s", app_name, bucket)
Create S3 lambda events from triggers Args: app_name (str): name of the lambda function env (str): Environment/Account for lambda function region (str): AWS region of the lambda function triggers (list): List of triggers from the settings
juraj-google-style
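A call sketch for create_s3_event; the keys inside each trigger dictionary are assumptions, since their real shape is dictated by the s3_event.json.j2 template rendered above:

triggers = [
    {"events": ["s3:ObjectCreated:*"], "prefix": "uploads/", "suffix": ".csv"},  # assumed keys
]

create_s3_event(
    app_name="dataloader",
    env="dev",
    region="us-east-1",
    bucket="my-ingest-bucket",
    triggers=triggers,
)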
def dump_tree(self, statement=None, indent_level=0): out = u'' indent = (u' ' * indent_level) if (statement is None): for root_statement in self.statements: out += self.dump_tree(root_statement, indent_level) else: out += ((indent + str(statement)) + u'\n') if (len(statement.children) > 0): for child in statement.children: out += self.dump_tree(child, indent_level=(indent_level + 4)) return out
Dump the AST for this parsed file. Args: statement (SensorGraphStatement): the statement to print if this function is called recursively. indent_level (int): The number of spaces to indent this statement. Used for recursively printing blocks of statements. Returns: str: The AST for this parsed sg file as a nested tree with one node per line and blocks indented.
codesearchnet
def slice(self, start, end): reverse = False if start > end: temp = start start = end end = temp reverse = True seg = self.copy() seg.points = seg.points[start:end+1] if reverse: seg.points = list(reversed(seg.points)) return seg
Creates a copy of the current segment between indexes. If start > end,
    the indexes are swapped and the points in the returned copy are reversed.

Args:
    start (int): Start index
    end (int): End index
Returns:
    :obj:`Segment`
juraj-google-style
def create_bagit_stream(dir_name, payload_info_list): zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED) _add_path(dir_name, payload_info_list) (payload_byte_count, payload_file_count) = _add_payload_files(zip_file, payload_info_list) tag_info_list = _add_tag_files(zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count) _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list) _add_tag_manifest_file(zip_file, dir_name, tag_info_list) return zip_file
Create a stream containing a BagIt zip archive. Args: dir_name : str The name of the root directory in the zip file, under which all the files are placed (avoids "zip bombs"). payload_info_list: list List of payload_info_dict, each dict describing a file. - keys: pid, filename, iter, checksum, checksum_algorithm - If the filename is None, the pid is used for the filename.
codesearchnet
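A sketch of consuming the returned zipstream object; the payload dictionary values are illustrative and the checksum string is a placeholder, not a real digest:

payload_info_list = [
    {
        "pid": "doi:10.5072/example-1",
        "filename": "data.csv",
        "iter": iter([b"a,b\n1,2\n"]),               # iterable of byte chunks (assumed)
        "checksum": "<hex digest of the payload>",   # placeholder
        "checksum_algorithm": "SHA-1",
    },
]

zip_stream = create_bagit_stream("my_bag", payload_info_list)
with open("my_bag.zip", "wb") as f:
    for chunk in zip_stream:    # zipstream.ZipFile yields the archive incrementally
        f.write(chunk)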
def get_images_by_catid_and_aoi(self, catid, aoi_wkt): self.logger.debug('Retrieving IDAHO metadata') url = '%s/search' % self.base_url body = {"filters": ["catalogID = '%s'" % catid], "types": ["IDAHOImage"], "searchAreaWkt": aoi_wkt} r = self.gbdx_connection.post(url, data=json.dumps(body)) r.raise_for_status() if r.status_code == 200: results = r.json() numresults = len(results['results']) self.logger.debug('%s IDAHO images found associated with catid %s' % (numresults, catid)) return results
Retrieves the IDAHO image records associated with a given catid. Args: catid (str): The source catalog ID from the platform catalog. aoi_wkt (str): The well known text of the area of interest. Returns: results (json): The full catalog-search response for IDAHO images within the catID.
juraj-google-style
def needle_statistics_alignio(infile):
    alignments = list(AlignIO.parse(infile, "emboss"))
    if len(alignments) > 1:
        raise ValueError('Alignment file contains more than one pairwise alignment')
    alignment = alignments[0]
    with open(infile) as f:
        line = f.readline()
        for i in range(len(alignments)):
            while line.rstrip() != "#=======================================":
                line = f.readline()
                if not line:
                    raise StopIteration
            while line[0] == "#":
                parts = line[1:].split(":", 1)
                key = parts[0].lower().strip()
                if key == 'identity':
                    ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
                    ident_num = int(ident_parse[0].split('/')[0])
                    ident_percent = float(ident_parse[1])
                    alignment.annotations['identity'] = ident_num
                    alignment.annotations['percent_identity'] = ident_percent
                if key == 'similarity':
                    sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
                    sim_num = int(sim_parse[0].split('/')[0])
                    sim_percent = float(sim_parse[1])
                    alignment.annotations['similarity'] = sim_num
                    alignment.annotations['percent_similarity'] = sim_percent
                if key == 'gaps':
                    gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
                    gap_num = int(gap_parse[0].split('/')[0])
                    gap_percent = float(gap_parse[1])
                    alignment.annotations['gaps'] = gap_num
                    alignment.annotations['percent_gaps'] = gap_percent
                if key == 'score':
                    score = float(parts[1].strip())
                    alignment.annotations['score'] = score
                line = f.readline()
    return alignment
Reads in a needle alignment file and returns an AlignIO object with annotations Args: infile (str): Alignment file name Returns: AlignIO: annotated AlignIO object
juraj-google-style
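Usage sketch, assuming an EMBOSS needle output file is on disk (the filename is illustrative):

aln = needle_statistics_alignio("query_vs_target.needle")
print(aln.annotations["percent_identity"],
      aln.annotations["percent_similarity"],
      aln.annotations["score"])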
def success(channel, stats, name, platform, dp):
    datapacks = [("Platform", platform, False)]
    for stat in stats:
        if stat[0] in ("Duel 1v1", "Doubles 2v2", "Solo Standard 3v3", "Standard 3v3"):
            stat_name = "__" + stat[0] + "__"
            stat_value = "**" + stat[1] + "**"
        else:
            stat_name = stat[0]
            stat_value = stat[1]
        if stat[2]:
            stat_value += " *(Top " + stat[2] + "%)*"
        datapacks.append((stat_name, stat_value, True))

    gui = ui_embed.UI(
        channel,
        "Rocket League Stats: {}".format(name),
        "*Stats obtained from [Rocket League Tracker Network](https://rocketleague.tracker.network/)*",
        modulename=modulename,
        colour=0x0088FF,
        thumbnail=dp,
        datapacks=datapacks
    )

    return gui
Creates an embed UI containing the Rocket League stats

Args:
    channel (discord.Channel): The Discord channel to bind the embed to
    stats (tuple): Tuples of (field, value, percentile)
    name (str): The name of the player
    platform (str): The platform to search on, can be 'steam', 'ps', or 'xbox'
    dp (str): URL to the player's dp

Returns:
    (discord.Embed): The created embed
juraj-google-style
def _set_device(self, device) -> None: self._set_device_from_string(compat.as_str(_device_string(device)))
Set the device of this operation.

Args:
    device: string or device. The device to set.
github-repos
def create_image_uri(region, framework, instance_type, framework_version, py_version=None, account='520713654638', accelerator_type=None, optimized_families=None):
    optimized_families = (optimized_families or [])
    if (py_version and (py_version not in VALID_PY_VERSIONS)):
        raise ValueError('invalid py_version argument: {}'.format(py_version))
    account = VALID_ACCOUNTS_BY_REGION.get(region, account)
    if instance_type.startswith('local'):
        device_type = ('cpu' if (instance_type == 'local') else 'gpu')
    elif (not instance_type.startswith('ml.')):
        raise ValueError('{} is not a valid SageMaker instance type. See: https://aws.amazon.com/sagemaker/pricing/instance-types'.format(instance_type))
    else:
        family = instance_type.split('.')[1]
        if (family in optimized_families):
            device_type = family
        elif (family[0] in ['g', 'p']):
            device_type = 'gpu'
        else:
            device_type = 'cpu'
    if py_version:
        tag = '{}-{}-{}'.format(framework_version, device_type, py_version)
    else:
        tag = '{}-{}'.format(framework_version, device_type)
    if _accelerator_type_valid_for_framework(framework=framework, accelerator_type=accelerator_type, optimized_families=optimized_families):
        framework += '-eia'
    return '{}/sagemaker-{}:{}'.format(get_ecr_image_uri_prefix(account, region), framework, tag)
Return the ECR URI of an image. Args: region (str): AWS region where the image is uploaded. framework (str): framework used by the image. instance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized). framework_version (str): The version of the framework. py_version (str): Optional. Python version. If specified, should be one of 'py2' or 'py3'. If not specified, image uri will not include a python component. account (str): AWS account that contains the image. (default: '520713654638') accelerator_type (str): SageMaker Elastic Inference accelerator type. optimized_families (str): Instance families for which there exist specific optimized images. Returns: str: The appropriate image URI based on the given parameters.
codesearchnet
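Example call with illustrative arguments; the resulting tag follows the '{version}-{device}-{py_version}' pattern built above:

uri = create_image_uri(
    region="us-west-2",
    framework="tensorflow",
    instance_type="ml.p3.2xlarge",
    framework_version="1.12.0",
    py_version="py3",
)
# e.g. '520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-tensorflow:1.12.0-gpu-py3'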
def load_variables(defines, config_file): if config_file is not None: with open(config_file, "r") as conf_file: variables = yaml.load(conf_file) else: variables = {} for define in defines: name, equ, value = define.partition('=') if equ != '=': print("Invalid variable definition") print("- expected name=value") print("- found: '%s'" % define) sys.exit(1) variables[name] = value return variables
Load all variables from cmdline args and/or a config file. Args: defines (list of str): A list of name=value pairs that define free variables. config_file (str): An optional path to a yaml config file that defines a single dict with name=value variable definitions.
juraj-google-style
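Two illustrative invocations; the YAML file name is an assumption and the file would need to contain a single mapping of names to values:

# Command-line style definitions only
variables = load_variables(["target=nrf52840", "debug=1"], config_file=None)
# -> {'target': 'nrf52840', 'debug': '1'}

# Merge a YAML config file with an extra override (defines win over the file)
variables = load_variables(["debug=0"], config_file="build_settings.yaml")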
def insert_before(self, value: Union[(RawValue, Value)], raw: bool=False) -> 'ArrayEntry': return ArrayEntry(self.index, self.before, self.after.cons(self.value), self._cook_value(value, raw), self.parinst, self.schema_node, datetime.now())
Insert a new entry before the receiver. Args: value: The value of the new entry. raw: Flag to be set if `value` is raw. Returns: An instance node of the new inserted entry.
codesearchnet
def get_all_dataset_names(configuration=None, **kwargs): dataset = Dataset(configuration=configuration) dataset['id'] = 'all dataset names' return dataset._write_to_hdx('list', kwargs, 'id')
Get all dataset names in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. **kwargs: See below limit (int): Number of rows to return. Defaults to all dataset names. offset (int): Offset in the complete result for where the set of returned dataset names should begin Returns: List[str]: list of all dataset names in HDX
codesearchnet
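A hedged end-to-end sketch using the HDX Python library; the import paths and the Configuration.create() call reflect my understanding of that library and should be treated as assumptions:

from hdx.hdx_configuration import Configuration
from hdx.data.dataset import Dataset

Configuration.create(hdx_site="prod", user_agent="example-app", hdx_read_only=True)
names = Dataset.get_all_dataset_names(limit=25)
print(names[:5])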
def _estimate_data_distribution(c, num_examples_per_class_seen): num_classes = num_examples_per_class_seen.get_shape()[0] num_examples_per_class_seen = math_ops.add(num_examples_per_class_seen, math_ops.reduce_sum(array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0)) init_prob_estimate = math_ops.truediv(num_examples_per_class_seen, math_ops.reduce_sum(num_examples_per_class_seen)) dist = math_ops.cast(init_prob_estimate, dtypes.float32) return (num_examples_per_class_seen, dist)
Estimate data distribution as labels are seen.

Args:
    c: The class labels. Type `int32`, shape `[batch_size]`.
    num_examples_per_class_seen: Type `int64`, shape `[num_classes]`,
        containing counts.

Returns:
    num_examples_per_class_seen: Updated counts. Type `int64`, shape
        `[num_classes]`.
    dist: The updated distribution. Type `float32`, shape `[num_classes]`.
github-repos
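A small worked call, assuming eager execution so the tensors can be inspected directly; three classes and one batch of four labels:

import tensorflow as tf

counts = tf.zeros([3], dtype=tf.int64)               # running per-class counts
labels = tf.constant([0, 1, 1, 2], dtype=tf.int32)   # one batch of class labels

counts, dist = _estimate_data_distribution(labels, counts)
# counts -> [1, 2, 1], dist -> [0.25, 0.5, 0.25]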
def cancel_id(cls, id): conn = Qubole.agent() data = {'status': 'kill'} return conn.put(cls.element_path(id), data)
Cancels command denoted by this id Args: `id`: command id
codesearchnet
def render_registered(url_id, remote_info): return template(read_index_template(), registered=True, url=remote_info['url'], seeder_data=json.dumps(remote_info), url_id=url_id)
Render template file for the registered user, which has some of the values
    prefilled.

Args:
    url_id (str): Seeder URL id.
    remote_info (dict): Information read from Seeder.

Returns:
    str: Template filled with data.
codesearchnet
def forward_event_shape_tensor(self, input_shape, name='forward_event_shape_tensor'): with self._name_scope(name, [input_shape]): input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32, name='input_shape') return self._forward_event_shape_tensor(input_shape)
Shape of a single sample from a single batch as an `int32` 1D `Tensor`. Args: input_shape: `Tensor`, `int32` vector indicating event-portion shape passed into `forward` function. name: name to give to the op Returns: forward_event_shape_tensor: `Tensor`, `int32` vector indicating event-portion shape after applying `forward`.
github-repos
def suggestions(self, word): suggestions = set(self._misspelling_dict.get(word, [])).union(set(self._misspelling_dict.get(word.lower(), []))) return sorted([same_case(source=word, destination=w) for w in suggestions])
Returns a list of suggestions for a misspelled word. Args: word: The word to check. Returns: List of zero or more suggested replacements for word.
codesearchnet
def set_napp(self, user, napp, version=None): self.user = user self.napp = napp self.version = version or 'latest'
Set info about NApp. Args: user (str): NApps Server username. napp (str): NApp name. version (str): NApp version.
juraj-google-style
def CheckEmptyBlockBody(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] matched = Match(r'\s*(for|while|if)\s*\(', line) if matched: (end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) if end_pos >= 0 and Match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue')
Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
juraj-google-style
def up(self) -> 'InstanceNode': ts = max(self.timestamp, self.parinst.timestamp) return self.parinst._copy(self._zip(), ts)
Return an instance node corresponding to the receiver's parent. Raises: NonexistentInstance: If there is no parent.
codesearchnet
def log_transition(self, transition, from_state, instance, *args, **kwargs): logger = logging.getLogger('xworkflows.transitions') try: instance_repr = u(repr(instance), 'ignore') except (UnicodeEncodeError, UnicodeDecodeError): instance_repr = u('<bad repr>') logger.info(u('%s performed transition %s.%s (%s -> %s)'), instance_repr, self.__class__.__name__, transition.name, from_state.name, transition.target.name)
Log a transition. Args: transition (Transition): the name of the performed transition from_state (State): the source state instance (object): the modified object Kwargs: Any passed when calling the transition
codesearchnet
def _on_scan(self, info): device_id = info['uuid'] expiration_time = info.get('validity_period', 60) infocopy = deepcopy(info) infocopy['expiration_time'] = (monotonic() + expiration_time) with self._scan_lock: self._scanned_devices[device_id] = infocopy
Callback called when a new device is discovered on this CMDStream Args: info (dict): Information about the scanned device
codesearchnet