Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def is_reading_in_conditional_node(self, variable): variables_read = [n.variables_read for n in self.nodes if n.contains_if()] variables_read = [item for sublist in variables_read for item in sublist] return variable in variables_read
Check if the function reads the variable in an IF node Args: variable (Variable): Returns: bool: True if the variable is read
juraj-google-style
def set_led(self, colorcode): data = [] data.append(0x0A) data.append(self.servoid) data.append(RAM_WRITE_REQ) data.append(LED_CONTROL_RAM) data.append(0x01) data.append(colorcode) send_data(data)
Set the LED Color of Herkulex Args: colorcode (int): The code for colors (0x00-OFF 0x02-BLUE 0x03-CYAN 0x04-RED 0x05-ORANGE 0x06-VIOLET 0x07-WHITE)
juraj-google-style
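Illustrative sketch only: the helper below reconstructs the RAM-write frame that set_led builds, so the byte layout can be inspected without a servo attached. The RAM_WRITE_REQ and LED_CONTROL_RAM values and the color constant are assumptions for the example, not taken from the driver.

# Hypothetical constants standing in for the driver's module-level values.
RAM_WRITE_REQ = 0x03     # assumed command code for a RAM write request
LED_CONTROL_RAM = 53     # assumed RAM register controlling the LED
LED_RED = 0x04           # one of the color codes listed in the docstring

def build_led_frame(servo_id, colorcode):
    # Mirrors the list built by set_led: [size, servo id, command, register, data length, value]
    return [0x0A, servo_id, RAM_WRITE_REQ, LED_CONTROL_RAM, 0x01, colorcode]

print(build_led_frame(0xFD, LED_RED))   # -> [10, 253, 3, 53, 1, 4]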
def get_sample(self, md5): if len(md5) < 32: md5 = self.get_full_md5(md5, self.sample_collection) sample_info = self.database[self.sample_collection].find_one({'md5': md5}) if not sample_info: return None try: grid_fs_id = sample_info['__grid_fs'] sample_info = self.clean_for_serialization(sample_info) sample_info.update({'raw_bytes':self.gridfs_handle.get(grid_fs_id).read()}) return sample_info except gridfs.errors.CorruptGridFile: self.database[self.sample_collection].update({'md5': md5}, {'md5': None}) return None
Get the sample from the data store. This method first fetches the data from the datastore, cleans it for serialization, and then updates it with the 'raw_bytes' item. Args: md5: The md5 digest of the sample to be fetched from the datastore. Returns: The sample dictionary or None
juraj-google-style
def _create_unicode_map(): unicode_map = {} for (beta, uni) in _map.BETACODE_MAP.items(): norm = unicodedata.normalize('NFC', uni) unicode_map[norm] = beta unicode_map[uni] = beta final_sigma_norm = unicodedata.normalize('NFC', _FINAL_LC_SIGMA) unicode_map[final_sigma_norm] = 's' unicode_map[_FINAL_LC_SIGMA] = 's' return unicode_map
Create the inverse map from unicode to betacode. Returns: The hash map to convert unicode characters to the beta code representation.
codesearchnet
def __init__(self, clustering_algorithm, n_clusters: int, cluster_args: dict, checkpoints_path: str, batch_size: int=1024, is_batched: bool=False): super().__init__() self.clustering_algorithm = clustering_algorithm self.n_clusters = n_clusters self.batch_size = batch_size self.cluster_args = cluster_args self.checkpoints_path = checkpoints_path self.is_batched = is_batched
Clustering transformation: it first preprocesses the data, then applies the clustering transformation step by step on each of the batches. Example Usage:: pcoll | OnlineClustering( clustering_algorithm=OnlineKMeansClustering, batch_size=1024, n_clusters=6, cluster_args={}) Args: clustering_algorithm: Clustering algorithm (DoFn) n_clusters: Number of clusters cluster_args: Arguments for the sklearn clustering algorithm (check sklearn documentation for more information) batch_size: size of the data batches is_batched: boolean value that marks if the collection is already batched and thus doesn't need to be batched by this transform
github-repos
def _construct_context_for_args(args): global_default_context = google.datalab.Context.default() config = {} for key in global_default_context.config: config[key] = global_default_context.config[key] billing_tier_arg = args.get('billing', None) if billing_tier_arg: config['bigquery_billing_tier'] = billing_tier_arg return google.datalab.Context(project_id=global_default_context.project_id, credentials=global_default_context.credentials, config=config)
Construct a new Context for the parsed arguments. Args: args: the dictionary of magic arguments. Returns: A new Context based on the current default context, but with any explicitly specified arguments overriding the default's config.
codesearchnet
def transform(geom, to_sref): try: geom = getattr(geom, 'polygon', Envelope(geom).polygon) except (TypeError, ValueError): pass else: geom.AssignSpatialReference(to_sref) try: geom_sref = geom.GetSpatialReference() except AttributeError: return transform(Geometry(geom), to_sref) if geom_sref is None: raise Exception('Cannot transform from unknown spatial reference') if not geom_sref.IsSame(to_sref): geom = geom.Clone() geom.TransformTo(to_sref) return geom
Returns a transformed Geometry. Arguments: geom -- any coercible Geometry value or Envelope to_sref -- SpatialReference or EPSG ID as int
juraj-google-style
def remove(self, name): try: del self.data[name] except (ValueError, KeyError): import warnings warnings.warn(("Unable to find column '%s' in data source" % name))
Remove a column of data. Args: name (str) : name of the column to remove Returns: None .. note:: If the column name does not exist, a warning is issued.
codesearchnet
def _ConvertFile(cls, path): with open(path) as f: src = f.read() short_path = os.path.basename(path) assertions = 0 for assertion_re in (cls.ASSERTION_RE, cls.MOCK_METHOD_CALL_RE): start = 0 match = assertion_re.search(src, start) while match: assertion_start = match.start('assertion') i = assertion_start + len(match.group('assertion')) last_comma = i - 1 args = [] depth_round = 1 depth_curly = 0 depth_square = 0 while depth_round: if i == len(src): line = src[:assertion_start].count('\n') + 1 snippet = src[assertion_start:src.find('\n', assertion_start)] logging.error('Unbalanced parentheses at %s:%d: %s', short_path, line, snippet) return False elif cls.QUOTE_RE.match(src[i]): start_quote = src[i] i += 1 while src[i] != start_quote or src[i - 1] == '\\': i += 1 elif src[i] == '#': while src[i] != '\n': i += 1 elif src[i] == '(': depth_round += 1 elif src[i] == ')': depth_round -= 1 elif src[i] == '{': depth_curly += 1 elif src[i] == '}': depth_curly -= 1 elif src[i] == '[': depth_square += 1 elif src[i] == ']': depth_square -= 1 if not depth_curly and (not depth_square) and (src[i] == ',' and depth_round == 1 or (src[i] == ')' and (not depth_round))): arg = src[last_comma + 1:i].strip() if arg: args.append(arg) last_comma = i i += 1 end = i indentation, akey = match.group('indent', 'akey') if akey not in cls.MOCK_METHOD_ASSERTIONS and (not akey.startswith('Raises')): args = args[:2] if 'method' in match.groupdict(): args.insert(0, match.group('method')) replacement = cls._GetReplacement(indentation, akey, args) logging.debug((start, end, replacement)) src = ''.join((src[:assertion_start], replacement, src[end:])) assertions += 1 start = assertion_start + len(replacement) match = assertion_re.search(src, start) output_path = FLAGS.output and os.path.expanduser(FLAGS.output) or path with open(output_path, 'w') as f: f.write(src) logging.info('Converted %s (%d assertion%s)', short_path, assertions, '' if assertions == 1 else 's') return True
Converts a single file from unittest to PyTruth. Args: path: string, the path of file to be converted. Returns: Boolean: True if the file was successfully converted, otherwise False.
github-repos
def get_transaction(self, transaction_id): payload = self._get_data_by_id(transaction_id, 'commit_store_get_transaction') txn = Transaction() txn.ParseFromString(payload) return txn
Returns a Transaction object from the block store by its id. Params: transaction_id (str): The header_signature of the desired txn Returns: Transaction: The specified transaction Raises: ValueError: The transaction is not in the block store
codesearchnet
def write_events(self, events): with self.write_lock, self.conn: self.conn.executemany( 'INSERT INTO state_events(' ' identifier, source_statechange_id, log_time, data' ') VALUES(?, ?, ?, ?)', events, )
Save events. Args: state_change_identifier: Id of the state change that generated these events. events: List of Event objects.
juraj-google-style
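A self-contained sketch of the insert pattern used by write_events above, with an in-memory SQLite database and a plain threading.Lock standing in for the object's conn and write_lock attributes; the table layout is inferred from the INSERT statement in the code.

import sqlite3
import threading

conn = sqlite3.connect(':memory:')
write_lock = threading.Lock()
conn.execute('CREATE TABLE state_events (identifier INTEGER, source_statechange_id INTEGER, log_time TEXT, data TEXT)')

# Each event is a tuple matching the four placeholders of the INSERT.
events = [
    (1, 7, '2019-01-01T00:00:00', '{"type": "EventPaymentSentSuccess"}'),
    (2, 7, '2019-01-01T00:00:01', '{"type": "EventPaymentReceivedSuccess"}'),
]

with write_lock, conn:
    conn.executemany(
        'INSERT INTO state_events('
        ' identifier, source_statechange_id, log_time, data'
        ') VALUES(?, ?, ?, ?)',
        events,
    )

print(conn.execute('SELECT COUNT(*) FROM state_events').fetchone()[0])  # -> 2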
def returns_true_or_raises(f): @functools.wraps(f) def wrapped(*args, **kwargs): ret = f(*args, **kwargs) if (ret is not True): raise RuntimeError(('Unexpected return value %r' % ret)) return True return wrapped
A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped function whose only guaranteed return value is True.
codesearchnet
def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator): for idx, func in enumerate(iterator): assert callable(func), 'Test generators must yield callables, got %r' % ( func,) if getattr(func, '__x_use_name__', False): new_name = func.__name__ else: new_name = '%s%s%d' % (name, _SEPARATOR, idx) assert new_name not in dct, ( 'Name of parameterized test case "%s" not unique' % (new_name,)) dct[new_name] = func id_suffix[new_name] = getattr(func, '__x_extra_id__', '')
Adds individual test cases to a dictionary. Args: dct: The target dictionary. id_suffix: The dictionary for mapping names to test IDs. name: The original name of the test case. iterator: The iterator generating the individual test cases.
juraj-google-style
def FromId(os_id, error_on_unknown=True): if not os_id: return None for operating_system in OperatingSystem._ALL: if operating_system.id == os_id: return operating_system if error_on_unknown: raise InvalidEnumValue(os_id, 'Operating System', [value.id for value in OperatingSystem._ALL]) return None
Gets the enum corresponding to the given operating system id. Args: os_id: str, The operating system id to parse error_on_unknown: bool, True to raise an exception if the id is unknown, False to just return None. Raises: InvalidEnumValue: If the given value cannot be parsed. Returns: OperatingSystemTuple, One of the OperatingSystem constants or None if the input is None.
github-repos
def nav_to_vcf_dir(ftp, build): if (build == 'b37'): ftp.cwd(DIR_CLINVAR_VCF_B37) elif (build == 'b38'): ftp.cwd(DIR_CLINVAR_VCF_B38) else: raise IOError('Genome build not recognized.')
Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files. Args: ftp: (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov build: (type: string) genome build, either 'b37' or 'b38'
codesearchnet
def _AddWebPageCriteria(client, ad_group_id): ad_group_criterion_service = client.GetService('AdGroupCriterionService', version='v201809') operations = [{ 'operator': 'ADD', 'operand': { 'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': ad_group_id, 'criterion': { 'xsi_type': 'Webpage', 'parameter': { 'criterionName': 'Special offers for children.', 'conditions': [ { 'operand': 'URL', 'argument': '/marscruise/children' }, { 'operand': 'PAGE_TITLE', 'argument': 'Special Offer' } ] } }, 'userStatus': 'PAUSED', 'biddingStrategyConfiguration': { 'bids': [{ 'xsi_type': 'CpcBid', 'bid': { 'microAmount': 10000000L } }] } } }] criterion = ad_group_criterion_service.mutate(operations)['value'][0] print 'Webpage criterion with ID "%d" was added to ad group ID "%d".' % ( criterion['criterion']['id'], criterion['adGroupId'])
Adds a web page criterion to target Dynamic Search Ads. Args: client: an AdWordsClient instance. ad_group_id: an integer ID of the ad group the criteria is being added to.
juraj-google-style
def set_lacp_mode(self, name, mode): if mode not in ['on', 'passive', 'active']: return False grpid = re.search(r'(\d+)', name).group() remove_commands = list() add_commands = list() for member in self.get_members(name): remove_commands.append('interface %s' % member) remove_commands.append('no channel-group %s' % grpid) add_commands.append('interface %s' % member) add_commands.append('channel-group %s mode %s' % (grpid, mode)) return self.configure(remove_commands + add_commands)
Configures the LACP mode of the member interfaces Args: name(str): The Port-Channel interface name to configure the LACP mode mode(str): The LACP mode to configure the member interfaces to. Valid values are 'on', 'passive', 'active' Returns: True if the operation succeeds otherwise False
juraj-google-style
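A standalone illustration of the command list that set_lacp_mode assembles before handing it to configure(); the member interface names are made up for the example.

import re

name = 'Port-Channel10'
mode = 'active'
members = ['Ethernet1', 'Ethernet2']   # hypothetical members of the port channel

grpid = re.search(r'(\d+)', name).group()
remove_commands = []
add_commands = []
for member in members:
    remove_commands += ['interface %s' % member, 'no channel-group %s' % grpid]
    add_commands += ['interface %s' % member, 'channel-group %s mode %s' % (grpid, mode)]

# The method sends remove_commands + add_commands in a single configure() call.
print(remove_commands + add_commands)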
def sync(self, since=None, timeout_ms=30000, filter=None, full_state=None, set_presence=None): request = { "timeout": int(timeout_ms) } if since: request["since"] = since if filter: request["filter"] = filter if full_state: request["full_state"] = json.dumps(full_state) if set_presence: request["set_presence"] = set_presence return self._send("GET", "/sync", query_params=request, api_path=MATRIX_V2_API_PATH)
Perform a sync request. Args: since (str): Optional. A token which specifies where to continue a sync from. timeout_ms (int): Optional. The time in milliseconds to wait. filter (int|str): Either a Filter ID or a JSON string. full_state (bool): Return the full state for every room the user has joined. Defaults to false. set_presence (str): Should the client be marked as "online" or "offline".
juraj-google-style
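A hedged sketch of a long-polling loop built on sync above, assuming client is an instance of the surrounding Matrix API class; the next_batch token from each response is fed back as since, as the docstring describes.

import time

def sync_forever(client, timeout_ms=30000):
    since = None
    while True:
        try:
            response = client.sync(since=since, timeout_ms=timeout_ms)
        except Exception:        # transient network error: back off and retry
            time.sleep(5)
            continue
        since = response.get('next_batch')
        # Walk the joined-room timelines in the sync response.
        for room_id, room in response.get('rooms', {}).get('join', {}).items():
            for event in room.get('timeline', {}).get('events', []):
                print(room_id, event.get('type'))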
def email_has_role(self, email, role_name, uuid=None): mbr_data = self.get_membership(uuid=uuid) docs = [] try: docs = mbr_data['response']['docs'] except KeyError: failure_message = 'KeyError in membership data - got {0}'.format(mbr_data) log.exception(failure_message) raise PyLmodUnexpectedData(failure_message) if (len(docs) == 0): return False has_role = any((((x.get('email') == email) and (x.get('roleType') == role_name)) for x in docs)) if has_role: return True return False
Determine if an email is associated with a role. Args: email (str): user email role_name (str): user role uuid (str): optional uuid. defaults to self.cuuid Raises: PyLmodUnexpectedData: Unexpected data was returned. requests.RequestException: Exception connection error Returns: bool: True or False if email has role_name
codesearchnet
def upload(target): log.info('Uploading to pypi server <33>{}'.format(target)) with conf.within_proj_dir(): shell.run('python setup.py sdist register -r "{}"'.format(target)) shell.run('python setup.py sdist upload -r "{}"'.format(target))
Upload the release to a pypi server. TODO: Make sure the git directory is clean before allowing a release. Args: target (str): pypi target as defined in ~/.pypirc
codesearchnet
def codemirror_field_js_bundle(field): manifesto = CodemirrorAssetTagRender() manifesto.register_from_fields(field) try: bundle_name = manifesto.js_bundle_names()[0] except IndexError: msg = "Given field with configuration name '{}' does not have a Javascript bundle name" raise CodeMirrorFieldBundleError(msg.format(field.config_name)) return bundle_name
Filter to get CodeMirror Javascript bundle name needed for a single field. Example: :: {% load djangocodemirror_tags %} {{ form.myfield|codemirror_field_js_bundle }} Arguments: field (django.forms.fields.Field): A form field that contains a widget :class:`djangocodemirror.widget.CodeMirrorWidget`. Raises: CodeMirrorFieldBundleError: If Codemirror configuration form field does not have a bundle name. Returns: string: Bundle name to load with webassets.
codesearchnet
def stringize( self, rnf_profile, ): coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right))) return "({},{},{},{},{})".format( str(self.genome_id).zfill(rnf_profile.genome_id_width), str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction, str(self.left).zfill(coor_width), str(self.right).zfill(coor_width) )
Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).
juraj-google-style
def _format_batch_statuses(statuses, batch_ids, tracker): proto_statuses = [] for batch_id in batch_ids: if (statuses[batch_id] == client_batch_submit_pb2.ClientBatchStatus.INVALID): invalid_txns = tracker.get_invalid_txn_info(batch_id) for txn_info in invalid_txns: try: txn_info['transaction_id'] = txn_info.pop('id') except KeyError as e: LOGGER.debug(e) else: invalid_txns = None proto_statuses.append(client_batch_submit_pb2.ClientBatchStatus(batch_id=batch_id, status=statuses[batch_id], invalid_transactions=invalid_txns)) return proto_statuses
Takes a statuses dict and formats it for transmission with Protobuf and ZMQ. Args: statuses (dict of int): Dict with batch ids as the key, status as value batch_ids (list of str): The batch ids in their original order tracker (BatchTracker): A batch tracker with access to invalid info
codesearchnet
def add_migrations(self, migrations): if self.__closed: raise MigrationSessionError("Can't change applied session") self._to_apply.extend(migrations)
Add migrations to be applied. Args: migrations: a list of migrations to add of the form [(app, migration_name), ...] Raises: MigrationSessionError if called on a closed MigrationSession
codesearchnet
def get_victim_email_asset(self, main_type, sub_type, unique_id, asset_id, params=None): params = params or {} return self.victim_email_asset(main_type, sub_type, unique_id, asset_id, params=params)
Args: main_type: sub_type: unique_id: asset_id: params: Return:
juraj-google-style
def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None): if mapping is None: mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs) elif any((arg is not None for arg in [x, y, ildj_map, kwargs])): raise ValueError('Cannot simultaneously specify mapping and individual arguments.') return _Mapping(x=self._merge(self.x, mapping.x), y=self._merge(self.y, mapping.y), ildj_map=self._merge_dicts(self.ildj_map, mapping.ildj_map), kwargs=self._merge(self.kwargs, mapping.kwargs))
Returns new _Mapping with args merged with self. Args: x: `Tensor`. Forward. y: `Tensor`. Inverse. ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor` representing the inverse log det jacobian. kwargs: Python dictionary. Extra args supplied to forward/inverse/etc functions. mapping: Instance of _Mapping to merge. Can only be specified if no other arg is specified. Returns: mapping: New instance of `_Mapping` which has inputs merged with self. Raises: ValueError: if mapping and any other arg is not `None`.
github-repos
def capture_widget(widget, path=None): if use_qt5: pixmap = widget.grab() else: pixmap = QtGui.QPixmap.grabWidget(widget) if path: pixmap.save(path) else: image_buffer = QtCore.QBuffer() image_buffer.open(QtCore.QIODevice.ReadWrite) pixmap.save(image_buffer, 'PNG') return image_buffer.data().data()
Grab an image of a Qt widget Args: widget: The Qt Widget to capture path (optional): The path to save to. If not provided - will return image data. Returns: If a path is provided, the image will be saved to it. If not, the PNG buffer will be returned.
codesearchnet
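A minimal end-to-end sketch for capture_widget, assuming a Qt5 binding is installed (the use_qt5 branch) and that the helper is importable from its module.

import sys
from PyQt5 import QtWidgets   # assumed binding; the helper also supports the Qt4 path

app = QtWidgets.QApplication(sys.argv)
label = QtWidgets.QLabel('Hello, screenshot')
label.resize(200, 50)
label.show()

capture_widget(label, path='label.png')   # saves a PNG next to the script
png_bytes = capture_widget(label)         # or return the raw PNG buffer
print(len(png_bytes), 'bytes of PNG data')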
def _peek(self, chars=1): line = self._socket.recv(chars, socket.MSG_PEEK) logger.debug(('Server sent (peek): ' + line.rstrip())) return line
Peek at the data in the server response. Peeking should only be done when the response can be predicted. Make sure that the socket will not block by requesting too much data from it while peeking. Args: chars -- the number of characters to peek.
codesearchnet
def GetControlSequenceLen(self, buf): if not self._csi or not buf.startswith(self._csi): return 0 n = 0 for c in buf: n += 1 if c.isalpha(): break return n
Returns the control sequence length at the beginning of buf. Used in display width computations. Control sequences have display width 0. Args: buf: The string to check for a control sequence. Returns: The control sequence length at the beginning of buf or 0 if buf does not start with a control sequence.
github-repos
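A standalone illustration of the behaviour described above, assuming the object's _csi attribute holds the standard ANSI Control Sequence Introducer.

CSI = '\x1b['   # assumed value of self._csi

def control_sequence_len(buf):
    # Length of a leading control sequence, or 0 if buf does not start with one.
    if not buf.startswith(CSI):
        return 0
    n = 0
    for c in buf:
        n += 1
        if c.isalpha():   # the final byte of a CSI sequence is a letter
            break
    return n

print(control_sequence_len('\x1b[31mred text'))  # -> 5, i.e. '\x1b[31m' has display width 0
print(control_sequence_len('plain text'))        # -> 0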
def set_style(self, column, style): column_idx = None while (len(self.headers) > len(self.__style_list)): self.__style_list.append(None) if isinstance(column, six.integer_types): column_idx = column elif isinstance(column, six.string_types): try: column_idx = self.headers.index(column) except ValueError: pass if (column_idx is not None): self.__style_list[column_idx] = style self.__clear_preprocess() self._dp_extractor.format_flags_list = [_ts_to_flag[self.__get_thousand_separator(col_idx)] for col_idx in range(len(self.__style_list))] return raise ValueError('column must be an int or string: actual={}'.format(column))
Set |Style| for a specific column. Args: column (|int| or |str|): Column specifier. column index or header name correlated with the column. style (|Style|): Style value to be set to the column. Raises: ValueError: If the column specifier is invalid.
codesearchnet
def AddKeyByPath(self, key_path, registry_key): if not key_path.startswith(definitions.KEY_PATH_SEPARATOR): raise ValueError('Key path does not start with: {0:s}'.format( definitions.KEY_PATH_SEPARATOR)) if not self._root_key: self._root_key = FakeWinRegistryKey(self._key_path_prefix) path_segments = key_paths.SplitKeyPath(key_path) parent_key = self._root_key for path_segment in path_segments: try: subkey = FakeWinRegistryKey(path_segment) parent_key.AddSubkey(subkey) except KeyError: subkey = parent_key.GetSubkeyByName(path_segment) parent_key = subkey parent_key.AddSubkey(registry_key)
Adds a Windows Registry key for a specific key path. Args: key_path (str): Windows Registry key path to add the key. registry_key (WinRegistryKey): Windows Registry key. Raises: KeyError: if the subkey already exists. ValueError: if the Windows Registry key cannot be added.
juraj-google-style
def build_signature_def(inputs=None, outputs=None, method_name=None, defaults=None): signature_def = meta_graph_pb2.SignatureDef() if inputs is not None: for item in inputs: signature_def.inputs[item].CopyFrom(inputs[item]) if outputs is not None: for item in outputs: signature_def.outputs[item].CopyFrom(outputs[item]) if method_name is not None: signature_def.method_name = method_name if defaults is not None: for arg_name, default in defaults.items(): if isinstance(default, ops.EagerTensor): signature_def.defaults[arg_name].CopyFrom(tensor_util.make_tensor_proto(default.numpy())) elif default.op.type == 'Const': signature_def.defaults[arg_name].CopyFrom(default.op.get_attr('value')) else: raise ValueError(f'Unable to convert object {str(default)} of type {type(default)} to TensorProto.') return signature_def
Utility function to build a SignatureDef protocol buffer. Args: inputs: Inputs of the SignatureDef defined as a proto map of string to tensor info. outputs: Outputs of the SignatureDef defined as a proto map of string to tensor info. method_name: Method name of the SignatureDef as a string. defaults: Defaults of the SignatureDef defined as a proto map of string to TensorProto. Returns: A SignatureDef protocol buffer constructed based on the supplied arguments.
github-repos
def max_range(ranges, combined=True): try: with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered') values = [tuple(np.NaN if v is None else v for v in r) for r in ranges] if pd and any(isinstance(v, datetime_types) and not isinstance(v, cftime_types) for r in values for v in r): converted = [] for l, h in values: if isinstance(l, datetime_types) and isinstance(h, datetime_types): l, h = (pd.Timestamp(l).to_datetime64(), pd.Timestamp(h).to_datetime64()) converted.append((l, h)) values = converted arr = np.array(values) if not len(arr): return np.NaN, np.NaN elif arr.dtype.kind in 'OSU': arr = list(python2sort([ v for r in values for v in r if not is_nan(v) and v is not None])) return arr[0], arr[-1] elif arr.dtype.kind in 'M': return ((arr.min(), arr.max()) if combined else (arr[:, 0].min(), arr[:, 1].min())) if combined: return (np.nanmin(arr), np.nanmax(arr)) else: return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1])) except: return (np.NaN, np.NaN)
Computes the maximal lower and upper bounds from a list of bounds. Args: ranges (list of tuples): A list of range tuples combined (boolean, optional): Whether to combine bounds, i.e. whether the range should be computed over the lower and upper bounds together or on each independently Returns: The maximum range as a single tuple
juraj-google-style
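A short, hedged example of the intended behaviour of max_range; the import location is an assumption (in HoloViews this utility lives in holoviews.core.util).

import numpy as np
from holoviews.core.util import max_range   # assumed import path

ranges = [(0, 5), (np.nan, 8), (None, 3), (-2, 4)]
print(max_range(ranges))   # -> (-2.0, 8.0): None/NaN entries are ignored
print(max_range([]))       # -> (nan, nan) when nothing can be computed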
def __init__(self, api_key: str, config: interfaces.Config | None=None): self._config = config or interfaces.Config() self._genai_processor = genai_model.GenaiModel(api_key=api_key, model_name=self._config.topic_researcher_model_name, generate_content_config=types.GenerateContentConfig(tools=self._config.enabled_research_tools)) p_preamble = preamble.Preamble(content=[ProcessorPart(prompts.TOPIC_RESEARCH_PREAMBLE), ProcessorPart('Topic to research: ')]) p_verbalizer = topic_verbalizer.TopicVerbalizer(config=self._config) p_suffix = preamble.Suffix(content=[ProcessorPart('Your research: ')]) self._pipeline = p_verbalizer + p_preamble + p_suffix + self._genai_processor
Initializes the TopicResearcher. Args: api_key: The API key to use for the GenAI API. config: The agent configuration.
github-repos
def _file_changed_nilrt(full_filepath): rs_state_dir = '/var/lib/salt/restartcheck_state' base_filename = os.path.basename(full_filepath) timestamp_file = os.path.join(rs_state_dir, '{0}.timestamp'.format(base_filename)) md5sum_file = os.path.join(rs_state_dir, '{0}.md5sum'.format(base_filename)) if ((not os.path.exists(timestamp_file)) or (not os.path.exists(md5sum_file))): return True prev_timestamp = __salt__['file.read'](timestamp_file).rstrip() cur_timestamp = str(int(os.path.getmtime(full_filepath))) if (prev_timestamp != cur_timestamp): return True return bool(__salt__['cmd.retcode']('md5sum -cs {0}'.format(md5sum_file), output_loglevel='quiet'))
Detect whether a file changed in an NILinuxRT system using md5sum and timestamp files from a state directory. Returns: - False if md5sum/timestamp state files don't exist - True/False depending if ``base_filename`` got modified/touched
codesearchnet
def poll( self, transaction_hash: bytes, ): if len(transaction_hash) != 32: raise ValueError( 'transaction_hash must be a 32 byte hash', ) transaction_hash = encode_hex(transaction_hash) last_result = None while True: transaction = self.web3.eth.getTransaction(transaction_hash) if transaction is None and last_result is not None: raise Exception('invalid transaction, check gas price') if transaction and transaction['blockNumber'] is not None: last_result = transaction transaction_block = transaction['blockNumber'] confirmation_block = transaction_block + self.default_block_num_confirmations block_number = self.block_number() if block_number >= confirmation_block: return transaction gevent.sleep(1.0)
Wait until the `transaction_hash` is applied or rejected. Args: transaction_hash: Transaction hash that we are waiting for.
juraj-google-style
def cancel(self, request, *args, **kwargs): status = self.get_object() status.cancel() serializer = StatusSerializer(status, context={'request': request}) return Response(serializer.data)
Cancel the task associated with the specified status record. Arguments: request (Request): A POST including a task status record ID Returns ------- Response: A JSON response indicating whether the cancellation succeeded or not
juraj-google-style
def _api_scrape(json_inp, ndx): try: headers = json_inp['resultSets'][ndx]['headers'] values = json_inp['resultSets'][ndx]['rowSet'] except KeyError: try: headers = json_inp['resultSet'][ndx]['headers'] values = json_inp['resultSet'][ndx]['rowSet'] except KeyError: headers = json_inp['resultSet']['headers'] values = json_inp['resultSet']['rowSet'] if HAS_PANDAS: return DataFrame(values, columns=headers) else: return [dict(zip(headers, value)) for value in values]
Internal method to streamline the getting of data from the json Args: json_inp (json): json input from our caller ndx (int): index where the data is located in the api Returns: If pandas is present: DataFrame (pandas.DataFrame): data set from ndx within the API's json else: A dictionary of both headers and values from the page
juraj-google-style
def chmod_r(root: str, permission: int) -> None: os.chmod(root, permission) for dirpath, dirnames, filenames in os.walk(root): for d in dirnames: os.chmod(os.path.join(dirpath, d), permission) for f in filenames: os.chmod(os.path.join(dirpath, f), permission)
Recursive ``chmod``. Args: root: directory to walk down permission: e.g. ``stat.S_IWUSR``
juraj-google-style
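A hedged usage sketch for chmod_r, restricting a throwaway tree to owner-only access; the helper itself is assumed to be importable, and the permission constants come from the standard stat module.

import os
import stat
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'sub'))
open(os.path.join(root, 'sub', 'file.txt'), 'w').close()

chmod_r(root, stat.S_IRWXU)   # roughly equivalent to chmod -R 700

print(oct(os.stat(os.path.join(root, 'sub')).st_mode & 0o777))   # -> 0o700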
def get_tensor_sharding(tensor): if isinstance(tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled(): sharding = tensor._get_xla_sharding() if sharding is None: return None else: return sharding.SerializeToString() try: return get_op_sharding(tensor.op) except AttributeError: return None
Returns sharding attribute of a Tensor. Args: tensor: a Tensor. Returns: The attribute representing XLA sharding on tensor's op.
github-repos
def update_tag(self, tag_name, description=None, custom_properties=None, **kwargs): data = {'description': (description or ''), 'customProperties': (custom_properties or {})} resp = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), data=data, **kwargs) resp.raise_for_status() return resp.json()
update a tag by name Args: tag_name (string): name of tag to update description (optional[string]): a description custom_properties (optional[dict]): dictionary of custom properties
codesearchnet
def split_by_sparsity(values): dense_values = [] dense_indices = [] sparse_values = [] sparse_indices = [] for i, v in enumerate(values): if is_indexed_slices(v): sparse_values.append(v) sparse_indices.append(i) else: dense_values.append(v) dense_indices.append(i) return (dense_values, dense_indices, sparse_values, sparse_indices)
Split values into dense and sparse values. Args: values: a list of tensors or `PerReplica`s. Returns: Four lists: a list of dense values, a list of their indices in `values` and a list of sparse values, a list of their indices in `values`.
github-repos
def standardize_tuple(value, n, name, allow_zero=False): error_msg = f'The `{name}` argument must be a tuple of {n} integers. Received {name}={value}' if isinstance(value, int): value_tuple = (value,) * n else: try: value_tuple = tuple(value) except TypeError: raise ValueError(error_msg) if len(value_tuple) != n: raise ValueError(error_msg) for single_value in value_tuple: try: int(single_value) except (ValueError, TypeError): error_msg += f'including element {single_value} of type {type(single_value)}' raise ValueError(error_msg) if allow_zero: unqualified_values = {v for v in value_tuple if v < 0} req_msg = '>= 0' else: unqualified_values = {v for v in value_tuple if v <= 0} req_msg = '> 0' if unqualified_values: error_msg += f', including values {unqualified_values} that do not satisfy `value {req_msg}`' raise ValueError(error_msg) return value_tuple
Transforms non-negative/positive integer/integers into an integer tuple. Args: value: int or iterable of ints. The value to validate and convert. n: int. The size of the tuple to be returned. name: string. The name of the argument being validated, e.g. "strides" or "kernel_size". This is only used to format error messages. allow_zero: bool, defaults to `False`. A `ValueError` will raised if zero is received and this argument is `False`. Returns: A tuple of n integers.
github-repos
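A few illustrative calls to standardize_tuple (assumed importable from its Keras utility module), showing the conversions and the error path described in the docstring.

print(standardize_tuple(3, 2, 'kernel_size'))                  # -> (3, 3)
print(standardize_tuple((1, 2), 2, 'strides'))                 # -> (1, 2)
print(standardize_tuple(0, 2, 'dilation', allow_zero=True))    # -> (0, 0)

try:
    standardize_tuple(0, 2, 'strides')   # zero is rejected when allow_zero is False
except ValueError as err:
    print(err)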
def halt(self): if self.is_closed: _std_log.info('Disconnect requested, but AMQP connection already gone') self._channel = None return _std_log.info('Waiting for %d consumer(s) to finish processing before halting', len(self._consumers)) pending_cancels = [] for c in list(self._consumers.values()): pending_cancels.append(c.cancel()) (yield defer.gatherResults(pending_cancels)) _std_log.info('Finished canceling %d consumers', len(self._consumers)) try: (yield self.close()) except pika.exceptions.ConnectionWrongStateError: pass self._consumers = {} self._channel = None
Signal to consumers they should stop after finishing any messages currently being processed, then close the connection. Returns: defer.Deferred: fired when all consumers have successfully stopped and the connection is closed.
codesearchnet
def quote_xml(text): text = _coerce_unicode(text) if text.startswith(CDATA_START): return text return saxutils.escape(text)
Format a value for display as an XML text node. Returns: Unicode string (str on Python 3, unicode on Python 2)
codesearchnet
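Two quick calls showing what quote_xml does and does not escape, assuming the function and its module-level CDATA_START constant are importable.

print(quote_xml('5 < 6 & "fish"'))
# -> 5 &lt; 6 &amp; "fish"   (saxutils.escape handles &, < and >)

print(quote_xml('<![CDATA[already wrapped]]>'))
# -> returned unchanged, because the text starts with the CDATA marker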
def removeColumns(self, columnNames): model = self.tableView.model() if (model is not None): model.removeDataFrameColumns(columnNames) self.removeColumnButton.setChecked(False)
Removes one or multiple columns from the model. This method is also a slot. Args: columnNames (list): A list of columns, which shall be removed from the model.
codesearchnet
def _all_number_groups_are_exactly_present(numobj, normalized_candidate, formatted_number_groups): candidate_groups = re.split(NON_DIGITS_PATTERN, normalized_candidate) if numobj.extension is not None: candidate_number_group_index = len(candidate_groups) - 2 else: candidate_number_group_index = len(candidate_groups) - 1 if (len(candidate_groups) == 1 or candidate_groups[candidate_number_group_index].find(national_significant_number(numobj)) != -1): return True formatted_number_group_index = len(formatted_number_groups) - 1 while (formatted_number_group_index > 0 and candidate_number_group_index >= 0): if (candidate_groups[candidate_number_group_index] != formatted_number_groups[formatted_number_group_index]): return False formatted_number_group_index -= 1 candidate_number_group_index -= 1 return (candidate_number_group_index >= 0 and candidate_groups[candidate_number_group_index].endswith(formatted_number_groups[0]))
Returns True if the groups of digits found in our candidate phone number match our expectations. Arguments: numobj -- the original number we found when parsing normalized_candidate -- the candidate number, normalized to only contain ASCII digits, but with non-digits (spaces etc) retained formatted_number_groups -- the groups of digits that we would expect to see if we formatted this number Returns True if expectations matched.
juraj-google-style
def find_indices(lst, element): result = [] offset = (- 1) while True: try: offset = lst.index(element, (offset + 1)) except ValueError: return result result.append(offset)
Returns the indices for all occurrences of 'element' in 'lst'. Args: lst (list): List to search. element: Element to find. Returns: list: List of indices at which 'element' occurs (empty if not found).
codesearchnet
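A tiny worked example of find_indices, assuming the helper is importable.

lst = ['a', 'b', 'a', 'c', 'a']
print(find_indices(lst, 'a'))   # -> [0, 2, 4]
print(find_indices(lst, 'z'))   # -> [] when the element is absent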
def get_all_datasets(cls, configuration=None, page_size=1000, check_duplicates=True, **kwargs): dataset = Dataset(configuration=configuration) dataset['id'] = 'all datasets' total_rows = kwargs.get('limit', cls.max_int) start = kwargs.get('offset', 0) all_datasets = None attempts = 0 while ((attempts < cls.max_attempts) and (all_datasets is None)): all_datasets = list() for page in range(((total_rows // page_size) + 1)): pagetimespagesize = (page * page_size) kwargs['offset'] = (start + pagetimespagesize) rows_left = (total_rows - pagetimespagesize) rows = min(rows_left, page_size) kwargs['limit'] = rows result = dataset._write_to_hdx('all', kwargs, 'id') datasets = list() if isinstance(result, list): no_results = len(result) if ((no_results == 0) and (page == 0)): all_datasets = None break for datasetdict in result: dataset = Dataset(configuration=configuration) dataset.old_data = dict() dataset.data = datasetdict dataset._dataset_create_resources() datasets.append(dataset) all_datasets += datasets if (no_results < rows): break else: logger.debug(result) if (all_datasets is None): attempts += 1 elif check_duplicates: names_list = [dataset['name'] for dataset in all_datasets] names = set(names_list) if (len(names_list) != len(names)): all_datasets = None attempts += 1 if ((attempts == cls.max_attempts) and (all_datasets is None)): raise HDXError('Maximum attempts reached for getting all datasets!') return all_datasets
Get all datasets in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. page_size (int): Size of page to return. Defaults to 1000. check_duplicates (bool): Whether to check for duplicate datasets. Defaults to True. **kwargs: See below limit (int): Number of rows to return. Defaults to all datasets (sys.maxsize) offset (int): Offset in the complete result for where the set of returned datasets should begin Returns: List[Dataset]: list of all datasets in HDX
codesearchnet
def resource_action(client, action='', log_format='item: %(key)s', **kwargs): result = None try: result = getattr(client, action)(**kwargs) LOG.info(log_format, kwargs) except botocore.exceptions.ClientError as error: error_code = error.response['Error']['Code'] if (error_code == 'AccessDenied'): LOG.fatal(error) raise elif (error_code == 'EntityAlreadyExists'): LOG.info(' '.join(('Found', log_format)), kwargs) else: LOG.fatal(error) return result
Call _action_ using boto3 _client_ with _kwargs_. This is meant for _action_ methods that will create or implicitly prove a given Resource exists. The _log_failure_ flag is available for methods that should always succeed, but will occasionally fail due to unknown AWS issues. Args: client (botocore.client.IAM): boto3 client object. action (str): Client method to call. log_format (str): Generic log message format, 'Added' or 'Found' will be prepended depending on the scenario. prefix (str): Prefix word to use in successful INFO message. **kwargs: Keyword arguments to pass to _action_ method. Returns: dict: boto3 response.
codesearchnet
def get_soa_record(client, zone_id, zone_name): response = client.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=zone_name, StartRecordType="SOA", MaxItems="1") return SOARecord(response["ResourceRecordSets"][0])
Gets the SOA record for zone_name from zone_id. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_id (string): The AWS Route53 zone id of the hosted zone to query. zone_name (string): The name of the DNS hosted zone to create. Returns: :class:`stacker.util.SOARecord`: An object representing the parsed SOA record returned from AWS Route53.
juraj-google-style
def localopt(self, forcefield='mmff94', steps=500): pbmol = pb.Molecule(self._obmol) pbmol.localopt(forcefield=forcefield, steps=steps) self._obmol = pbmol.OBMol
A wrapper to pybel's localopt method to optimize a Molecule. Args: forcefield: Default is mmff94. Options are 'gaff', 'ghemical', 'mmff94', 'mmff94s', and 'uff'. steps: Default is 500.
codesearchnet
def set_query_parameter(url, param_name, param_value): scheme, netloc, path, query_string, fragment = urlsplit(url) query_params = parse_qs(query_string) query_params[param_name] = [param_value] new_query_string = urlencode(query_params, doseq=True) return urlunsplit((scheme, netloc, path, new_query_string, fragment))
Given a URL, set or replace a query parameter and return the modified URL. Args: url: a given URL param_name: the parameter name to add param_value: the parameter value Returns: URL with the added parameter
juraj-google-style
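A worked example for set_query_parameter, assuming it is importable; note that any existing values for the parameter are replaced rather than appended.

url = 'https://example.com/search?q=books&page=2'
print(set_query_parameter(url, 'page', '3'))
# -> https://example.com/search?q=books&page=3
print(set_query_parameter(url, 'sort', 'price'))
# -> https://example.com/search?q=books&page=2&sort=price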
def load_ui_type(uifile): import pysideuic import xml.etree.ElementTree as ElementTree from cStringIO import StringIO parsed = ElementTree.parse(uifile) widget_class = parsed.find('widget').get('class') form_class = parsed.find('class').text with open(uifile, 'r') as f: o = StringIO() frame = {} pysideuic.compileUi(f, o, indent=0) pyc = compile(o.getvalue(), '<string>', 'exec') (exec(pyc) in frame) form_class = frame[('Ui_%s' % form_class)] base_class = eval(('QtWidgets.%s' % widget_class)) return (form_class, base_class)
Pyside equivalent for the loadUiType function in PyQt. From the PyQt4 documentation: Load a Qt Designer .ui file and return a tuple of the generated form class and the Qt base class. These can then be used to create any number of instances of the user interface without having to parse the .ui file more than once. Note: Pyside lacks the "loadUiType" command, so we have to convert the ui file to py code in-memory first and then execute it in a special frame to retrieve the form_class. Args: uifile (str): Absolute path to .ui file Returns: tuple: the generated form class, the Qt base class
codesearchnet
def days_until(self, target_date_tensor): return target_date_tensor.ordinal() - self._ordinals
Computes the number of days until the target dates. Args: target_date_tensor: A DateTensor object broadcastable to the shape of "self". Returns: An int32 tensor with numbers of days until the target dates. #### Example ```python dates = tff.datetime.dates_from_tuples([(2020, 1, 25), (2020, 3, 2)]) target = tff.datetime.dates_from_tuples([(2020, 3, 5)]) dates.days_until(target) # [40, 3] targets = tff.datetime.dates_from_tuples([(2020, 2, 5), (2020, 3, 5)]) dates.days_until(targets) # [11, 3] ```
github-repos
def floodlight_email(config, task: dict, day: str, alerts: dict[str, list[str, str, str, str, int, str]]) -> None: for email, table in alerts.items(): t = EmailTemplate() t.align('center') t.section(True) issues = sum((1 for row in table if row[5] != 'NORMAL')) if issues > 0: subject = '%d Floodlight Alerts For %s' % (issues, day) else: subject = 'All Floodlights Normal For %s' % day t.header(subject) t.paragraph('The following floodlights are being monitored. A status of LOW or HIGH indicates impressions have changed significantly for the day. A status of NORMAL means impressions are close to the average for the past 7 days.') t.table([{'name': 'Date', 'type': 'STRING'}, {'name': 'Floodlight', 'type': 'STRING'}, {'name': 'Activity Id', 'type': 'STRING'}, {'name': 'Activity', 'type': 'STRING'}, {'name': 'Impressions', 'type': 'INTEGER'}, {'name': 'Status', 'type': 'STRING'}], table) t.paragraph('Your monitored floodlights and recipients are listed in the sheet below.') t.button('Floodlight Monitoring Sheet', sheets_url(config, task['auth'], task['sheet']['sheet']), big=True) t.section(False) if config.verbose: print('FLOODLIGHT MONITOR EMAIL ALERTS', email, len(table)) send_email(config, task['auth'], email, None, None, subject, t.get_text(), t.get_html())
Send an email to each alert group with status of all activities. The email template will contain all activities for each email address specified in the input sheet. Args: day - the latest day that was present in all combined reports, used for title of email. alerts - Each email in the sheet with a list of activities and statuses. Returns: Nothing.
github-repos
def get_config_parameter_multiline(config: ConfigParser, section: str, param: str, default: List[str]) -> List[str]: try: multiline = config.get(section, param) lines = [x.strip() for x in multiline.splitlines()] return [line for line in lines if line] except (TypeError, ValueError, NoOptionError): log.warning( "Configuration variable {} not found or improper in section [{}]; " "using default of {!r}", param, section, default) return default
Get multi-line string parameter from ``configparser`` ``.INI`` file, as a list of strings (one per line, ignoring blank lines). Args: config: :class:`ConfigParser` object section: section name within config file param: name of parameter within section default: default value Returns: parameter value, or default
juraj-google-style
def upsert(self, insert_index, val, fn=None): fn = (fn or (lambda current, passed: passed)) self._magnitude = 0 position = self.position_for_index(insert_index) if ((position < len(self.elements)) and (self.elements[position] == insert_index)): self.elements[(position + 1)] = fn(self.elements[(position + 1)], val) else: self.elements.insert(position, val) self.elements.insert(position, insert_index)
Inserts or updates an existing index within the vector. Args: - insert_index (int): The index at which the element should be inserted. - val (int|float): The value to be inserted into the vector. - fn (callable, optional): An optional callable taking two arguments, the current value and the passed value to generate the final inserted value at the position in case of collision.
codesearchnet
def _to_snake_case(string): sub_string = r'\1_\2' string = REGEX_CAMEL_FIRST.sub(sub_string, string) return REGEX_CAMEL_SECOND.sub(sub_string, string).lower()
Return a snake cased version of the input string. Args: string (str): A camel cased string. Returns: str: A snake cased string.
juraj-google-style
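A self-contained version of the camel-to-snake conversion above, using the common two-pass regex pattern that the module-level REGEX_CAMEL_FIRST and REGEX_CAMEL_SECOND constants are assumed to hold.

import re

# Assumed definitions of the module-level constants used by _to_snake_case().
REGEX_CAMEL_FIRST = re.compile(r'(.)([A-Z][a-z]+)')
REGEX_CAMEL_SECOND = re.compile(r'([a-z0-9])([A-Z])')

def to_snake_case(string):
    sub_string = r'\1_\2'
    string = REGEX_CAMEL_FIRST.sub(sub_string, string)
    return REGEX_CAMEL_SECOND.sub(sub_string, string).lower()

print(to_snake_case('CamelCase'))      # -> camel_case
print(to_snake_case('HTTPResponse'))   # -> http_response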
def attention_lm_moe_small(): hparams = attention_lm_moe_base() hparams.num_hidden_layers = 4 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.moe_num_experts = 128 hparams.moe_layers = '2' return hparams
Cheap model for single-gpu training. on lm1b_32k: ~312M params 1.6 steps/sec on [GeForce GTX TITAN X] After 50K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.31 Returns: an hparams object.
codesearchnet
def execute_edit(args, root_dir=None): EDITOR = os.environ.get('EDITOR', 'vim') key = args['key'] status = command_factory('status')({}, root_dir=root_dir) if not isinstance(status['data'], str) and key in status['data']: if status['data'][key]['status'] in ['queued', 'stashed']: command = status['data'][key]['command'] else: print("Entry is not 'queued' or 'stashed'") sys.exit(1) else: print('No entry with this key') sys.exit(1) with tempfile.NamedTemporaryFile(suffix=".tmp") as tf: tf.write(command.encode('utf-8')) tf.flush() call([EDITOR, tf.name]) tf.seek(0) edited_command = tf.read().decode('utf-8') print_command_factory('edit')({ 'key': key, 'command': edited_command, }, root_dir=root_dir)
Edit an existing queue command in the daemon. Args: args['key'] int: The key of the queue entry to be edited root_dir (string): The path to the root directory the daemon is running in.
juraj-google-style
def _DepthwiseConv2dNativeBackpropInputGrad(op: ops.Operation, grad): return [None, gen_nn_ops.depthwise_conv2d_native_backprop_filter(grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), data_format=op.get_attr('data_format')), gen_nn_ops.depthwise_conv2d_native(grad, op.inputs[1], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), data_format=op.get_attr('data_format'))]
The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. the input and the filter
github-repos
def get_definition(self, stmt: Statement, sctx: SchemaContext) -> Tuple[(Statement, SchemaContext)]: if (stmt.keyword == 'uses'): kw = 'grouping' elif (stmt.keyword == 'type'): kw = 'typedef' else: raise ValueError("not a 'uses' or 'type' statement") (loc, did) = self.resolve_pname(stmt.argument, sctx.text_mid) if (did == sctx.text_mid): dstmt = stmt.get_definition(loc, kw) if dstmt: return (dstmt, sctx) else: dstmt = self.modules[did].statement.find1(kw, loc) if dstmt: return (dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, did)) for sid in self.modules[did].submodules: dstmt = self.modules[sid].statement.find1(kw, loc) if dstmt: return (dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, sid)) raise DefinitionNotFound(kw, stmt.argument)
Find the statement defining a grouping or derived type. Args: stmt: YANG "uses" or "type" statement. sctx: Schema context where the definition is used. Returns: A tuple consisting of the definition statement ('grouping' or 'typedef') and schema context of the definition. Raises: ValueError: If `stmt` is neither "uses" nor "type" statement. ModuleNotRegistered: If `mid` is not registered in the data model. UnknownPrefix: If the prefix specified in the argument of `stmt` is not declared. DefinitionNotFound: If the corresponding definition is not found.
codesearchnet
def get_windows_if_list(extended=False): def _get_mac(x): size = x["physical_address_length"] if size != 6: return "" data = bytearray(x["physical_address"]) return str2mac(bytes(data)[:size]) def _get_ips(x): unicast = x['first_unicast_address'] anycast = x['first_anycast_address'] multicast = x['first_multicast_address'] def _resolve_ips(y): if not isinstance(y, list): return [] ips = [] for ip in y: addr = ip['address']['address'].contents if addr.si_family == socket.AF_INET6: ip_key = "Ipv6" si_key = "sin6_addr" else: ip_key = "Ipv4" si_key = "sin_addr" data = getattr(addr, ip_key) data = getattr(data, si_key) data = bytes(bytearray(data.byte)) if data: ips.append(inet_ntop(addr.si_family, data)) return ips ips = [] ips.extend(_resolve_ips(unicast)) if extended: ips.extend(_resolve_ips(anycast)) ips.extend(_resolve_ips(multicast)) return ips if six.PY2: _str_decode = lambda x: x.encode('utf8', errors='ignore') else: _str_decode = plain_str return [ { "name": _str_decode(x["friendly_name"]), "win_index": x["interface_index"], "description": _str_decode(x["description"]), "guid": _str_decode(x["adapter_name"]), "mac": _get_mac(x), "ipv4_metric": 0 if WINDOWS_XP else x["ipv4_metric"], "ipv6_metric": 0 if WINDOWS_XP else x["ipv6_metric"], "ips": _get_ips(x) } for x in GetAdaptersAddresses() ]
Returns windows interfaces through GetAdaptersAddresses. params: - extended: include anycast and multicast IPv6 (default False)
juraj-google-style
def _print_reference(self, reference: message.Message) -> None: set_oneof = reference.WhichOneof('reference') if self.json_format == _FhirJsonFormat.PURE and set_oneof is not None and (set_oneof != 'uri'): standardized_reference = copy.copy(reference) new_uri = proto_utils.get_value_at_field(standardized_reference, 'uri') proto_utils.set_value_at_field(new_uri, 'value', references.reference_to_string(reference)) self._print_message(standardized_reference) else: self._print_message(reference)
Standardizes and prints the provided reference. Note that "standardization" in the case of PURE FHIR JSON refers to un-typing the typed-reference prior to printing. Args: reference: The reference to print.
github-repos
def handle(self, connection_id, message_content): try: request = self._request_proto() request.ParseFromString(message_content) except DecodeError: LOGGER.info('Protobuf %s failed to deserialize', request) return self._wrap_result(self._status.INTERNAL_ERROR) try: response = self._respond(request) except _ResponseFailed as e: response = e.status return self._wrap_result(response)
Handles parsing incoming requests, and wrapping the final response. Args: connection_id (str): ZMQ identity sent over ZMQ socket message_content (bytes): Byte encoded request protobuf to be parsed Returns: HandlerResult: result to be sent in response back to client
juraj-google-style
def _add_value_to_extension(msg: message.Message, extension: message.Message, is_choice_type: bool) -> None: if is_choice_type: oneofs = msg.DESCRIPTOR.oneofs if not oneofs: raise fhir_errors.InvalidFhirError(f'Choice type is missing a oneof: {msg.DESCRIPTOR.full_name}') value_field_name = msg.WhichOneof(oneofs[0].name) if value_field_name is None: raise ValueError(f'Choice type has no value set: {msg.DESCRIPTOR.full_name}') value_field = msg.DESCRIPTOR.fields_by_name[value_field_name] _verify_field_is_proto_message_type(value_field) _add_value_to_extension(proto_utils.get_value_at_field(msg, value_field), extension, False) else: value_field_mapping = _get_value_field_mapping_for_extension(extension) value_field = value_field_mapping.get(msg.DESCRIPTOR.full_name) if value_field is not None: proto_utils.set_value_at_field(cast(Any, extension).value, cast(Any, value_field), msg) elif annotation_utils.has_fhir_valueset_url(msg): codes.copy_code(msg, cast(Any, extension).value.code) elif fhir_types.is_type_or_profile_of_coding(msg): codes.copy_coding(msg, cast(Any, extension).value.coding) else: _add_fields_to_extension(msg, extension)
Adds the fields from msg to a generic Extension. Attempts are first made to set the "value" field of the generic Extension based on the type of field set on message. If this fails, checks are made against the generic Code and Coding types, and finally we fall back to adding the message's fields as sub-extensions. Args: msg: The message whose values to add to extension. extension: The generic Extension to populate. is_choice_type: Whether or not the provided message represents a "choice" type.
github-repos
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None): if ((frames is None) and (fps is None)): return times_to_ms(h, m, s, ms) elif ((frames is not None) and (fps is not None)): return frames_to_ms(frames, fps) else: raise ValueError('Both fps and frames must be specified')
Convert time to milliseconds. See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified, :func:`pysubs2.time.frames_to_ms()` is called instead. Raises: ValueError: Invalid fps, or one of frames/fps is missing. Example: >>> make_time(s=1.5) 1500 >>> make_time(frames=50, fps=25) 2000
codesearchnet
def has_atomic_move(path): try: return _pywrap_file_io.HasAtomicMove(compat.path_to_bytes(path)) except errors.OpError: return True
Checks whether the file system supports atomic moves. Returns whether or not the file system of the given path supports the atomic move operation for a file or folder. If atomic move is supported, it is recommended to use a temp location for writing and then move to the final location. Args: path: string, path to a file Returns: True, if the path is on a file system that supports atomic move False, if the file system does not support atomic move. In such cases we need to be careful about using moves. In some cases it is safer not to use temporary locations in this case.
github-repos
def diff_text1(self, diffs): text = [] for (op, data) in diffs: if op != self.DIFF_INSERT: text.append(data) return "".join(text)
Compute and return the source text (all equalities and deletions). Args: diffs: Array of diff tuples. Returns: Source text.
juraj-google-style
def setup_engines(client=None): if not client: try: client = ipyparallel.Client() except: raise DistobClusterError( u'Could not connect to an ipyparallel cluster. Make sure a cluster is started (e.g. with ipcluster start)') eids = client.ids if not eids: raise DistobClusterError( u'No ipyparallel compute engines are available') nengines = len(eids) dv = client[eids] dv.use_dill() with dv.sync_imports(quiet=True): import distob ars = [] for i in eids: dv.targets = i ars.append(dv.apply_async(_remote_setup_engine, i, nengines)) dv.wait(ars) for ar in ars: if not ar.successful(): raise ar.r if distob.engine is None: distob.engine = ObjectHub(-1, client)
Prepare all iPython engines for distributed object processing. Args: client (ipyparallel.Client, optional): If None, will create a client using the default ipyparallel profile.
juraj-google-style
def get_atlas_per_gene_mutation_df(self, gene_id): g = self.reference_gempro.genes.get_by_id(gene_id) single, fingerprint = g.protein.sequence_mutation_summary(alignment_type='seqalign') structure_type_suffix = 'NA' appender = [] for k, strains in single.items(): to_append = {} orig_res = k[0] resnum = int(k[1]) mutated_res = k[2] num_strains_mutated = len(strains) strain_ids = [str(x.split(g.id + '_')[1]) for x in strains] to_append['ref_residue'] = orig_res to_append['ref_resnum'] = resnum to_append['strain_residue'] = mutated_res to_append['num_strains_mutated'] = num_strains_mutated to_append['strains_mutated'] = ';'.join(strain_ids) to_append['at_disulfide_bridge'] = False origres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(orig_res) mutres_props = ssbio.protein.sequence.properties.residues.residue_biochemical_definition(mutated_res) to_append['ref_residue_prop'] = origres_props to_append['strain_residue_prop'] = mutres_props grantham_s, grantham_txt = ssbio.protein.sequence.properties.residues.grantham_score(orig_res, mutated_res) to_append['grantham_score'] = grantham_s to_append['grantham_annotation'] = grantham_txt to_append.update(g.protein.get_residue_annotations(seq_resnum=resnum, use_representatives=True)) if g.protein.representative_structure: if g.protein.representative_structure.is_experimental: to_append['structure_type'] = 'EXP' else: to_append['structure_type'] = 'HOM' repchain = g.protein.representative_chain repchain_annotations = g.protein.representative_structure.chains.get_by_id(repchain).seq_record.annotations if 'SSBOND-biopython' in repchain_annotations: structure_resnum = g.protein.map_seqprop_resnums_to_structprop_resnums(resnums=resnum, use_representatives=True) if resnum in structure_resnum: ssbonds = repchain_annotations['SSBOND-biopython'] ssbonds_res = [] for x in ssbonds: ssbonds_res.append(x[0]) ssbonds_res.append(x[1]) if structure_resnum in ssbonds_res: to_append['at_disulfide_bridge'] = True appender.append(to_append) if not appender: return pd.DataFrame() cols = ['ref_residue', 'ref_resnum', 'strain_residue', 'num_strains_mutated', 'strains_mutated', 'ref_residue_prop', 'strain_residue_prop', 'grantham_score', 'grantham_annotation', 'at_disulfide_bridge', 'seq_SS-sspro', 'seq_SS-sspro8', 'seq_RSA-accpro', 'seq_RSA-accpro20', 'seq_TM-tmhmm', 'struct_SS-dssp', 'struct_RSA-dssp', 'struct_ASA-dssp', 'struct_CA_DEPTH-msms', 'struct_RES_DEPTH-msms', 'struct_PHI-dssp', 'struct_PSI-dssp', 'struct_resnum', 'struct_residue' 'strains_mutated'] df_gene_summary = pd.DataFrame.from_records(appender, columns=cols) df_gene_summary.dropna(axis=1, how='all', inplace=True) df_gene_summary.sort_values(by='ref_resnum', inplace=True) df_gene_summary = df_gene_summary.set_index('ref_resnum') return df_gene_summary
Create a single data frame which summarizes a gene and its mutations. Args: gene_id (str): Gene ID in the base model Returns: DataFrame: Pandas DataFrame of the results
juraj-google-style
def loop_until_timeout_or_true(timeout_s, function, sleep_s=1): return loop_until_timeout_or_valid(timeout_s, function, (lambda x: x), sleep_s)
Loops until the specified function returns True or a timeout is reached. Note: The function may return anything which evaluates to implicit True. This function will loop calling it as long as it continues to return something which evaluates to False. We ensure this method is called at least once regardless of timeout. Args: timeout_s: The number of seconds to wait until a timeout condition is reached. As a convenience, this accepts None to mean never timeout. Can also be passed a PolledTimeout object instead of an integer. function: The function to call each iteration. sleep_s: The number of seconds to wait after calling the function. Returns: Whatever the function returned last.
codesearchnet
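A minimal usage sketch for `loop_until_timeout_or_true` above; the polled condition here is a plain wall-clock check so the snippet is self-contained, but in practice `function` would poll hardware or an external service.

import time

_deadline = time.monotonic() + 3

# Poll once per second, giving up after 30 seconds; returns True here because
# the condition becomes true after roughly 3 seconds.
succeeded = loop_until_timeout_or_true(
    30, lambda: time.monotonic() >= _deadline, sleep_s=1)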
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None): return ImportLaidOutTensorOperation(mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]
Import a laid_out_tensor. For expert users. The input must be laid out appropriately given the eventual MeshImpl, and layout. Args: mesh: a Mesh laid_out_tensor: a LaidOutTensor shape: a mtf.Shape name: an optional string Returns: a mtf.Tensor
codesearchnet
def content_ratings(self, **kwargs): path = self._get_id_path('content_ratings') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the content ratings for a TV Series.

        Args:
            language: (optional) ISO 639 code.
            append_to_response: (optional) Comma separated, any collection method.

        Returns:
            A dict representation of the JSON returned from the API.
codesearchnet
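A hedged usage sketch for `content_ratings` above, assuming the method lives on a tmdbsimple-style `TV` object; the API key placeholder and series id are illustrative only.

import tmdbsimple as tmdb

tmdb.API_KEY = 'YOUR_API_KEY'   # placeholder, not a real key
tv = tmdb.TV(1399)              # hypothetical TV series id
ratings = tv.content_ratings(language='en')
for entry in ratings.get('results', []):
    print(entry['iso_3166_1'], entry['rating'])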
def create_output_excerpts(self, test_info): dest_path = test_info.output_path utils.create_dir(dest_path) filename = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, test_info, 'txt') excerpt_file_path = os.path.join(dest_path, filename) with open(excerpt_file_path, 'w', encoding='utf-8', errors='replace', newline='') as out: while self._adb_logcat_file_obj: line = self._adb_logcat_file_obj.readline() if not line: break out.write(line) self._ad.log.debug('logcat excerpt created at: %s', excerpt_file_path) return [excerpt_file_path]
Convenient method for creating excerpts of adb logcat. This copies logcat lines from self.adb_logcat_file_path to an excerpt file, starting from the location where the previous excerpt ended. Call this method at the end of: `setup_class`, `teardown_test`, and `teardown_class`. Args: test_info: `self.current_test_info` in a Mobly test. Returns: List of strings, the absolute paths to excerpt files.
github-repos
def set(self, name, value): if name not in self._options: self.register(name, self._generator()) return self._options[name].__set__(self, value)
Set an option value. Args: name (str): The name of the option. value: The value to set the option to. Raises: TypeError: If the value is not a string or appropriate native type. ValueError: If the value is a string but cannot be coerced. If the name is not registered a new option will be created using the option generator.
juraj-google-style
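A brief sketch of the register-on-miss behaviour described above; `Options` is an assumed name for the class that owns this `set` method and its option generator.

opts = Options()           # hypothetical container built on the option generator
opts.set('retries', '3')   # unknown name: registered first, then assigned
opts.set('retries', 5)     # a native int is accepted as well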
def assign_sub(self, delta, use_locking=False, name=None, read_value=True): raise NotImplementedError
Subtracts a value from this variable. This is essentially a shortcut for `assign_sub(self, delta)`. Args: delta: A `Tensor`. The value to subtract from this variable. use_locking: If `True`, use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: The updated variable. If `read_value` is false, instead returns None in Eager mode and the assign op in graph mode.
github-repos
def all_pairs(sets, similarity_func_name='jaccard', similarity_threshold=0.5): if ((not isinstance(sets, list)) or (len(sets) == 0)): raise ValueError('Input parameter sets must be a non-empty list.') if (similarity_func_name not in _similarity_funcs): raise ValueError('Similarity function {} is not supported.'.format(similarity_func_name)) if ((similarity_threshold < 0) or (similarity_threshold > 1.0)): raise ValueError('Similarity threshold must be in the range [0, 1].') if (similarity_func_name not in _symmetric_similarity_funcs): raise ValueError('The similarity function must be symmetric ({})'.format(', '.join(_symmetric_similarity_funcs))) similarity_func = _similarity_funcs[similarity_func_name] overlap_threshold_func = _overlap_threshold_funcs[similarity_func_name] position_filter_func = _position_filter_funcs[similarity_func_name] (sets, _) = _frequency_order_transform(sets) index = defaultdict(list) logging.debug('Find all pairs with similarities >= {}...'.format(similarity_threshold)) count = 0 for x1 in np.argsort([len(s) for s in sets]): s1 = sets[x1] t = overlap_threshold_func(len(s1), similarity_threshold) prefix_size = ((len(s1) - t) + 1) prefix = s1[:prefix_size] candidates = set([x2 for (p1, token) in enumerate(prefix) for (x2, p2) in index[token] if position_filter_func(s1, sets[x2], p1, p2, similarity_threshold)]) for x2 in candidates: s2 = sets[x2] sim = similarity_func(s1, s2) if (sim < similarity_threshold): continue (yield tuple((sorted([x1, x2], reverse=True) + [sim]))) count += 1 for (j, token) in enumerate(prefix): index[token].append((x1, j)) logging.debug('{} pairs found.'.format(count))
Find all pairs of sets with similarity greater than a threshold. This is an implementation of the All-Pair-Binary algorithm in the paper "Scaling Up All Pairs Similarity Search" by Bayardo et al., with position filter enhancement. Args: sets (list): a list of sets, each entry is an iterable representing a set. similarity_func_name (str): the name of the similarity function used; this function currently supports `"jaccard"` and `"cosine"`. similarity_threshold (float): the threshold used, must be a float between 0 and 1.0. Returns: pairs (Iterator[tuple]): an iterator of tuples `(x, y, similarity)` where `x` and `y` are the indices of sets in the input list `sets`.
codesearchnet
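A small end-to-end sketch for `all_pairs` above; with a Jaccard threshold of 0.6, only the first two sets qualify (overlap 3, union 5).

sets = [[1, 2, 3, 4], [2, 3, 4, 5], [7, 8, 9]]

for x, y, sim in all_pairs(sets, similarity_func_name='jaccard',
                           similarity_threshold=0.6):
    # Expected single pair: indices 1 and 0 with similarity 0.6.
    print(x, y, sim)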
def size(x): if any_symbolic_tensors((x,)): return Size().symbolic_call(x) return backend.numpy.size(x)
Return the number of elements in a tensor. Args: x: Input tensor. Returns: Number of elements in `x`.
github-repos
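A one-liner usage sketch, assuming this is the `size` exposed under `keras.ops`.

from keras import ops

x = ops.ones((2, 3))
print(ops.size(x))  # 6 elements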
def GetSubkeyByName(self, name): pyregf_key = self._pyregf_key.get_sub_key_by_name(name) if not pyregf_key: return None key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name]) return REGFWinRegistryKey(pyregf_key, key_path=key_path)
Retrieves a subkey by name. Args: name (str): name of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found.
juraj-google-style
def topological_sort_operations(operations): in_degrees = collections.OrderedDict() for op in reversed(operations): if op not in in_degrees: in_degrees[op] = 0 for next_op in reversed(_op_dependencies(op)): in_degrees[next_op] = in_degrees.get(next_op, 0) + 1 nexts = [] for op, in_degree in in_degrees.items(): if in_degree == 0: nexts.append(op) order = {} next_order = 0 while nexts: op, nexts = (nexts[0], nexts[1:]) order[op] = next_order next_order += 1 for next_op in reversed(_op_dependencies(op)): in_degrees[next_op] -= 1 if in_degrees[next_op] == 0: nexts.append(next_op) assert len(order) == len(operations) return order
Topological sorts a list of operations. This does a topological sort of the operations in a graph. The edges include both data dependencies and control dependencies. Note that the edge goes from an operation to its dependencies. The sort is intentionally unstable, reversing orders of operations and dependencies on ties. Args: operations: a list of tf.Operation in the same graph. Returns: A map from a tf.Operation to its topological order.
github-repos
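A usage sketch on a tiny graph; because edges point from an operation to its dependencies, the consumer `c` is assigned a smaller order index than its inputs.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1, name='a')
    b = tf.constant(2, name='b')
    c = tf.add(a, b, name='c')

order = topological_sort_operations(g.get_operations())
assert order[c.op] < order[a.op] and order[c.op] < order[b.op]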
def _record_local(self, node, op, name, typ, orig_val=None, final=None): if orig_val: self.current_local_ops.append(LocalOp(name, LocalOp.Op.ASSIGN)) if typ: self.current_local_ops.append(LocalOp(name, LocalOp.Op.ANNOTATE)) self._update_annotations_dict(node, op, name, typ, orig_val, self.current_annotated_locals, final=final)
Record a type annotation on a local variable. This method records three types of local operations: - An annotation, e.g., `x: int`. In this case, `typ` is PyTDClass(int) and `orig_val` is None. - An assignment, e.g., `x = 0`. In this case, `typ` is None and `orig_val` is Instance(int). - An annotated assignment, e.g., `x: int = None`. In this case, `typ` is PyTDClass(int) and `orig_val` is Instance(None). Args: node: The current node. op: The current opcode. name: The variable name. typ: The annotation. orig_val: The original value, if any. final: Whether the annotation is tagged Final (None to preserve any existing Final tag when updating an existing annotation).
github-repos
def queryize(terms, exclude_screen_name=None): ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-')) nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-')) sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else '' return ' '.join((ors, nots, sn))
Create query from list of terms, using OR but intelligently excluding terms beginning with '-' (Twitter's NOT operator). Optionally add -from:exclude_screen_name. >>> helpers.queryize(['apple', 'orange', '-peach']) u'apple OR orange -peach' Args: terms (list): Search terms. exclude_screen_name (str): A single screen name to exclude from the search. Returns: A string ready to be passed to tweepy.API.search
juraj-google-style
def addSearchers(self, *searchers): self._searchers.extend(searchers) debug.logger & debug.flagCompiler and debug.logger( 'current compiled MIBs location(s): %s' % ', '.join([str(x) for x in self._searchers])) return self
Add more transformed MIBs repositories.

        MibCompiler.compile will invoke each of the configured searcher objects
        in order of their addition, asking each whether an already-transformed
        MIB module exists and is more recent than specified.

        Args:
            searchers: searcher object(s)

        Returns:
            reference to itself (can be used for call chaining)
juraj-google-style
def get(self,identity,params=None, headers=None): path = self._sub_url_params('/creditor_bank_accounts/:identity', { 'identity': identity, }) response = self._perform_request('GET', path, params, headers, retry_failures=True) return self._resource_for(response)
Get a single creditor bank account.

        Retrieves the details of an existing creditor bank account.

        Args:
              identity (string): Unique identifier, beginning with "BA".
              params (dict, optional): Query string parameters.

        Returns:
              A CreditorBankAccount instance.
juraj-google-style
def stage_tc_indicator_entity(self, indicator_data): path = '@.{value: summary, ' path += 'type: type, ' path += 'ownerName: ownerName, ' path += 'confidence: confidence || `0`, ' path += 'rating: rating || `0`}' return self.path_data(indicator_data, path)
Convert JSON data to TCEntity.

        Args:
            indicator_data: The indicator data (parsed JSON) to convert.

        Returns:
            The indicator data mapped to TCEntity fields (value, type, ownerName,
            confidence, rating).
codesearchnet
def correct_structure(self, atol=1e-08): return np.allclose(self.structure.lattice.matrix, self.prim.lattice.matrix, atol=atol)
Determine if the structure matches the standard primitive structure. The standard primitive will be different between seekpath and pymatgen high-symmetry paths, but this is handled by the specific subclasses. Args: atol (:obj:`float`, optional): Absolute tolerance used to compare the input structure with the primitive standard structure. Returns: bool: ``True`` if the structure is the same as the standard primitive, otherwise ``False``.
codesearchnet
def _OpenParentFile(self, file_system, path_spec, vhdi_file): location = getattr(path_spec, 'location', None) if (not location): raise errors.PathSpecError('Unsupported path specification without location.') location_path_segments = file_system.SplitPath(location) parent_filename = vhdi_file.parent_filename (_, _, parent_filename) = parent_filename.rpartition('\\') location_path_segments.pop() location_path_segments.append(parent_filename) parent_file_location = file_system.JoinPath(location_path_segments) kwargs = path_spec_factory.Factory.GetProperties(path_spec) kwargs['location'] = parent_file_location if (path_spec.parent is not None): kwargs['parent'] = path_spec.parent parent_file_path_spec = path_spec_factory.Factory.NewPathSpec(path_spec.type_indicator, **kwargs) if (not file_system.FileEntryExistsByPathSpec(parent_file_path_spec)): return file_object = resolver.Resolver.OpenFileObject(parent_file_path_spec, resolver_context=self._resolver_context) vhdi_parent_file = pyvhdi.file() vhdi_parent_file.open_file_object(file_object) if vhdi_parent_file.parent_identifier: self._OpenParentFile(file_system, parent_file_path_spec, vhdi_parent_file) vhdi_file.set_parent(vhdi_parent_file) self._parent_vhdi_files.append(vhdi_parent_file) self._sub_file_objects.append(file_object)
Opens the parent file. Args: file_system (FileSystem): file system of the VHDI file. path_spec (PathSpec): path specification of the VHDI file. vhdi_file (pyvhdi.file): VHDI file. Raises: PathSpecError: if the path specification is incorrect.
codesearchnet
def update_mapping(mapping: Dict[ops.Qid, LogicalIndex], operations: ops.OP_TREE ) -> None: for op in ops.flatten_op_tree(operations): if (isinstance(op, ops.GateOperation) and isinstance(op.gate, PermutationGate)): op.gate.update_mapping(mapping, op.qubits)
Updates a mapping (in place) from qubits to logical indices according to a set of permutation gates. Any gates other than permutation gates are ignored. Args: mapping: The mapping to update. operations: The operations to update according to.
juraj-google-style
def chat(self, id): json = self.skype.conn('GET', '{0}/users/ME/conversations/{1}'.format(self.skype.conn.msgsHost, id), auth=SkypeConnection.Auth.RegToken, params={'view': 'msnp24Equivalent'}).json() cls = SkypeSingleChat if ('threadProperties' in json): info = self.skype.conn('GET', '{0}/threads/{1}'.format(self.skype.conn.msgsHost, json.get('id')), auth=SkypeConnection.Auth.RegToken, params={'view': 'msnp24Equivalent'}).json() json.update(info) cls = SkypeGroupChat return self.merge(cls.fromRaw(self.skype, json))
Get a single conversation by identifier. Args: id (str): single or group chat identifier
codesearchnet
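A hedged SkPy-style usage sketch for `chat` above; `sk` is an assumed authenticated `Skype` instance and the thread identifier is illustrative.

ch = sk.chats.chat('19:0123456789abcdef@thread.skype')  # group threads use the 19: prefix
print(type(ch).__name__)  # SkypeGroupChat here, SkypeSingleChat for one-to-one chats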
def set_vrf(self, name, vrf, default=False, disable=False): commands = [('interface %s' % name)] commands.append(self.command_builder('vrf forwarding', vrf, default=default, disable=disable)) return self.configure(commands)
Applies a VRF to the interface Note: VRF being applied to interface must already exist in switch config. Ethernet port must be in routed mode. This functionality can also be handled in the VRF api. Args: name (str): The interface identifier. It must be a full interface name (ie Ethernet, not Et) vrf (str): The vrf name to be applied to the interface default (bool): Specifies the default value for VRF disable (bool): Specifies to disable VRF Returns: True if the operation succeeds otherwise False is returned
codesearchnet
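A hedged sketch assuming a pyeapi-style `node` handle; the interface and VRF name are illustrative, and per the note above the VRF must already exist and the port must be in routed mode.

intf = node.api('interfaces')                    # `node` is an assumed connected handle
intf.set_vrf('Ethernet1', 'red')                 # bind Ethernet1 to VRF red
intf.set_vrf('Ethernet1', 'red', disable=True)   # remove the binding again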
def generate(self, model, outfolder, *, exclude=None): with pythonic_names(): super().generate(model, outfolder) check_dependency = self.with_dependencies and model.eResource if check_dependency: if exclude is None: exclude = set() resource = model.eResource exclude.add(resource) rset = resource.resource_set direct_resources = {r for r in rset.resources.values() if r not in exclude} for resource in direct_resources: self.generate(resource.contents[0], outfolder, exclude=exclude)
Generate model code.

        Args:
            model: The meta-model to generate code for.
            outfolder: Path to the directory that will contain the generated code.
            exclude: List of referenced resources for which code was already generated
                (to prevent regeneration).
juraj-google-style
def save_to_text_file(monsoon_data, file_path): if (not monsoon_data): raise MonsoonError('Attempting to write empty Monsoon data to file, abort') utils.create_dir(os.path.dirname(file_path)) with io.open(file_path, 'w', encoding='utf-8') as f: for md in monsoon_data: f.write(str(md)) f.write(MonsoonData.delimiter)
Save multiple MonsoonData objects to a text file. Args: monsoon_data: A list of MonsoonData objects to write to a text file. file_path: The full path of the file to save to, including the file name.
codesearchnet
def should_stop(self): return self._coord.should_stop()
Check if the coordinator was told to stop. See `Coordinator.should_stop()`. Returns: True if the coordinator was told to stop, False otherwise.
github-repos
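The typical consumption pattern, shown with a `tf.compat.v1` monitored session; `train_op` is a hypothetical training step assumed to be defined elsewhere.

import tensorflow as tf

with tf.compat.v1.train.MonitoredTrainingSession() as sess:
    while not sess.should_stop():
        sess.run(train_op)  # hypothetical unit of work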
def VerifyRow(self, parser_mediator, row): if len(row) < self.MIN_COLUMNS: return False try: timestamp = self._ConvertToTimestamp(row['date'], row['time']) except (ValueError, TypeError): return False if timestamp is None: return False try: action = int(row['action'], 10) except (ValueError, TypeError): return False if action not in formatter.SCAN_RESULTS: return False return True
Verifies if a line of the file is in the expected format. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: bool: True if this is the correct parser, False otherwise.
juraj-google-style
def from_monitoring_infos(monitoring_info_list, user_metrics_only=False): counters = {} distributions = {} gauges = {} string_sets = {} bounded_tries = {} for mi in monitoring_info_list: if user_metrics_only and (not monitoring_infos.is_user_monitoring_info(mi)): continue try: key = _create_metric_key(mi) except ValueError as e: _LOGGER.debug(str(e)) continue metric_result = monitoring_infos.extract_metric_result_map_value(mi) if monitoring_infos.is_counter(mi): counters[key] = metric_result elif monitoring_infos.is_distribution(mi): distributions[key] = metric_result elif monitoring_infos.is_gauge(mi): gauges[key] = metric_result elif monitoring_infos.is_string_set(mi): string_sets[key] = metric_result elif monitoring_infos.is_bounded_trie(mi): bounded_tries[key] = metric_result return (counters, distributions, gauges, string_sets, bounded_tries)
Groups MonitoringInfo objects into counters, distributions, gauges, string sets
    and bounded tries.

    Args:
      monitoring_info_list: An iterable of MonitoringInfo objects.
      user_metrics_only: If true, includes user metrics only.

    Returns:
      A tuple of five dictionaries: counters, distributions, gauges, string sets
      and bounded tries, respectively. Each dictionary contains
      (MetricKey, metric result) pairs.
github-repos
def rank(self, **kwargs): axis = kwargs.get('axis', 0) numeric_only = (True if axis else kwargs.get('numeric_only', False)) func = self._prepare_method(pandas.DataFrame.rank, **kwargs) new_data = self._map_across_full_axis(axis, func) if numeric_only: new_columns = self.compute_index(1, new_data, True) else: new_columns = self.columns new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns) return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
Computes numerical rank along axis. Equal values are set to the average. Returns: DataManager containing the ranks of the values along an axis.
codesearchnet
def TryConsume(self, token): if (self.token == token): self.NextToken() return True return False
Tries to consume a given piece of text. Args: token: Text to consume. Returns: True iff the text was consumed.
codesearchnet
def __recognize_list(self, node: yaml.Node, expected_type: Type) -> RecResult: logger.debug('Recognizing as a list') if (not isinstance(node, yaml.SequenceNode)): message = '{}{}Expected a list here.'.format(node.start_mark, os.linesep) return ([], message) item_type = generic_type_args(expected_type)[0] for item in node.value: (recognized_types, message) = self.recognize(item, item_type) if (len(recognized_types) == 0): return ([], message) if (len(recognized_types) > 1): recognized_types = [List[t] for t in recognized_types] return (recognized_types, message) return ([expected_type], '')
Recognize a node that we expect to be a list of some kind.

        Args:
            node: The node to recognize.
            expected_type: List[...something...]

        Returns:
            expected_type and the empty string if it was recognized,
            [] and an error message otherwise.
codesearchnet