Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def _eval_comparison(self, ident: tuple[str, int | slice | None], op: str, value: str | int | tuple[int, ...]) -> bool: name, key = ident if name == 'sys.version_info': if key is None: key = slice(None, None, None) if isinstance(key, int) and (not isinstance(value, int)): raise _ParseError('an element of sys.version_info must be compared to an integer') if isinstance(key, slice) and (not _is_int_tuple(value)): raise _ParseError('sys.version_info must be compared to a tuple of integers') try: actual = self._options.python_version[key] except IndexError as e: raise _ParseError(str(e)) from e if isinstance(key, slice): actual = _three_tuple(actual) value = _three_tuple(value) elif name == 'sys.platform': if not isinstance(value, str): raise _ParseError('sys.platform must be compared to a string') valid_cmps = (cmp_slots.EQ, cmp_slots.NE) if op not in valid_cmps: raise _ParseError('sys.platform must be compared using %s or %s' % valid_cmps) actual = self._options.platform else: raise _ParseError(f'Unsupported condition: {name!r}.') return cmp_slots.COMPARES[op](actual, value)
Evaluate a comparison and return a bool. Args: ident: A tuple of a dotted name string and an optional __getitem__ key. op: One of the comparison operator strings in cmp_slots.COMPARES. value: The value to be compared against. Returns: The boolean result of the comparison. Raises: ParseError: If the comparison cannot be evaluated.
github-repos
def _get_two_lines(f): l0 = f.readline() l1 = f.readline() return l0, l1
Get the first and second lines of a file. Args: f (filelike): File that is opened for ASCII reading. Returns: tuple: the first and second lines read from the file.
juraj-google-style
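A minimal usage sketch for _get_two_lines, assuming the helper above is importable; an in-memory stream stands in for a real file:
import io

f = io.StringIO('alpha\nbeta\ngamma\n')
l0, l1 = _get_two_lines(f)  # reads exactly the first two lines
print(l0, l1)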
def concurrent_exec(func, param_list): with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor: future_to_params = {executor.submit(func, *p): p for p in param_list} return_vals = [] for future in concurrent.futures.as_completed(future_to_params): params = future_to_params[future] try: return_vals.append(future.result()) except Exception as exc: logging.exception('{} generated an exception: {}'.format(params, traceback.format_exc())) return_vals.append(exc) return return_vals
Executes a function with different parameters pseudo-concurrently. This is basically a map function. Each element (should be an iterable) in the param_list is unpacked and passed into the function. Due to Python's GIL, there's no true concurrency. This is suited for IO-bound tasks. Args: func: The function that performs a task. param_list: A list of iterables, each being a set of params to be passed into the function. Returns: A list of return values from each function execution. If an execution caused an exception, the exception object will be the corresponding result.
codesearchnet
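A small usage sketch for concurrent_exec, assuming the function above is importable; the worker function and its parameters are hypothetical:
def fetch(host, port):  # hypothetical IO-bound worker
    return '%s:%d' % (host, port)

param_list = [('a.example', 80), ('b.example', 443)]
results = concurrent_exec(fetch, param_list)  # each tuple is unpacked into fetch(*p)
print(results)  # values (or exception objects) in completion order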
def set_channel_created(self, channel_link, channel_id): self.channel_link = channel_link self.channel_id = channel_id self.__record_progress(Status.PUBLISH_CHANNEL if config.PUBLISH else Status.DONE)
set_channel_created: records progress after creating channel on Kolibri Studio Args: channel_link (str): link to uploaded channel channel_id (str): id of channel that has been uploaded Returns: None
juraj-google-style
def validate(cls, mapper_spec): writer_spec = cls.get_params(mapper_spec, allow_old=False) if cls.BUCKET_NAME_PARAM not in writer_spec: raise errors.BadWriterParamsError( "%s is required for Google Cloud Storage" % cls.BUCKET_NAME_PARAM) try: cloudstorage.validate_bucket_name( writer_spec[cls.BUCKET_NAME_PARAM]) except ValueError as error: raise errors.BadWriterParamsError("Bad bucket name, %s" % (error)) cls._generate_filename(writer_spec, "name", "id", 0) cls._generate_filename(writer_spec, "name", "id", 0, 1, 0)
Validate mapper specification. Args: mapper_spec: an instance of model.MapperSpec. Raises: BadWriterParamsError: if the specification is invalid for any reason such as missing the bucket name or providing an invalid bucket name.
juraj-google-style
def get_cqz(self, callsign, timestamp=timestamp_now): return self.get_all(callsign, timestamp)[const.CQZ]
Returns CQ Zone of a callsign Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: containing the callsign's CQ Zone Raises: KeyError: no CQ Zone found for callsign
codesearchnet
def get_best_blockhash(self, id=None, endpoint=None): return self._call_endpoint(GET_BEST_BLOCK_HASH, id=id, endpoint=endpoint)
Get the hash of the highest block Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
juraj-google-style
def targets(self, module): if (module not in self.module_targets): raise BuildError('Could not find module in targets()', module=module) return [self.find(x, module) for x in self.module_targets[module]]
Find the targets for a given module. Returns: list: A sequence of all of the targets for the specified module.
codesearchnet
def debug(text): frame = inspect.currentframe().f_back module = frame.f_globals['__name__'] func = frame.f_code.co_name msg = ('%s.%s: %s' % (module, func, text)) _LOGGER.debug(msg)
Log a message to syslog and stderr Args: text (str): The string object to print
codesearchnet
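A self-contained sketch of the same caller-introspection pattern used by debug above, with illustrative names: the caller's module and function are read from the previous stack frame.
import inspect
import logging

logging.basicConfig(level=logging.DEBUG)
_LOGGER = logging.getLogger(__name__)

def debug(text):
    frame = inspect.currentframe().f_back  # the caller's frame
    _LOGGER.debug('%s.%s: %s', frame.f_globals['__name__'], frame.f_code.co_name, text)

def connect():
    debug('opening socket')  # logs '__main__.connect: opening socket'

connect()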
def reminders_info(self, *, reminder: str, **kwargs) -> SlackResponse: self._validate_xoxp_token() kwargs.update({'reminder': reminder}) return self.api_call('reminders.info', http_verb='GET', params=kwargs)
Gets information about a reminder. Args: reminder (str): The ID of the reminder. e.g. 'Rm12345678'
codesearchnet
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = TwitterAndroidContactEventData() event_data.query = query event_data.identifier = self._GetRowValue(query_hash, row, '_id') event_data.user_identifier = self._GetRowValue(query_hash, row, 'user_id') event_data.username = self._GetRowValue(query_hash, row, 'username') event_data.name = self._GetRowValue(query_hash, row, 'name') event_data.description = self._GetRowValue(query_hash, row, 'description') event_data.web_url = self._GetRowValue(query_hash, row, 'web_url') event_data.location = self._GetRowValue(query_hash, row, 'location') event_data.followers = self._GetRowValue(query_hash, row, 'followers') event_data.friends = self._GetRowValue(query_hash, row, 'friends') event_data.statuses = self._GetRowValue(query_hash, row, 'statuses') event_data.image_url = self._GetRowValue(query_hash, row, 'image_url') timestamp = self._GetRowValue(query_hash, row, 'profile_created') if timestamp: date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'updated') if timestamp: date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_UPDATE) parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'friendship_time') if timestamp: date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_MODIFICATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a contact row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
juraj-google-style
def load_text(self, text, tokenizer=None): if tokenizer: words = [x.lower() for x in tokenizer(text)] else: words = self.tokenize(text) self._dictionary.update(words) self._update_dictionary()
Load text from which to generate a word frequency list Args: text (str): The text to be loaded tokenizer (function): The function to use to tokenize a string
codesearchnet
def configure_vrf(self, vrf_name, commands): commands = make_iterable(commands) commands.insert(0, ('vrf definition %s' % vrf_name)) return self.configure(commands)
Configures the specified VRF using commands Args: vrf_name (str): The VRF name to configure commands: The list of commands to configure Returns: True if the commands completed successfully
codesearchnet
def deserialize(proto): _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None)) if type_registrations is not None: for type_registration in type_registrations: if type_registration.should_load(proto): return (type_registration.from_proto(proto), type_registration.setter) return None
Create a trackable object from a SavedUserObject proto. Args: proto: A SavedUserObject to deserialize. Returns: A tuple of (trackable, assignment_fn) where assignment_fn has the same signature as setattr and should be used to add dependencies to `trackable` when they are available.
github-repos
def do_post(endpoint, body, access_token): headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token} headers['User-Agent'] = get_user_agent() return requests.post(endpoint, data=body, headers=headers)
Do an HTTP POST request and return JSON. Args: endpoint (str): Azure Resource Manager management endpoint. body (str): JSON body of information to post. access_token (str): A valid Azure authentication token. Returns: HTTP response. JSON body.
juraj-google-style
def _find_human_readable_labels(synsets, synset_to_human): humans = [] for s in synsets: assert (s in synset_to_human), ('Failed to find: %s' % s) humans.append(synset_to_human[s]) return humans
Build a list of human-readable labels. Args: synsets: list of strings; each string is a unique WordNet ID. synset_to_human: dict of synset to human labels, e.g., 'n02119022' --> 'red fox, Vulpes vulpes' Returns: List of human-readable strings corresponding to each synset.
codesearchnet
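A tiny usage sketch, assuming _find_human_readable_labels above is importable; the synset IDs and labels are illustrative:
synset_to_human = {'n02119022': 'red fox, Vulpes vulpes', 'n02085620': 'Chihuahua'}
synsets = ['n02085620', 'n02119022']
print(_find_human_readable_labels(synsets, synset_to_human))  # ['Chihuahua', 'red fox, Vulpes vulpes']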
def finish(self, exitcode): self._queue.put(self.Finish(exitcode)) self._thread.join()
Cleans up. Anything pushed after finish will be dropped. Args: exitcode: The exitcode of the watched process.
codesearchnet
def set_current(self, current): self.current = current self.input = current.input self.output = current.output self.cmd = current.task_data['cmd'] if self.cmd and NEXT_CMD_SPLITTER in self.cmd: self.cmd, self.next_cmd = self.cmd.split(NEXT_CMD_SPLITTER) else: self.next_cmd = None
Creates some aliases for attributes of ``current``. Args: current: :attr:`~zengine.engine.WFCurrent` object.
juraj-google-style
def to_cmd_args(mapping): sorted_keys = sorted(mapping.keys()) def arg_name(obj): string = _decode(obj) if string: return u'--%s' % string if len(string) > 1 else u'-%s' % string else: return u'' arg_names = [arg_name(argument) for argument in sorted_keys] def arg_value(value): if hasattr(value, 'items'): map_items = ['%s=%s' % (k, v) for k, v in sorted(value.items())] return ','.join(map_items) return _decode(value) arg_values = [arg_value(mapping[key]) for key in sorted_keys] items = zip(arg_names, arg_values) return [item for item in itertools.chain.from_iterable(items)]
Transform a dictionary in a list of cmd arguments. Example: >>>args = mapping.to_cmd_args({'model_dir': '/opt/ml/model', 'batch_size': 25}) >>> >>>print(args) ['--model_dir', '/opt/ml/model', '--batch_size', 25] Args: mapping (dict[str, object]): A Python mapping. Returns: (list): List of cmd arguments
juraj-google-style
class TimmWrapperImageProcessor(BaseImageProcessor): main_input_name = 'pixel_values' def __init__(self, pretrained_cfg: Dict[str, Any], architecture: Optional[str]=None, **kwargs): requires_backends(self, 'timm') super().__init__(architecture=architecture) self.data_config = timm.data.resolve_data_config(pretrained_cfg, model=None, verbose=False) self.val_transforms = timm.data.create_transform(**self.data_config, is_training=False) self.train_transforms = timm.data.create_transform(**self.data_config, is_training=True) self._not_supports_tensor_input = any((transform.__class__.__name__ == 'ToTensor' for transform in self.val_transforms.transforms)) def to_dict(self) -> Dict[str, Any]: output = super().to_dict() output.pop('train_transforms', None) output.pop('val_transforms', None) output.pop('_not_supports_tensor_input', None) return output @classmethod def get_image_processor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]: image_processor_filename = kwargs.pop('image_processor_filename', 'config.json') return super().get_image_processor_dict(pretrained_model_name_or_path, image_processor_filename=image_processor_filename, **kwargs) def preprocess(self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]]='pt') -> BatchFeature: if return_tensors != 'pt': raise ValueError(f"return_tensors for TimmWrapperImageProcessor must be 'pt', but got {return_tensors}") if self._not_supports_tensor_input and isinstance(images, torch.Tensor): images = images.cpu().numpy() if isinstance(images, torch.Tensor): images = self.val_transforms(images) images = images.unsqueeze(0) if images.ndim == 3 else images else: images = make_list_of_images(images) images = [to_pil_image(image) for image in images] images = torch.stack([self.val_transforms(image) for image in images]) return BatchFeature({'pixel_values': images}, tensor_type=return_tensors) def save_pretrained(self, *args, **kwargs): logger.warning_once('The `save_pretrained` method is disabled for TimmWrapperImageProcessor. The image processor configuration is saved directly in `config.json` when `save_pretrained` is called for saving the model.')
Wrapper class for timm models to be used within transformers. Args: pretrained_cfg (`Dict[str, Any]`): The configuration of the pretrained model used to resolve evaluation and training transforms. architecture (`Optional[str]`, *optional*): Name of the architecture of the model.
github-repos
def from_lengths_and_angles(abc: List[float], ang: List[float]): return Lattice.from_parameters(abc[0], abc[1], abc[2], ang[0], ang[1], ang[2])
Create a Lattice using unit cell lengths and angles (in degrees). Args: abc (3x1 array): Lattice parameters, e.g. (4, 4, 5). ang (3x1 array): Lattice angles in degrees, e.g., (90,90,120). Returns: A Lattice with the specified lattice parameters.
juraj-google-style
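A usage sketch, assuming pymatgen is installed and the classmethod above is exposed on Lattice (newer pymatgen releases may only provide Lattice.from_parameters); the hexagonal cell values are illustrative:
from pymatgen.core import Lattice

lattice = Lattice.from_lengths_and_angles([4, 4, 5], [90, 90, 120])
print(lattice.volume)  # about 69.28 for this hexagonal cell (a*b*c*sin(120 deg))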
def find_sanitiser_nodes(sanitiser, sanitisers_in_file): for sanitiser_tuple in sanitisers_in_file: if (sanitiser == sanitiser_tuple.trigger_word): (yield sanitiser_tuple.cfg_node)
Find nodes containing a particular sanitiser. Args: sanitiser(string): sanitiser to look for. sanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser. Returns: Iterable of sanitiser nodes.
codesearchnet
def load(self, data): resp = self.client.api.load_image(data) images = [] for chunk in resp: if 'stream' in chunk: match = re.search( r'(^Loaded image ID: |^Loaded image: )(.+)$', chunk['stream'] ) if match: image_id = match.group(2) images.append(image_id) if 'error' in chunk: raise ImageLoadError(chunk['error']) return [self.get(i) for i in images]
Load an image that was previously saved using :py:meth:`~docker.models.images.Image.save` (or ``docker save``). Similar to ``docker load``. Args: data (binary): Image data to be loaded. Returns: (list of :py:class:`Image`): The images. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def _parse(self, stream): builddata = json.load(stream) log.debug('This is a JSON build file.') if 'targets' not in builddata: log.warn('Warning: No targets defined here.') return for tdata in builddata['targets']: target = address.new(target=tdata.pop('name'), repo=self.target.repo, path=self.target.path) if target in self.node and 'target_obj' in self.node[target]: raise error.ButcherError( 'Target is defined more than once: %s', target) rule_obj = targets.new(name=target, ruletype=tdata.pop('type'), **tdata) log.debug('New target: %s', target) self.add_node(target, {'target_obj': rule_obj}) for dep in rule_obj.composed_deps() or []: d_target = address.new(dep) if not d_target.repo: d_target.repo = self.target.repo if d_target.repo == self.target.repo and not d_target.path: d_target.path = self.target.path if d_target not in self.nodes(): self.add_node(d_target) log.debug('New dep: %s -> %s', target, d_target) self.add_edge(target, d_target)
Parse a JSON BUILD file. Args: stream: file-like object from which the JSON build data is read.
juraj-google-style
def _preprocess_conv3d_input(x, data_format): tf_data_format = 'NDHWC' if data_format == 'channels_first': if not _has_nchw_support(): x = array_ops.transpose(x, (0, 2, 3, 4, 1)) else: tf_data_format = 'NCDHW' return (x, tf_data_format)
Transpose and cast the input before the conv3d. Args: x: input tensor. data_format: string, `"channels_last"` or `"channels_first"`. Returns: A tensor.
github-repos
def get(self): raise NotImplementedError()
Get the current tracking value. Returns: The current tracked value, the type of which depends on the specific tracker implementation.
github-repos
def run(self): qclog = open(self.qclog_file, 'w') p = subprocess.Popen(self.current_command, stdout=qclog) return p
Perform the actual QChem run. Returns: (subprocess.Popen) Used for monitoring.
codesearchnet
def num_nodes(self, leaves=True, internal=True): if not isinstance(leaves, bool): raise TypeError("leaves must be a bool") if not isinstance(internal, bool): raise TypeError("internal must be a bool") num = 0 for node in self.traverse_preorder(): if (leaves and node.is_leaf()) or (internal and not node.is_leaf()): num += 1 return num
Compute the total number of selected nodes in this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` Returns: ``int``: The total number of selected nodes in this ``Tree``
juraj-google-style
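A usage sketch, assuming the treeswift package (whose Tree.num_nodes matches the signature above) is installed:
from treeswift import read_tree_newick

tree = read_tree_newick('((A,B),(C,D));')
print(tree.num_nodes())                 # 7: 4 leaves + 3 internal nodes
print(tree.num_nodes(internal=False))   # 4: leaves only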
def plot_dendrogram(ax, obj, show_diameters=True): dnd = Dendrogram(obj, show_diameters=show_diameters) dnd.generate() _render_dendrogram(dnd, ax, 0.0) ax.set_title('Morphology Dendrogram') ax.set_xlabel('micrometers (um)') ax.set_ylabel('micrometers (um)') ax.set_aspect('auto') ax.legend()
Dendrogram of `obj` Args: obj: Neuron or tree (neurom.Neuron, neurom.Tree) show_diameters (boolean): Determines whether node diameters will be shown or not.
codesearchnet
def handle_error(self, error, download_request): if hasattr(error, "errno") and error.errno == errno.EACCES: self.handle_certificate_problem(str(error)) else: self.handle_general_download_error(str(error), download_request)
Checks what error occurred and looks for an appropriate solution. Args: error: Exception The error that has occurred. download_request: The request which resulted in the error.
juraj-google-style
def source(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `source`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `source`') self._source = value
Corresponds to IDD Field `source` Args: value (str): value for IDD Field `source` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def GetPasswdMap(self, since=None): return PasswdUpdateGetter().GetUpdates(self._GetClient(), self.conf['bucket'], self.conf['passwd_object'], since)
Return the passwd map from this source. Args: since: Get data only changed since this timestamp (inclusive) or None for all data. Returns: instance of passwd.PasswdMap
github-repos
def sample_mgrid(self, mgrid: np.array) -> np.array: mgrid = np.ascontiguousarray(mgrid, np.float32) if (mgrid.shape[0] != self.dimensions): raise ValueError(('mgrid.shape[0] must equal self.dimensions, %r[0] != %r' % (mgrid.shape, self.dimensions))) out = np.ndarray(mgrid.shape[1:], np.float32) if (mgrid.shape[1:] != out.shape): raise ValueError(('mgrid.shape[1:] must equal out.shape, %r[1:] != %r' % (mgrid.shape, out.shape))) lib.NoiseSampleMeshGrid(self._tdl_noise_c, out.size, ffi.cast('float*', mgrid.ctypes.data), ffi.cast('float*', out.ctypes.data)) return out
Sample a mesh-grid array and return the result. The :any:`sample_ogrid` method performs better as there is a lot of overhead when working with large mesh-grids. Args: mgrid (numpy.ndarray): A mesh-grid array of points to sample. A contiguous array of type `numpy.float32` is preferred. Returns: numpy.ndarray: An array of sampled points. This array has the shape: ``mgrid.shape[:-1]``. The ``dtype`` is `numpy.float32`.
codesearchnet
def angle_3points(p0, p1, p2): vec1 = vector(p1, p0) vec2 = vector(p2, p0) return math.atan2(np.linalg.norm(np.cross(vec1, vec2)), np.dot(vec1, vec2))
compute the angle in radians between three 3D points Calculated as the angle between p1-p0 and p2-p0. Args: p0, p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates. Returns: Angle in radians between (p1-p0) and (p2-p0). 0.0 if p0==p1 or p0==p2.
codesearchnet
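A quick check, assuming angle_3points above is importable: with p1-p0 and p2-p0 along perpendicular axes, the angle is pi/2.
import math

p0, p1, p2 = (0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)
print(math.isclose(angle_3points(p0, p1, p2), math.pi / 2))  # True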
def print_tree_deps_of(module, all_edges=None): if all_edges is None: all_edges = create_reverse_dependency_tree() tree = get_tree_starting_at(module, all_edges) lines = [(tree[0], tree[0])] for index in range(1, len(tree)): edges = tree[index] start_edges = {edge[0] for edge in edges} for start in start_edges: end_edges = {edge[1] for edge in edges if edge[0] == start} pos = 0 while lines[pos][1] != start: pos += 1 lines = lines[:pos + 1] + [(' ' * (2 * index) + end, end) for end in end_edges] + lines[pos + 1:] for line in lines: print(line[0])
Prints the tree of modules depending on a given module. Args: module (`str`): The module that will be the root of the subtree we want. all_edges (`List[Tuple[str, str]]`, *optional*): The list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed.
github-repos
def trade_day(dt, cal='US'): from xone import calendar dt = pd.Timestamp(dt).date() return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
Latest trading day w.r.t given dt Args: dt: date of reference cal: trading calendar Returns: pd.Timestamp: last trading day Examples: >>> trade_day('2018-12-25').strftime('%Y-%m-%d') '2018-12-24'
juraj-google-style
def set_source_interface(self, name): cmd = self.command_builder('ntp source', value=name) return self.configure(cmd)
Assign the NTP source on the node Args: name (string): The interface port that specifies the NTP source. Returns: True if the operation succeeds, otherwise False.
codesearchnet
def _inter_manager_operations(self, other, how_to_join, func): reindexed_self, reindexed_other_list, joined_index = self.copartition( 0, other, how_to_join, False ) reindexed_other = reindexed_other_list[0] new_columns = self._join_index_objects( 0, other.columns, how_to_join, sort=False ) self_cols = self.columns other_cols = other.columns def inter_data_op_builder(left, right, func): left.columns = self_cols right.columns = other_cols left.index = pandas.RangeIndex(len(left.index)) right.index = pandas.RangeIndex(len(right.index)) result = func(left, right) result.columns = pandas.RangeIndex(len(result.columns)) return result new_data = reindexed_self.inter_data_operation( 1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other ) return self.__constructor__(new_data, joined_index, new_columns)
Inter-data operations (e.g. add, sub). Args: other: The other Manager for the operation. how_to_join: The type of join to make (e.g. right, outer). func: The function to apply to the two aligned frames. Returns: New DataManager with new data and index.
juraj-google-style
def GetArtifactCollectorArgs(flow_args, knowledge_base): args = rdf_artifacts.ClientArtifactCollectorArgs() args.knowledge_base = knowledge_base args.apply_parsers = flow_args.apply_parsers args.ignore_interpolation_errors = flow_args.ignore_interpolation_errors args.max_file_size = flow_args.max_file_size args.use_tsk = flow_args.use_tsk if not flow_args.recollect_knowledge_base: artifact_names = flow_args.artifact_list else: artifact_names = GetArtifactsForCollection(knowledge_base.os, flow_args.artifact_list) expander = ArtifactExpander(knowledge_base, flow_args.path_type, flow_args.max_file_size) for artifact_name in artifact_names: rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name) if not MeetsConditions(knowledge_base, rdf_artifact): continue if artifact_name in expander.processed_artifacts: continue requested_by_user = artifact_name in flow_args.artifact_list for expanded_artifact in expander.Expand(rdf_artifact, requested_by_user): args.artifacts.append(expanded_artifact) return args
Prepare bundle of artifacts and their dependencies for the client. Args: flow_args: An `ArtifactCollectorFlowArgs` instance. knowledge_base: contains information about the client Returns: rdf value object containing a list of extended artifacts and the knowledge base
juraj-google-style
def combine_columns(columns): columns_zipped = itertools.zip_longest(*columns) return ''.join((x for zipped in columns_zipped for x in zipped if x))
Combine ``columns`` into a single string. Example: >>> combine_columns(['eape', 'xml']) 'example' Args: columns (iterable): ordered columns to combine Returns: String of combined columns
codesearchnet
def at(self, instant): for event in self: if event.begin <= instant <= event.end: yield event
Iterates (in chronological order) over all events that are occurring during `instant`. Args: instant (Arrow object)
juraj-google-style
def list_experiments(self, collection_name): exp = ExperimentResource( name='', collection_name=collection_name, coord_frame='foo') return self._list_resource(exp)
List all experiments that belong to a collection. Args: collection_name (string): Name of the parent collection. Returns: (list) Raises: requests.HTTPError on failure.
juraj-google-style
def var(x, axis=None, keepdims=False): if x.dtype.base_dtype == dtypes_module.bool: x = math_ops.cast(x, floatx()) return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
Variance of a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to compute the variance. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the variance of elements of `x`.
github-repos
def make_lda_variational(activation, num_topics, layer_sizes): encoder_net = tf.keras.Sequential() for num_hidden_units in layer_sizes: encoder_net.add(tf.keras.layers.Dense(num_hidden_units, activation=activation, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) encoder_net.add(tf.keras.layers.Dense(num_topics, activation=tf.nn.softplus, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) def lda_variational(bag_of_words): concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words)) return ed.Dirichlet(concentration=concentration, name='topics_posterior') return lda_variational
Creates the variational distribution for LDA. Args: activation: Activation function to use. num_topics: The number of topics. layer_sizes: The number of hidden units per layer in the encoder. Returns: lda_variational: A function that takes a bag-of-words Tensor as input and returns a distribution over topics.
codesearchnet
def get_nested_dmaps(dmap): if (not isinstance(dmap, DynamicMap)): return [] dmaps = [dmap] for o in dmap.callback.inputs: dmaps.extend(get_nested_dmaps(o)) return list(set(dmaps))
Recurses DynamicMap to find DynamicMaps inputs Args: dmap: DynamicMap to recurse to look for DynamicMap inputs Returns: List of DynamicMap instances that were found
codesearchnet
def custom_line_color_map(self, values): if not isinstance(values, list): raise TypeError("custom_line_color_map must be a list") self.options["custom_line_color_map"] = values
Set the custom line color map. Args: values (list): list of colors. Raises: TypeError: Custom line color map must be a list.
juraj-google-style
def _GetNameFromProduct(self): product = (self.product or '') product = product.split(' ') product_lower_case = [segment.lower() for segment in product] number_of_segments = len(product) if ('windows' in product_lower_case): segment_index = (product_lower_case.index('windows') + 1) if (product_lower_case[segment_index] in ('(r)', 'server')): segment_index += 1 suffix_segment_index = (segment_index + 1) if ((suffix_segment_index < number_of_segments) and (product_lower_case[suffix_segment_index] == 'r2')): return 'Windows {0:s} R2'.format(product[segment_index]) return 'Windows {0:s}'.format(product[segment_index]) return None
Determines the predefined operating system name from the product. Returns: str: operating system name, such as "macOS Mojave" or "Windows XP" or None if the name cannot be determined. This value is used to programmatically link a parser preset to an operating system and therefore must be one of predefined values.
codesearchnet
def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads): with tf.name_scope('apply_gradients'): var_update_ops = [] for vid, (g, v) in enumerate(ps_var_grads): apply_gradient_op = opt.apply_gradients([(g, v)]) barrier = self._add_sync_queues_and_barrier( 'param_update_barrier_{}'.format(vid), [apply_gradient_op]) with tf.control_dependencies([barrier]), \ tf.device(self.cpu_device): updated_value = v.read_value() for towerid in range(self.nr_gpu): var_update_ops.append( raw_grad_list[towerid][vid][1].assign(updated_value)) return var_update_ops
Apply averaged gradients to ps vars, and then copy the updated variables back to each tower. Args: raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers ps_var_grads: Nvar x 2 (grad, ps_var) Returns: list of copy ops
juraj-google-style
def run(self, fetches, feed_dict=None, options=None, run_metadata=None): return self._sess.run(fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata)
Run ops in the monitored session. This method is completely compatible with the `tf.Session.run()` method. Args: fetches: Same as `tf.Session.run()`. feed_dict: Same as `tf.Session.run()`. options: Same as `tf.Session.run()`. run_metadata: Same as `tf.Session.run()`. Returns: Same as `tf.Session.run()`.
github-repos
def get_incomplete_penetrance_genes(hpo_lines): genes = parse_hpo_genes(hpo_lines) incomplete_penetrance_genes = set() for hgnc_symbol in genes: if genes[hgnc_symbol].get('incomplete_penetrance'): incomplete_penetrance_genes.add(hgnc_symbol) return incomplete_penetrance_genes
Get a set with all genes that have incomplete penetrance according to HPO Args: hpo_lines(iterable(str)) Returns: incomplete_penetrance_genes(set): A set with the hgnc symbols of all genes with incomplete penetrance
juraj-google-style
def add_menu_item(self, command, title): m_item = Gtk.MenuItem() m_item.set_label(title) m_item.connect('activate', command) self.menu.append(m_item) self.menu.show_all()
Add a mouse right-click menu item. Args: command (callable): function that will be called when the menu item is activated title (str): label that will be shown in the menu
juraj-google-style
def _find_paths_referenced(self) -> Tuple[Optional[str], Collection[str]]: raise NotImplementedError()  # abstract hook: concrete visitor subclasses implement the traversal described in the docstring
Finds paths for any elements referenced in this expression. Recursively builds paths by visiting each node in the tree. Returns a tuple of (context, paths) where `context` is an identifier which may be part of a dotted path completed by its parent and `paths` are the dotted paths found so far. Implementations must recursively call this method for all child nodes. Returns: A tuple of (context, paths) as described above.
github-repos
def open(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO: return self._path_open(path, 'rb', mime_type, compression_type)
Returns a read channel for the given file path. Args: path: string path of the file object to be read from the system mime_type: MIME type to specify the type of content in the file object compression_type: Type of compression to be used for this object Returns: file handle with a close function for the user to use
github-repos
def process_input_data(filename, imager, grid_data, grid_norm, grid_weights): ms = oskar.MeasurementSet.open(filename) block_start = 0 num_rows = ms.num_rows num_baselines = (ms.num_stations * (ms.num_stations - 1)) // 2 while (block_start < num_rows): block_size = (num_rows - block_start) if (block_size > num_baselines): block_size = num_baselines uvw = ms.read_column('UVW', block_start, block_size) vis_weights = ms.read_column('WEIGHT', block_start, block_size) if (ms.num_pols == 4): vis_weights = (0.5 * (vis_weights[:, 0] + vis_weights[:, 3])) for j in range(ms.num_channels): coords = ((uvw * (ms.freq_start_hz + (j * ms.freq_inc_hz))) / 299792458.0) vis_data = None if (not imager.coords_only): vis_data = ms.read_vis(block_start, j, 1, block_size) if (ms.num_pols == 4): vis_data = (0.5 * (vis_data[0, :, 0] + vis_data[0, :, 3])) grid_norm = imager.update_plane(coords[:, 0], coords[:, 1], coords[:, 2], vis_data, vis_weights, grid_data, grid_norm, grid_weights) block_start += block_size return grid_norm
Reads visibility data from a Measurement Set. The visibility grid or weights grid is updated accordingly. Visibility data are read from disk in blocks of size num_baselines. Args: filename (str): Name of Measurement Set to open. imager (oskar.Imager): Handle to configured imager. grid_data (numpy.ndarray or None): Visibility grid to populate. grid_norm (float) Current grid normalisation. grid_weights (numpy.ndarray): Weights grid to populate or read. Returns: grid_norm (float): Updated grid normalisation.
codesearchnet
def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, **kwargs): vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer kwargs = {k: v for k, v in kwargs.items() if v is not None} image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs) if isinstance(vision_feature_layer, int): selected_image_feature = image_outputs.hidden_states[vision_feature_layer] else: hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer] selected_image_feature = torch.cat(hs_pool, dim=-1) image_features = self.multi_modal_projector(selected_image_feature.squeeze(0), image_sizes) downsample_ratio = self.vision_tower.patch_size * self.config.spatial_merge_size split_sizes = [(height // downsample_ratio) * (width // downsample_ratio) for height, width in image_sizes] image_features = torch.split(image_features.squeeze(0), split_sizes) return image_features
Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`): The tensors corresponding to the input images. vision_feature_layer (`Union[int, List[int]]`, *optional*): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_sizes (`torch.Tensor`, *optional*): Tensor containing the image sizes as returned by the processor. Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
github-repos
def dump_tree(self, statement=None, indent_level=0): out = u"" indent = u" "*indent_level if statement is None: for root_statement in self.statements: out += self.dump_tree(root_statement, indent_level) else: out += indent + str(statement) + u'\n' if len(statement.children) > 0: for child in statement.children: out += self.dump_tree(child, indent_level=indent_level+4) return out
Dump the AST for this parsed file. Args: statement (SensorGraphStatement): the statement to print if this function is called recursively. indent_level (int): The number of spaces to indent this statement. Used for recursively printing blocks of statements. Returns: str: The AST for this parsed sg file as a nested tree with one node per line and blocks indented.
juraj-google-style
def screenrecord(self, bit_rate: int = 5000000, time_limit: int = 180, filename: _PATH = '/sdcard/demo.mp4') -> None: self._execute('-s', self.device_sn, 'shell', 'screenrecord', '--bit-rate', str(bit_rate), '--time-limit', str(time_limit), filename)
Records the display of devices running Android 4.4 (API level 19) and higher. Args: bit_rate: You can increase the bit rate to improve video quality, but doing so results in larger movie files. time_limit: Sets the maximum recording time, in seconds; the maximum value is 180 (3 minutes). filename: Path on the device where the recording is saved.
juraj-google-style
def add(reader, writer, column, start, stop, value): for (i, row) in enumerate(reader): if ((i >= start) and (i <= stop)): row[column] = (type(value)(row[column]) + value) writer.appendRecord(row)
Adds a value over a range of rows. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. column: The column of data to modify. start: The first row in the range to modify. stop: The last row in the range to modify. value: The value to add.
codesearchnet
def _append_commands(dct, module_name, commands): for command in commands: entry_point = '{command}{subcommand} = {module}{callable}'.format(command=command.command, subcommand=(':{}'.format(command.subcommand) if command.subcommand else ''), module=module_name, callable=(':{}'.format(command.callable) if command.callable else '')) dct.setdefault(command.command, set()).add(entry_point)
Append entry point strings representing the given Command objects. Args: dct: The dictionary to append with entry point strings. Each key will be a primary command with a value containing a list of entry point strings representing a Command. module_name: The name of the module in which the command object resides. commands: A list of Command objects to convert to entry point strings.
codesearchnet
def get_last(self, num=10): max_item = self.get_max_item() urls = [urljoin(self.item_url, f'{i}.json') for i in range(((max_item - num) + 1), (max_item + 1))] result = self._run_async(urls=urls) return [Item(r) for r in result if r]
Returns the last `num` HN stories. Downloads the latest HN articles and returns them as Item objects. Returns: `list` of `Item` objects for the latest HN stories.
codesearchnet
def read_raster(raster_file): ds = gdal_Open(raster_file) band = ds.GetRasterBand(1) data = band.ReadAsArray() xsize = band.XSize ysize = band.YSize nodata_value = band.GetNoDataValue() geotrans = ds.GetGeoTransform() dttype = band.DataType srs = osr_SpatialReference() srs.ImportFromWkt(ds.GetProjection()) if (nodata_value is None): nodata_value = DEFAULT_NODATA band = None ds = None return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype)
Read raster by GDAL. Args: raster_file: raster file path. Returns: Raster object.
codesearchnet
def list_mapped_classes(): cls_dict = {key: value for key, value in MODULE.rdfclass.__dict__.items() if not isinstance(value, RdfConfigManager) and key not in ['properties'] and hasattr(value, 'es_defs') and value.es_defs.get('kds_esIndex')} new_dict = {} potential_maps = set([cls_.__name__ for cls_ in cls_dict.values()]) for name, cls_ in cls_dict.items(): parents = set(cls_.hierarchy) if len(parents.intersection(potential_maps)) <= 1: new_dict[name] = cls_ return new_dict
Returns all the rdfclasses that have an associated elasticsearch mapping Args: None
juraj-google-style
def parse_topology(ml_log, log=None, ml_version='1.3.4BETA', print_output=False): topology = {'manifold': True, 'non_manifold_E': 0, 'non_manifold_V': 0} with open(ml_log) as fread: for line in fread: if ('V:' in line): vert_edge_face = line.replace('V:', ' ').replace('E:', ' ').replace('F:', ' ').split() topology['vert_num'] = int(vert_edge_face[0]) topology['edge_num'] = int(vert_edge_face[1]) topology['face_num'] = int(vert_edge_face[2]) if ('Unreferenced Vertices' in line): topology['unref_vert_num'] = int(line.split()[2]) if ('Boundary Edges' in line): topology['boundry_edge_num'] = int(line.split()[2]) if ('Mesh is composed by' in line): topology['part_num'] = int(line.split()[4]) if ('non 2-manifold mesh' in line): topology['manifold'] = False if ('non two manifold edges' in line): topology['non_manifold_edge'] = int(line.split()[2]) if ('non two manifold vertexes' in line): topology['non_manifold_vert'] = int(line.split()[2]) if ('Genus is' in line): topology['genus'] = line.split()[2] if (topology['genus'] != 'undefined'): topology['genus'] = int(topology['genus']) if ('holes' in line): topology['hole_num'] = line.split()[2] if (topology['hole_num'] == 'a'): topology['hole_num'] = 'undefined' else: topology['hole_num'] = int(topology['hole_num']) for (key, value) in topology.items(): if (log is not None): log_file = open(log, 'a') log_file.write('{:16} = {}\n'.format(key, value)) log_file.close() elif print_output: print('{:16} = {}'.format(key, value)) return topology
Parse the ml_log file generated by the measure_topology function. Args: ml_log (str): MeshLab log file to parse log (str): filename to log output Returns: dict: dictionary with the following keys: vert_num (int): number of vertices edge_num (int): number of edges face_num (int): number of faces unref_vert_num (int): number of unreferenced vertices boundry_edge_num (int): number of boundary edges part_num (int): number of parts (components) in the mesh. manifold (bool): True if mesh is two-manifold, otherwise False. non_manifold_edge (int): number of non_manifold edges. non_manifold_vert (int): number of non-manifold vertices genus (int or str): genus of the mesh, either a number or 'undefined' if the mesh is non-manifold. holes (int or str): number of holes in the mesh, either a number or 'undefined' if the mesh is non-manifold.
codesearchnet
def WriteMap(self, map_data=None, force_write=False): if map_data is None: writable_map = self.data else: writable_map = map_data entries_written = self.Write(writable_map) if entries_written is None: self.log.warning('cache write failed, exiting') return 1 if force_write or self.Verify(entries_written): self._Commit() self.WriteIndex() return 0 self.log.warning('verification failed, exiting') return 1
Write a map to disk. Args: map_data: optional Map object to overwrite our current data with. force_write: optional flag to indicate verification checks can be ignored. Returns: 0 if successful, 1 if not
github-repos
def seek(self, offset, whence=os.SEEK_SET): self._checkClosed() if whence == os.SEEK_SET: self._position = offset elif whence == os.SEEK_CUR: self._position += offset elif whence == os.SEEK_END: self._position = self._downloader.size + offset else: raise ValueError('Whence mode %r is invalid.' % whence) self._position = min(self._position, self._downloader.size) self._position = max(self._position, 0) return self._position
Set the stream's current offset. Note if the new offset is out of bound, it is adjusted to either 0 or EOF. Args: offset: seek offset as number. whence: seek mode. Supported modes are os.SEEK_SET (absolute seek), os.SEEK_CUR (seek relative to the current position), and os.SEEK_END (seek relative to the end, offset should be negative). Raises: ``ValueError``: When this stream is closed or if whence is invalid.
github-repos
def api_client(connection, client_class=xbahn.api.Client): return client_class( link=xbahn.connection.link.Link( receive=connection, send=connection ) )
Establishes an API client for a one-way communication connection with an API Server Arguments: - connection (xbahn.connection.Connection) Keyword Arguments: - client_class (xbahn.api.Client): if supplied use this class to instantiate the client object. If omitted will use xbahn.api.Client. Returns: - client_class: client instance
juraj-google-style
def get_max_muO2(self, min_voltage=None, max_voltage=None): data = [] for pair in self._select_in_voltage_range(min_voltage, max_voltage): if pair.muO2_discharge is not None: data.extend([d['chempot'] for d in pair.muO2_discharge]) if pair.muO2_charge is not None: data.extend([d['chempot'] for d in pair.muO2_charge]) return max(data) if len(data) > 0 else None
Maximum critical oxygen chemical potential along path. Args: min_voltage: The minimum allowable voltage. max_voltage: The maximum allowable voltage. Returns: Maximum critical oxygen chemical of all compounds along the insertion path (a subset of the path can be chosen by the optional arguments).
juraj-google-style
def from_attrs(cls, desired_attrs=None, except_attrs=None, critical_attrs=None): if isinstance(desired_attrs, roids.OID): desired_attrs = set([desired_attrs]) if isinstance(except_attrs, roids.OID): except_attrs = set([except_attrs]) if isinstance(critical_attrs, roids.OID): critical_attrs = set([critical_attrs]) if rfc5587 is None: raise NotImplementedError("Your GSSAPI implementation does not " "have support for RFC 5587") mechs = rfc5587.indicate_mechs_by_attrs(desired_attrs, except_attrs, critical_attrs) return (cls(mech) for mech in mechs)
Get a generator of mechanisms supporting the specified attributes. See RFC 5587's :func:`indicate_mechs_by_attrs` for more information. Args: desired_attrs ([OID]): Desired attributes except_attrs ([OID]): Except attributes critical_attrs ([OID]): Critical attributes Returns: [Mechanism]: A set of mechanisms having the desired features. Raises: GSSError :requires-ext:`rfc5587`
juraj-google-style
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding, expected, use_gpu, v2=False, one_dim=False, use_negative_input=False): for data_format, use_gpu_2 in GetTestConfigs(include_nchw_vect_c=True, one_dimensional=one_dim): if use_gpu_2 == use_gpu: self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding, data_format, expected, use_gpu, v2, use_negative_input)
Verifies the output values of the pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. ksize: The kernel size dimensions strides: The stride dimensions padding: Padding type. expected: An array containing the expected operation outputs. use_gpu: Whether we are running on GPU. v2: Whether to use v2 version. one_dim: If one dimensional pools should be done instead of two dimensional pools. use_negative_input: If the input values should be negative.
github-repos
def barycentric_coords(coords, simplex): coords = np.atleast_2d(coords) t = (np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None]) all_but_one = np.transpose(np.linalg.solve(t, np.transpose((coords - simplex[-1])))) last_coord = (1 - np.sum(all_but_one, axis=-1)[:, None]) return np.append(all_but_one, last_coord, axis=-1)
Converts a list of coordinates to barycentric coordinates, given a simplex with d+1 points. Only works for d >= 2. Args: coords: list of n coords to transform, shape should be (n,d) simplex: list of coordinates that form the simplex, shape should be (d+1, d) Returns: a LIST of barycentric coordinates (even if the original input was 1d)
codesearchnet
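A worked example, assuming barycentric_coords above is importable: in the unit right triangle, the point (0.25, 0.25) has barycentric coordinates (0.5, 0.25, 0.25).
import numpy as np

simplex = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
print(barycentric_coords([0.25, 0.25], simplex))  # [[0.5  0.25 0.25]]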
def compute_verdict(self, results): if (results['class'] in self.reject_classes): threshold = self.reject_classes[results['class']] if (float(results['confidence']) >= threshold): logger.debug('<{0}> Suggesting to reject the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results)) return self.VERDICT_REJECT if (results['class'] in self.quarantine_classes): threshold = self.quarantine_classes[results['class']] if (float(results['confidence']) >= threshold): logger.debug('<{0}> Suggesting to quarantine the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results)) return self.VERDICT_QUARANTINE if (results['class'] in self.accept_classes): threshold = self.accept_classes[results['class']] if (float(results['confidence']) >= threshold): logger.debug('<{0}> Suggesting to accept the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results)) return self.VERDICT_ACCEPT logger.debug('<{0}> Suggesting to accept the message, no verdict class matched DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results)) return self.VERDICT_ACCEPT
Match results to the configured reject, quarantine and accept classes, and return a verdict based on that. The verdict classes are matched in the order: reject_classes, quarantine_classes, accept_classes. This means that you can configure different verdicts for different confidence results, for instance: reject_classes= Spam:0.99 # Reject obvious spam quarantine_classes = Spam:0.7 # Quarantine spam with confidence # between 0.7 and 0.99 accept_classes = Spam # Accept low confidence spam (good # for FP and retraining) Args: results -- A results dictionary from DspamClient.
codesearchnet
def get_dns_zone_ids(env='dev', facing='internal'): client = boto3.Session(profile_name=env).client('route53') zones = client.list_hosted_zones_by_name(DNSName='.'.join([env, DOMAIN])) zone_ids = [] for zone in zones['HostedZones']: LOG.debug('Found Hosted Zone: %s', zone) if facing == 'external' or zone['Config']['PrivateZone']: LOG.info('Using %(Id)s for "%(Name)s", %(Config)s', zone) zone_ids.append(zone['Id']) LOG.debug('Zone IDs: %s', zone_ids) return zone_ids
Get Route 53 Hosted Zone IDs for _env_. Args: env (str): Deployment environment. facing (str): Type of ELB, external or internal. Returns: list: Hosted Zone IDs for _env_. Only *PrivateZone* when _facing_ is internal.
juraj-google-style
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan): grads = [g for g, _ in grad_and_vars] grad = math_ops.add_n(grads) if use_mean and len(grads) > 1: grad = array_ops.multiply(grad, 1.0 / len(grads)) v = grad_and_vars[0][1] if check_inf_nan: has_nan_or_inf = array_ops.logical_not(array_ops.reduce_all(array_ops.is_finite(grads))) return ((grad, v), has_nan_or_inf) else: return ((grad, v), None)
Calculate the average gradient for a shared variable across all replicas. Note that this function provides a synchronization point across all replicas. Args: grad_and_vars: A list or tuple of (gradient, variable) tuples. Each (gradient, variable) pair within the outer list represents the gradient of the variable calculated for a single replica, and the number of pairs equals the number of replicas. use_mean: if True, mean is taken, else sum of gradients is taken. check_inf_nan: check grads for nans and infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all replicas. The variable is chosen from the first replica. The has_nan_or_inf indicates the grads has nan or inf.
github-repos
def get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None): collection_def = meta_graph_def_to_load.collection_def asset_tensor_dict = {} asset_protos = [] if meta_graph_def_to_load.asset_file_def: asset_protos = meta_graph_def_to_load.asset_file_def elif constants.ASSETS_KEY in collection_def: assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value for asset_any_proto in assets_any_proto: asset_proto = meta_graph_pb2.AssetFileDef() asset_any_proto.Unpack(asset_proto) asset_protos.append(asset_proto) assets_directory = file_io.join(compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY)) for asset_proto in asset_protos: tensor_name = asset_proto.tensor_info.name if import_scope: tensor_name = '%s/%s' % (import_scope, tensor_name) asset_tensor_dict[tensor_name] = file_io.join(compat.as_bytes(assets_directory), compat.as_bytes(asset_proto.filename)) return asset_tensor_dict
Gets the asset tensors, if defined in the meta graph def to load. Args: export_dir: Directory where the SavedModel is located. meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. import_scope: Optional `string` -- if specified, prepend this followed by '/' to all returned asset tensor names. Returns: A dictionary of asset tensors, keyed by the name of the asset tensor. The value in the map corresponds to the absolute path of the asset file.
github-repos
def get_dataframe(self, force_computation=False): if self.df is not None and not force_computation: return self.df self.df = self.fetch(self.context) self.df = self.preprocess(self.df) self.transform(self.df) return self.df
Preprocesses then transforms the return of fetch(). Args: force_computation (bool, optional) : Defaults to False. If set to True, forces the computation of DataFrame at each call. Returns: pandas.DataFrame: Preprocessed and transformed DataFrame.
juraj-google-style
def _get_block_publisher(self, state_hash): state_view = self._state_view_factory.create_view(state_hash) try: class BatchPublisher: def send(self, transactions): raise InvalidGenesisConsensusError( 'Consensus cannot send transactions during genesis.') consensus = ConsensusFactory.get_configured_consensus_module( NULL_BLOCK_IDENTIFIER, state_view) return consensus.BlockPublisher( BlockCache(self._block_store), state_view_factory=self._state_view_factory, batch_publisher=BatchPublisher(), data_dir=self._data_dir, config_dir=self._config_dir, validator_id=self._identity_signer.get_public_key().as_hex()) except UnknownConsensusModuleError as e: raise InvalidGenesisStateError(e)
Returns the block publisher based on the consensus module set by the "sawtooth_settings" transaction family. Args: state_hash (str): The current state root hash for reading settings. Raises: InvalidGenesisStateError: if any errors occur getting the BlockPublisher.
juraj-google-style
def sendfrom(self, user_id, dest_address, amount, minconf=1): amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN) txhash = self.rpc.call('sendfrom', user_id, dest_address, float(str(amount)), minconf) self.logger.debug(('Send %s %s from %s to %s' % (str(amount), self.coin, str(user_id), dest_address))) self.logger.debug(('Transaction hash: %s' % txhash)) return txhash
Send coins from user's account. Args: user_id (str): this user's unique identifier dest_address (str): address which is to receive coins amount (str or Decimal): amount to send (eight decimal points) minconf (int): ensure the account has a valid balance using this many confirmations (default=1) Returns: str: transaction ID
codesearchnet
def merge(self: 'FetchResponse', other: 'FetchResponse') \ -> 'FetchResponse': if self.seq != other.seq: raise ValueError(other) new_data = OrderedDict(self.data) new_data.update(other.data) return FetchResponse(self.seq, list(new_data.items()))
Merge the other FETCH response, adding any fetch attributes that do not already exist in this FETCH response. For example:: * 3 FETCH (UID 119) * 3 FETCH (FLAGS (\\Seen)) Would merge into:: * 3 FETCH (UID 119 FLAGS (\\Seen)) Args: other: The other response to merge.
juraj-google-style
def is_ipython_subprocess() -> bool: return False
Check if we are in a sub-process launched from within a `ipython` terminal. Returns: `True` only if we are in ipython terminal (e.g. `ml_python`) and inside a sub-process.
github-repos
def parse_input(self): if self._text: lines = iter(self._text.splitlines()) elif self._file: lines = self._file else: lines = () sample_lines = [] for line in lines: if (len(sample_lines) > 100): break sample_lines.append(line) lines = itertools.chain(sample_lines, lines) self.guess_type(sample_lines) datetime_format = wpull.protocol.ftp.ls.date.guess_datetime_format(sample_lines) self.set_datetime_format(datetime_format) return self.parse(lines)
Parse the listings. Returns: iter: A iterable of :class:`.ftp.ls.listing.FileEntry`
codesearchnet
def ScanForStorageMediaImage(self, source_path_spec): try: type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators( source_path_spec, resolver_context=self._resolver_context) except RuntimeError as exception: raise errors.BackEndError(( 'Unable to process source path specification with error: ' '{0!s}').format(exception)) if not type_indicators: file_system = resolver.Resolver.OpenFileSystem( source_path_spec, resolver_context=self._resolver_context) raw_path_spec = path_spec_factory.Factory.NewPathSpec( definitions.TYPE_INDICATOR_RAW, parent=source_path_spec) try: glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec) except errors.PathSpecError: glob_results = None file_system.Close() if not glob_results: return None return raw_path_spec if len(type_indicators) > 1: raise errors.BackEndError( 'Unsupported source found more than one storage media image types.') return path_spec_factory.Factory.NewPathSpec( type_indicators[0], parent=source_path_spec)
Scans the path specification for a supported storage media image format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: storage media image path specification or None if no supported storage media image type was found. Raises: BackEndError: if the source cannot be scanned or more than one storage media image type is found.
juraj-google-style
def new_netting_channel( self, partner: Address, settle_timeout: int, given_block_identifier: BlockSpecification, ) -> ChannelID: checking_block = self.client.get_checking_block() self._new_channel_preconditions( partner=partner, settle_timeout=settle_timeout, block_identifier=given_block_identifier, ) log_details = { 'peer1': pex(self.node_address), 'peer2': pex(partner), } gas_limit = self.proxy.estimate_gas( checking_block, 'openChannel', participant1=self.node_address, participant2=partner, settle_timeout=settle_timeout, ) if not gas_limit: self.proxy.jsonrpc_client.check_for_insufficient_eth( transaction_name='openChannel', transaction_executed=False, required_gas=GAS_REQUIRED_FOR_OPEN_CHANNEL, block_identifier=checking_block, ) self._new_channel_postconditions( partner=partner, block=checking_block, ) log.critical('new_netting_channel call will fail', **log_details) raise RaidenUnrecoverableError('Creating a new channel will fail') log.debug('new_netting_channel called', **log_details) if gas_limit and partner not in self.open_channel_transactions: new_open_channel_transaction = AsyncResult() self.open_channel_transactions[partner] = new_open_channel_transaction gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_OPEN_CHANNEL) try: transaction_hash = self.proxy.transact( 'openChannel', gas_limit, participant1=self.node_address, participant2=partner, settle_timeout=settle_timeout, ) self.client.poll(transaction_hash) receipt_or_none = check_transaction_threw(self.client, transaction_hash) if receipt_or_none: self._new_channel_postconditions( partner=partner, block=receipt_or_none['blockNumber'], ) log.critical('new_netting_channel failed', **log_details) raise RaidenUnrecoverableError('creating new channel failed') except Exception as e: log.critical('new_netting_channel failed', **log_details) new_open_channel_transaction.set_exception(e) raise else: new_open_channel_transaction.set(transaction_hash) finally: self.open_channel_transactions.pop(partner, None) else: self.open_channel_transactions[partner].get() channel_identifier: ChannelID = self._detail_channel( participant1=self.node_address, participant2=partner, block_identifier='latest', ).channel_identifier log_details['channel_identifier'] = str(channel_identifier) log.info('new_netting_channel successful', **log_details) return channel_identifier
Creates a new channel in the TokenNetwork contract. Args: partner: The peer to open the channel with. settle_timeout: The settle timeout to use for this channel. given_block_identifier: The block identifier of the state change that prompted this proxy action Returns: The ChannelID of the new netting channel.
juraj-google-style
def get_results_as_numpy_array(self, parameter_space, result_parsing_function, runs): return np.array(self.get_space(self.db.get_complete_results(), {}, parameter_space, runs, result_parsing_function))
Return the results relative to the desired parameter space in the form of a numpy array. Args: parameter_space (dict): dictionary containing parameter/list-of-values pairs. result_parsing_function (function): user-defined function, taking a result dictionary as argument, that can be used to parse the result files and return a list of values. runs (int): number of runs to gather for each parameter combination.
codesearchnet
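A hypothetical usage sketch for get_results_as_numpy_array. The `campaign` object, the parameter names, the `parse_throughput` helper, and the assumed per-run result layout are all invented for illustration; the real layout depends on the surrounding simulation framework, so the actual call is left commented out.

parameter_space = {
    'protocol': ['tcp', 'udp'],
    'payload_size': [128, 512, 1024],
}

def parse_throughput(result):
    # 'result' is the per-run result dictionary handed to the parsing function;
    # here we assume its 'output' entry holds one throughput sample per line.
    return [float(line) for line in result['output'].splitlines()]

# Expected shape: (2 protocols, 3 payload sizes, 5 runs, samples per run)
# data = campaign.get_results_as_numpy_array(parameter_space, parse_throughput, runs=5)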
def default_batch_size(self) -> int: return OnnxConfig.default_fixed_batch
The default batch size to use if no other indication is given.

Returns:
    Integer > 0
github-repos
def append(self, annotation): self._annotations[annotation.id] = annotation self._dirty = True return annotation
Add an annotation. Args: annotation (gkeepapi.node.Annotation): An Annotation object. Returns: gkeepapi.node.Annotation: The Annotation.
juraj-google-style
def pack_small_tensors(tower_grads, max_bytes=0): assert max_bytes >= 0 orig_grads = [g for g, _ in tower_grads[0]] assert all(g.dtype == tf.float32 for g in orig_grads) sizes = [4 * g.shape.num_elements() for g in orig_grads] print_stats(sizes) small_ranges = [] large_indices = [] new_sizes = [] def end_interval(indices, small_ranges, large_indices): if len(indices) > 1: small_ranges.insert(0, [indices[0], indices[-1]]) else: large_indices.insert(0, indices[0]) cur_range = [] cur_size = 0 for i, s in reversed(list(enumerate(sizes))): if cur_size > max_bytes: end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) cur_range = [] cur_size = 0 cur_range.insert(0, i) cur_size += s end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) print_stats(new_sizes) num_gv = len(orig_grads) packing = {} if len(small_ranges): new_tower_grads = [] for dev_idx, gv_list in enumerate(tower_grads): assert len(gv_list) == num_gv, ( "Possible cause: " "Networks constructed on different workers " "don't have the same number of variables. " "If you use tf.GraphKeys or tf.global_variables() " "with multiple graphs per worker during network " "construction, you need to use " "appropriate scopes, see " "https: new_gv_list = [] for r in small_ranges: key = "%d:%d" % (dev_idx, len(new_gv_list)) new_gv_list.append((pack_range(key, packing, gv_list, r), "packing_var_placeholder")) for i in large_indices: new_gv_list.append(gv_list[i]) new_tower_grads.append(new_gv_list) return new_tower_grads, packing else: return tower_grads, None
Concatenate small gradients together more intelligently, using bin packing.

Args:
    tower_grads: List of lists of (gradient, variable) tuples.
    max_bytes: Int giving max number of bytes in a tensor that may be
        considered small.

Returns:
    A (new_tower_grads, packing) tuple, where packing is a dict describing how
    small gradients were concatenated, or (tower_grads, None) if no packing
    was performed.
juraj-google-style
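A standalone sketch of the interval-building pass inside pack_small_tensors, run on plain byte sizes instead of gradient tensors; the sizes and the max_bytes threshold are made up for the demo, and the actual concatenation step (pack_range) is omitted.

sizes = [4, 4096, 4, 4, 4]   # bytes per gradient, invented for the demo
max_bytes = 8

small_ranges = []    # [first, last] index intervals to concatenate
large_indices = []   # indices left unpacked

def end_interval(indices):
    # Singleton intervals are treated as "large" and left alone.
    if len(indices) > 1:
        small_ranges.insert(0, [indices[0], indices[-1]])
    else:
        large_indices.insert(0, indices[0])

cur_range = []
cur_size = 0
for i, s in reversed(list(enumerate(sizes))):
    if cur_size > max_bytes:      # close the interval once it grows past the cap
        end_interval(cur_range)
        cur_range = []
        cur_size = 0
    cur_range.insert(0, i)
    cur_size += s
end_interval(cur_range)

print(small_ranges)   # [[2, 4]] -- gradients 2..4 would be packed into one tensor
print(large_indices)  # [0, 1]  -- left as-is (the 4096-byte gradient and the lone leading one)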
def init_algebra(*, default_hs_cls='LocalSpace'): from qnet.algebra.core.hilbert_space_algebra import LocalSpace from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression default_hs_cls = getattr(importlib.import_module('qnet'), default_hs_cls) if issubclass(default_hs_cls, LocalSpace): QuantumExpression._default_hs_cls = default_hs_cls else: raise TypeError('default_hs_cls must be a subclass of LocalSpace')
Initialize the algebra system Args: default_hs_cls (str): The name of the :class:`.LocalSpace` subclass that should be used when implicitly creating Hilbert spaces, e.g. in :class:`.OperatorSymbol`
codesearchnet
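init_algebra resolves the configured class by name and checks its base class before installing it. A runnable stand-in using only the standard library, where `collections`, `'OrderedDict'`, and `dict` play the roles of `qnet`, the class name, and `LocalSpace`:

import importlib

base_cls = dict                    # stands in for LocalSpace
cls_name = 'OrderedDict'           # stands in for default_hs_cls

cls = getattr(importlib.import_module('collections'), cls_name)
if issubclass(cls, base_cls):
    print('default implementation:', cls.__name__)
else:
    raise TypeError('%s must be a subclass of %s' % (cls_name, base_cls.__name__))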
def add_documents(self, docs): for sent in docs: sent = map(self.process_token, sent) self._token_count.update(sent)
Update dictionary from a collection of documents. Each document is a list of tokens. Args: docs (list): documents to add.
codesearchnet
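A standalone sketch of the counting pattern behind add_documents, with `collections.Counter` in the role of `self._token_count` and `str.lower` as a stand-in for `self.process_token`:

from collections import Counter

token_count = Counter()
process_token = str.lower          # stand-in for self.process_token

docs = [['The', 'cat', 'sat'], ['the', 'mat']]
for sent in docs:
    token_count.update(map(process_token, sent))

print(token_count)   # Counter({'the': 2, 'cat': 1, 'sat': 1, 'mat': 1})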
def parse_data_types_from_doc_ref(api, doc, namespace_context, ignore_missing_entries=False):
    output = []
    data_types, routes_by_ns = parse_data_types_and_routes_from_doc_ref(
        api, doc, namespace_context, ignore_missing_entries=ignore_missing_entries)
    for d in data_types:
        output.append(d)
    for ns_name, routes in routes_by_ns.items():
        try:
            ns = api.namespaces[ns_name]
            for r in routes:
                for d in ns.get_route_io_data_types_for_route(r):
                    output.append(d)
        except KeyError:
            if not ignore_missing_entries:
                raise
    return output
Given a documentation string, parse it and return all references to other data types. If there are references to routes, also include the data types of those routes.

Args:
    - api: The API containing this doc ref.
    - doc: The documentation string to parse.
    - namespace_context: The namespace name relative to this documentation.
    - ignore_missing_entries: If set, this will skip references to nonexistent data types instead of raising an exception.

Returns:
    - a list of referenced data types
codesearchnet
def _find_max_beta_token_len():
    max_beta_len = -1
    for beta in _map.BETACODE_MAP:
        if len(beta) > max_beta_len:
            max_beta_len = len(beta)
    return max_beta_len
Finds the maximum length of a single betacode token. Returns: The length of the longest key in the betacode map, which corresponds to the longest single betacode token.
codesearchnet
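A standalone sketch of the scan above on a toy mapping (the keys are invented; the real table is `_map.BETACODE_MAP`), plus the equivalent one-liner:

betacode_map = {'a': 'α', 'q': 'θ', 'a)/|': 'ᾄ'}

max_len = -1
for beta in betacode_map:
    if len(beta) > max_len:
        max_len = len(beta)

print(max_len)                                   # 4
print(max(len(beta) for beta in betacode_map))   # equivalent one-liner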
def _validate_alias_name(alias_name):
    if not alias_name:
        raise CLIError(EMPTY_ALIAS_ERROR)
    if not re.match('^[a-zA-Z]', alias_name):
        raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))
Check if the alias name is valid. Args: alias_name: The name of the alias to validate.
codesearchnet
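A runnable sketch of the two checks, with `ValueError` standing in for `CLIError` and inline messages standing in for the error-message constants:

import re

def validate_alias_name(alias_name):
    if not alias_name:
        raise ValueError('alias name cannot be empty')
    if not re.match('^[a-zA-Z]', alias_name):
        raise ValueError('alias cannot start with %r' % alias_name[0])

validate_alias_name('deploy')      # passes silently
try:
    validate_alias_name('2deploy')
except ValueError as exc:
    print(exc)                     # alias cannot start with '2'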
def get_objects_from_form(variant_ids, form_fields, object_type):
    if object_type == 'variant':
        submission_fields = CLINVAR_HEADER
    else:
        submission_fields = CASEDATA_HEADER
    submission_objects = []
    for variant_id in variant_ids:
        subm_obj = {}
        if object_type == 'casedata' and 'casedata_' + variant_id not in form_fields:
            continue
        subm_obj['csv_type'] = object_type
        subm_obj['case_id'] = form_fields.get('case_id')
        subm_obj['category'] = form_fields.get('category@' + variant_id)
        for key, values in submission_fields.items():
            field_value = form_fields.get(key + '@' + variant_id)
            if field_value and field_value != '-':
                if key == 'ref_seq':
                    refseq_raw = field_value.split('|')
                    subm_obj['ref_seq'] = refseq_raw[0]
                    subm_obj['hgvs'] = refseq_raw[1]
                else:
                    subm_obj[key] = field_value
        if object_type == 'casedata':
            subm_obj['_id'] = str(subm_obj['case_id']) + '_' + variant_id + '_' + str(subm_obj['individual_id'])
        else:
            subm_obj['_id'] = str(subm_obj['case_id']) + '_' + variant_id
        submission_objects.append(subm_obj)
    return submission_objects
Extract the objects to be saved in the clinvar database collection. object_type param specifies if these objects are variant or casedata objects Args: variant_ids(list): list of database variant ids form_fields(dict): it's the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER object_type(str): either 'variant' or 'case_data' Returns: submission_objects(list): list of submission objects of either type 'variant' or 'casedata'
codesearchnet
def piece_to_id(input, model_file=None, model_proto=None, name=None): return _gen_sentencepiece_processor_op.sentencepiece_piece_to_id( input, model_file=model_file, model_proto=model_proto, name=name)
Converts piece into vocabulary id. Args: input: An arbitrary tensor of string. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of int32 with the same shape as input.
juraj-google-style
def __init__(self, *schedules: List[Union[ScheduleComponent, Tuple[int, ScheduleComponent]]], name: str = None): self._name = name try: timeslots = [] children = [] for sched_pair in schedules: if not isinstance(sched_pair, (list, tuple)): sched_pair = (0, sched_pair) sched_pair = tuple(sched_pair) insert_time, sched = sched_pair sched_timeslots = sched.timeslots if insert_time: sched_timeslots = sched_timeslots.shift(insert_time) timeslots.append(sched_timeslots.timeslots) children.append(sched_pair) self._timeslots = TimeslotCollection(*itertools.chain(*timeslots)) self._children = tuple(children) except PulseError as ts_err: raise PulseError('Child schedules {0} overlap.'.format(schedules)) from ts_err
Create a schedule composed of the given child schedules (an empty schedule if none are given).

Args:
    *schedules: Child Schedules of this parent Schedule. May either be passed as the list of schedules, or a list of (start_time, schedule) pairs
    name: Name of this schedule

Raises:
    PulseError: If child schedule timeslots overlap.
juraj-google-style
def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient:
    if len(servers) > 1:
        sorted_servers = [server_url for server_url, _ in sort_servers_closest(servers)]
        log.info('Automatically selecting matrix homeserver based on RTT', sorted_servers=sorted_servers)
    elif len(servers) == 1:
        sorted_servers = servers
    else:
        raise TransportError('No valid servers list given')
    last_ex = None
    for server_url in sorted_servers:
        client = GMatrixClient(server_url, *args, **kwargs)
        try:
            client.api._send('GET', '/versions', api_path='/_matrix/client')
        except MatrixError as ex:
            log.warning('Selected server not usable', server_url=server_url, _exception=ex)
            last_ex = ex
        else:
            break
    else:
        raise TransportError(
            'Unable to find a reachable Matrix server. Please check your network connectivity.'
        ) from last_ex
    return client
Given a list of possible servers, chooses the closest available and creates a GMatrixClient

Params:
    servers: list of server urls, with scheme (http or https)
    Rest of args and kwargs are forwarded to GMatrixClient constructor

Returns:
    GMatrixClient instance for one of the available servers
codesearchnet
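A standalone sketch of the failover pattern used by make_client: try the candidates in order, keep the first that responds, and raise from the last error only if every candidate failed (Python's for/else). The `probe` function and the URLs are invented for the demo.

candidates = ['https://bad.example', 'https://good.example', 'https://other.example']

def probe(url):
    # Stand-in for the '/versions' health check; "bad" hosts are unreachable.
    if 'bad' in url:
        raise ConnectionError('unreachable: %s' % url)
    return url

last_ex = None
for url in candidates:
    try:
        client = probe(url)
    except ConnectionError as ex:
        last_ex = ex
    else:
        break          # first reachable candidate wins
else:
    raise RuntimeError('no reachable server') from last_ex

print(client)          # https://good.example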
def migrate_config(self, current_config, config_to_migrate, always_update, update_defaults): value = self._search_config_for_possible_names(current_config) self._update_config(config_to_migrate, value, always_update, update_defaults)
Migrate config value in current_config, updating config_to_migrate.

Given the current_config object, it will attempt to find a value based on all the names given. If no name could be found, then it will simply set the value to the default.

If a value is found and is in the list of previous_defaults, it will either update or keep the old value based on whether update_defaults is set.

If a non-default value is set it will either keep this value or update it based on whether ``always_update`` is true.

Args:
    current_config (dict): Current configuration.
    config_to_migrate (dict): Config to update.
    always_update (bool): Always update value.
    update_defaults (bool): Update values found in previous_defaults
codesearchnet
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if '__init__.py' in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                # Accumulate every failing init so the count in the error is accurate.
                failures.append(os.path.join(root, '__init__.py'))
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
Sort the imports defined in the `_import_structure` of all inits in the repo. Args: check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.
github-repos
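A standalone sketch of the directory scan behind sort_imports_in_all_inits: walk a tree and collect every `__init__.py`. The current directory stands in for PATH_TO_TRANSFORMERS, and no actual sorting check is done here.

import os

init_files = []
for root, _, files in os.walk('.'):
    if '__init__.py' in files:
        init_files.append(os.path.join(root, '__init__.py'))

print(len(init_files), 'package inits found')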
def __getitem__(self, key): if self._dims is not None: if isinstance(key, slice): return TensorShape(self._dims[key]) elif self._v2_behavior: return self._dims[key] else: return self.dims[key] elif isinstance(key, slice): start = key.start if key.start is not None else 0 stop = key.stop if key.step is not None: raise ValueError('Steps are not yet handled') if stop is None: return unknown_shape() elif start < 0 or stop < 0: return unknown_shape() else: return unknown_shape(rank=stop - start) elif self._v2_behavior: return None else: return Dimension(None)
Returns the value of a dimension or a shape, depending on the key. Args: key: If `key` is an integer, returns the dimension at that index; otherwise if `key` is a slice, returns a TensorShape whose dimensions are those selected by the slice from `self`. Returns: An integer if `key` is an integer, or a `TensorShape` if `key` is a slice. Raises: ValueError: If `key` is a slice and `self` is completely unknown and the step is set.
github-repos
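A small usage sketch for TensorShape indexing, assuming TensorFlow 2.x is installed (v2 behavior, so integer indexing returns plain ints or None):

import tensorflow as tf

shape = tf.TensorShape([16, None, 256])
print(shape[0])      # 16
print(shape[1])      # None -- unknown dimension
print(shape[:2])     # (16, None) -- slicing returns a TensorShape

unknown = tf.TensorShape(None)     # shape with unknown rank
print(unknown[2:5])  # an unknown shape of rank 3, per the slice branch above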
def translations(self, **kwargs): path = self._get_id_path('translations') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the translations for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def _is_composite_function(self, func: function_pb2.FunctionDef) -> bool: return func.signature.name.startswith('composite_')
Determine whether a FunctionDef is a composite function.

Args:
    func: A FunctionDef object.

Returns:
    True iff `func` is a composite function.
github-repos