Columns: code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (string, 3 classes)
def _check_cores_output_sizes(self):
    for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
        first_core_list = core_sizes[0][1:]
        for i, core_list in enumerate(core_sizes[1:]):
            if core_list[1:] != first_core_list:
                raise ValueError(
                    'The outputs of the provided cores are not able to be '
                    'concatenated along the first feature dimension. Core 0 has '
                    'shape %s, whereas Core %d has shape %s - these must only '
                    'differ in the first dimension' % (core_sizes[0], i + 1, core_list))
Checks the output_sizes of the cores of the DeepRNN module. Raises: ValueError: if the outputs of the cores cannot be concatenated along their first dimension.
codesearchnet
def _post_process_apply(self, result_data, axis, try_scale=True):
    if try_scale:
        try:
            internal_index = self.compute_index(0, result_data, True)
        except IndexError:
            internal_index = self.compute_index(0, result_data, False)
        try:
            internal_columns = self.compute_index(1, result_data, True)
        except IndexError:
            internal_columns = self.compute_index(1, result_data, False)
    else:
        internal_index = self.compute_index(0, result_data, False)
        internal_columns = self.compute_index(1, result_data, False)
    if not axis:
        index = internal_index
        if len(internal_columns) != len(self.columns):
            columns = internal_columns
        else:
            columns = self.columns
    else:
        columns = internal_columns
        if len(internal_index) != len(self.index):
            index = internal_index
        else:
            index = self.index
    return self.__constructor__(result_data, index, columns)
Recompute the index after applying function. Args: result_data: a BaseFrameManager object. axis: Target axis along which function was applied. Returns: A new PandasQueryCompiler.
codesearchnet
def tensor_name(self): return _get_tensor_name(self.node_name, self.output_slot)
Name of the tensor watched by the debug op. Returns: (`str`) `Tensor` name, in the form of `node_name`:`output_slot`
github-repos
def find_function(self, context, funname):
    if funname in self.builtins:
        return self.builtins[funname]

    func = None

    if isinstance(context, dict):
        if funname in context:
            func = context[funname]
            if isinstance(func, str):
                func = self._deferred_add(func)
                context[funname] = func
    elif hasattr(context, funname):
        func = getattr(context, funname)

    if func is None:
        raise NotFoundError('Function not found', function=funname)

    return func
Find a function in the given context by name. This function will first search the list of builtins and if the desired function is not a builtin, it will continue to search the given context. Args: context (object): A dict or class that is a typedargs context funname (str): The name of the function to find Returns: callable: The found function.
codesearchnet
def update_conversation(self, conversation):
    new_state = conversation.self_conversation_state
    old_state = self._conversation.self_conversation_state
    self._conversation = conversation

    if not new_state.delivery_medium_option:
        new_state.delivery_medium_option.extend(
            old_state.delivery_medium_option
        )

    old_timestamp = old_state.self_read_state.latest_read_timestamp
    new_timestamp = new_state.self_read_state.latest_read_timestamp
    if new_timestamp == 0:
        new_state.self_read_state.latest_read_timestamp = old_timestamp

    for new_entry in conversation.read_state:
        tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp)
        if tstamp == 0:
            continue
        uid = parsers.from_participantid(new_entry.participant_id)
        if uid not in self._watermarks or self._watermarks[uid] < tstamp:
            self._watermarks[uid] = tstamp
Update the internal state of the conversation. This method is used by :class:`.ConversationList` to maintain this instance. Args: conversation: ``Conversation`` message.
juraj-google-style
def __init__(self, file_object):
    if not file_object:
        raise ValueError('Missing file-like object.')

    self._file_object = file_object

    tsk_img_type = getattr(
        pytsk3, 'TSK_IMG_TYPE_EXTERNAL', pytsk3.TSK_IMG_TYPE_RAW)
    pytsk3.Img_Info.__init__(self, url='', type=tsk_img_type)
Initializes an image object. Args: file_object (FileIO): file-like object. Raises: ValueError: if the file-like object is invalid.
juraj-google-style
def decode_metar(self, metar):
    try:
        from metar import Metar
    except:
        return 'Unable to parse metars. Please install parser from https:'
    m = Metar.Metar(metar)
    return m.string()
Simple method that decodes a given metar string. Args: metar (str): The metar data Returns: The metar data in readable format Example:: from pyflightdata import FlightData f=FlightData() f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
codesearchnet
def GetLVMLogicalVolumeByPathSpec(self, path_spec):
    volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)
    if volume_index is None:
        return None
    return self._vslvm_volume_group.get_logical_volume(volume_index)
Retrieves a LVM logical volume for a path specification. Args: path_spec (PathSpec): path specification. Returns: pyvslvm.logical_volume: a LVM logical volume or None if not available.
juraj-google-style
def __init__(self, file_system, mount_point):
    if not file_system or not mount_point:
        raise ValueError('Missing file system or mount point value.')

    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
            file_system.type_indicator):
        if not hasattr(mount_point, 'location'):
            raise errors.PathSpecError(
                'Mount point path specification missing location.')

    super(FileSystemSearcher, self).__init__()
    self._file_system = file_system
    self._mount_point = mount_point
Initializes a file system searcher. Args: file_system (FileSystem): file system. mount_point (PathSpec): mount point path specification that refers to the base location of the file system. Raises: PathSpecError: if the mount point path specification is incorrect. ValueError: when file system or mount point is not set.
juraj-google-style
def _dump_to_pages(dump):
    pos = 0
    ret = []
    start_tag = u'<page>\n'
    end_tag = u'</page>\n'
    while True:
        start_pos = dump.find(start_tag, pos)
        if start_pos == -1:
            break
        start_pos += len(start_tag)
        end_pos = dump.find(end_tag, start_pos)
        if end_pos == -1:
            break
        ret.append(dump[start_pos:end_pos])
        pos = end_pos + len(end_tag)
    return ret
Extract pages from an xml dump. Args: dump: a unicode string Returns: a list of unicode strings
codesearchnet
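Quick usage sketch for _dump_to_pages above; the sample dump string is made up, and the tags searched for include the trailing newline:

dump = u"<page>\nfirst page body\n</page>\n<page>\nsecond page body\n</page>\n"
_dump_to_pages(dump)
# -> [u'first page body\n', u'second page body\n']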
def trace2(A, B):
    A = asarray(A, float)
    B = asarray(B, float)

    layout_error = "Wrong matrix layout."

    if not (len(A.shape) == 2 and len(B.shape) == 2):
        raise ValueError(layout_error)

    if not (A.shape[1] == B.shape[0] and A.shape[0] == B.shape[1]):
        raise ValueError(layout_error)

    return _sum(A.T * B)
Trace of :math:`\mathrm A \mathrm B^\intercal`. Args: A (array_like): Left-hand side. B (array_like): Right-hand side. Returns: float: Trace of :math:`\mathrm A \mathrm B^\intercal`.
juraj-google-style
def thread_safe_client(client, lock=None):
    if lock is None:
        lock = threading.Lock()
    return _ThreadSafeProxy(client, lock)
Create a thread-safe proxy which locks every method call for the given client. Args: client: the client object to be guarded. lock: the lock object that will be used to lock client's methods. If None, a new lock will be used. Returns: A thread-safe proxy for the given client.
juraj-google-style
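The _ThreadSafeProxy class itself is not shown in this record; a minimal sketch of what such a proxy could look like (an assumption, not the actual implementation):

import threading

class _ThreadSafeProxy(object):
    def __init__(self, client, lock):
        self._client = client
        self._lock = lock

    def __getattr__(self, name):
        attr = getattr(self._client, name)
        if not callable(attr):
            return attr

        def locked_call(*args, **kwargs):
            # Serialize every method call on the wrapped client.
            with self._lock:
                return attr(*args, **kwargs)
        return locked_call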
def as_list(self):
    if self._dims is None:
        raise ValueError('as_list() is not defined on an unknown TensorShape.')
    return list(self._dims)
Returns a list of integers or `None` for each dimension. Returns: A list of integers or `None` for each dimension. Raises: ValueError: If `self` is an unknown shape with an unknown rank.
github-repos
def _to_df(self, result, handle_annotations=None):
    annotations = result._data
    if handle_annotations == 'first':
        annotations = [annotations[0]]

    face_results = []
    for i, annotation in enumerate(annotations):
        data_dict = {}
        for field, val in annotation.items():
            if 'Confidence' in field:
                data_dict['face_' + field] = val
            elif 'oundingPoly' in field:
                for j, vertex in enumerate(val['vertices']):
                    for dim in ['x', 'y']:
                        name = '%s_vertex%d_%s' % (field, j + 1, dim)
                        val = vertex[dim] if dim in vertex else np.nan
                        data_dict[name] = val
            elif field == 'landmarks':
                for lm in val:
                    name = 'landmark_' + lm['type'] + '_%s'
                    lm_pos = {name % k: v for k, v in lm['position'].items()}
                    data_dict.update(lm_pos)
            else:
                data_dict[field] = val
        face_results.append(data_dict)

    return pd.DataFrame(face_results)
Converts a Google API Face JSON response into a Pandas Dataframe. Args: result (ExtractorResult): Result object from which to parse out a Dataframe. handle_annotations (str): How returned face annotations should be handled in cases where there are multiple faces. 'first' indicates to only use the first face JSON object, all other values will default to including every face.
codesearchnet
def expand_docstring(**kwargs):
    def _fn_wrapped(fn):
        doc = inspect.cleandoc(fn.__doc__)
        for k, v in six.iteritems(kwargs):
            pattern = r'\$\{' + str(k) + r'\}'
            doc = re.sub(pattern, lambda match: v, doc)
        fn.__doc__ = doc
        return fn
    return _fn_wrapped
Decorator to programmatically expand the docstring. Args: **kwargs: Keyword arguments to set. For each key-value pair `k` and `v`, the key is found as `${k}` in the docstring and replaced with `v`. Returns: Decorated function.
juraj-google-style
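A small usage sketch of the decorator above; the function and keys are illustrative only:

@expand_docstring(dist='Normal', param='loc')
def sample():
    """Draws from ${dist} parameterized by `${param}`."""

print(sample.__doc__)
# -> Draws from Normal parameterized by `loc`.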
def _match_dbname(self, dbname):
    for config in self._clusters:
        if re.match(config['pattern'], dbname):
            return config
    raise Exception('No such database %s.' % dbname)
Map a database name to the Cluster that holds the database. Args: dbname: A database name. Returns: A dict containing the information about the Cluster that holds the database.
juraj-google-style
def get_arrive_stop(self, **kwargs):
    params = {
        'idStop': kwargs.get('stop_number'),
        'cultureInfo': util.language_code(kwargs.get('lang'))
    }
    result = self.make_request('geo', 'get_arrive_stop', **params)
    if not util.check_result(result, 'arrives'):
        return False, 'UNKNOWN ERROR'
    values = util.response_list(result, 'arrives')
    return True, [emtype.Arrival(**a) for a in values]
Obtain bus arrival info in target stop. Args: stop_number (int): Stop number to query. lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Arrival]), or message string in case of error.
codesearchnet
def is_active(self):
    if not self._is_active and self._is_active_lock.acquire(False):
        if self._is_active:
            self._is_active_lock.release()
        else:
            def compute_is_active():
                self._is_active = any(self.generate_run_to_tools())
                self._is_active_lock.release()

            new_thread = threading.Thread(
                target=compute_is_active,
                name='ProfilePluginIsActiveThread')
            new_thread.start()
    return self._is_active
Whether this plugin is active and has any profile data to show. Detecting profile data is expensive, so this process runs asynchronously and the value reported by this method is the cached value and may be stale. Returns: Whether any run has profile data.
codesearchnet
def _get_help_for_command_prefix(self, cmd_prefix):
    lines = []
    resolved_prefix = self._resolve_prefix(cmd_prefix)
    if not resolved_prefix:
        lines.append('Invalid command prefix: "%s"' % cmd_prefix)
        return lines

    lines.append(resolved_prefix)
    if resolved_prefix in self._prefix_to_aliases:
        lines.append(HELP_INDENT + 'Aliases: ' +
                     ', '.join(self._prefix_to_aliases[resolved_prefix]))

    lines.append('')
    help_lines = self._prefix_to_help[resolved_prefix].split('\n')
    for line in help_lines:
        lines.append(HELP_INDENT + line)
    return lines
Compile the help information for a given command prefix. Args: cmd_prefix: Command prefix, as the prefix itself or one of its aliases. Returns: A list of str as the help information for cmd_prefix. If the cmd_prefix does not exist, the returned list of str will indicate that.
github-repos
def projection_name(self, **kwargs: Dict[str, Any]) -> str:
    return self.projection_name_format.format(**kwargs)
Define the projection name for this projector. Note: This function is just a basic placeholder and likely should be overridden. Args: kwargs: Projection information dict combined with additional arguments passed to the projection function. Returns: Projection name string formatted with the passed options. By default, it returns ``projection_name_format`` formatted with the arguments to this function.
codesearchnet
def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray]=None, chain_index: Optional[np.ndarray]=None, remark: Optional[str]=None, parents: Optional[Sequence[str]]=None, parents_chain_index: Optional[Sequence[int]]=None) -> Protein: return Protein(aatype=features['aatype'], atom_positions=result['final_atom_positions'], atom_mask=result['final_atom_mask'], residue_index=features['residue_index'] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index)
Assembles a protein from a prediction. Args: features: Dictionary holding model inputs. result: Dictionary holding model outputs. b_factors: (Optional) B-factors to use for the protein. chain_index: (Optional) Chain indices for multi-chain predictions remark: (Optional) Remark about the prediction parents: (Optional) List of template names Returns: A protein instance.
github-repos
def values(self, column_major=False):
    if column_major:
        return list(map(list, zip(*self._values)))
    return [row[:] for row in self._values]
Return a nested list with the worksheet values. Args: column_major (bool): as list of columns (default list of rows) Returns: list: list of lists with values
juraj-google-style
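Illustration of the two layouts returned by values() above (comment only):

# with self._values == [[1, 2, 3], [4, 5, 6]]
# values()                  -> [[1, 2, 3], [4, 5, 6]]   (copies of the rows)
# values(column_major=True) -> [[1, 4], [2, 5], [3, 6]] (zip(*rows) transpose)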
def plot_carriers(self, temp=300): import matplotlib.pyplot as plt plt.semilogy(self._bz.mu_steps, abs(self._bz._carrier_conc[temp] / (self._bz.vol * 1e-24)), linewidth=3.0, color='r') self._plot_bg_limits() self._plot_doping(temp) plt.xlim(-0.5, self._bz.gap + 0.5) plt.ylim(1e14, 1e22) plt.ylabel("carrier concentration (cm-3)", fontsize=30.0) plt.xlabel("E-E$_f$ (eV)", fontsize=30) plt.xticks(fontsize=25) plt.yticks(fontsize=25) return plt
Plot the carrier concentration in function of Fermi level Args: temp: the temperature Returns: a matplotlib object
juraj-google-style
def _getlatest_ami_id(context): try: response = context.aws_client("ec2").describe_images( Filters=[ {"Name": "is-public", "Values": ["false"]}, {"Name": "name", "Values": [context.service_name + EFConfig.AMI_SUFFIX + "*"]} ]) except: return None if len(response["Images"]) > 0: return sorted(response["Images"], key=itemgetter('CreationDate'), reverse=True)[0]["ImageId"] else: return None
Get the most recent AMI ID for a service Args: context: a populated EFVersionContext object Returns: ImageId or None if no images exist or on error
juraj-google-style
def read_avro(file_path_or_buffer, schema=None, **kwargs):
    if isinstance(file_path_or_buffer, six.string_types):
        with open(file_path_or_buffer, 'rb') as f:
            return __file_to_dataframe(f, schema, **kwargs)
    else:
        return __file_to_dataframe(file_path_or_buffer, schema, **kwargs)
Avro file reader. Args: file_path_or_buffer: Input file path or file-like object. schema: Avro schema. **kwargs: Keyword argument to pandas.DataFrame.from_records. Returns: Class of pd.DataFrame.
juraj-google-style
def MultiOpenOrdered(self, urns, **kwargs):
    precondition.AssertIterableType(urns, rdfvalue.RDFURN)

    urn_filedescs = {}
    for filedesc in self.MultiOpen(urns, **kwargs):
        urn_filedescs[filedesc.urn] = filedesc

    filedescs = []
    for urn in urns:
        try:
            filedescs.append(urn_filedescs[urn])
        except KeyError:
            raise IOError('No associated AFF4 object for `%s`' % urn)

    return filedescs
Opens many URNs and returns handles in the same order. `MultiOpen` can return file handles in arbitrary order. This makes it more efficient and in most cases the order does not matter. However, there are cases where order is important and this function should be used instead. Args: urns: A list of URNs to open. **kwargs: Same keyword arguments as in `MultiOpen`. Returns: A list of file-like objects corresponding to the specified URNs. Raises: IOError: If one of the specified URNs does not correspond to the AFF4 object.
codesearchnet
def __init__(self, name, aliases=None, description=None, urls=None):
    super(DataTypeDefinition, self).__init__()
    self.aliases = aliases or []
    self.description = description
    self.name = name
    self.urls = urls
Initializes a data type definition. Args: name (str): name. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. urls (Optional[list[str]]): URLs.
juraj-google-style
def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True): mkdir_or_exist(frame_dir) if max_num == 0: task_num = self.frame_cnt - start else: task_num = min(self.frame_cnt - start, max_num) if task_num <= 0: raise ValueError('start must be less than total frame number') if start > 0: self._set_real_position(start) def write_frame(file_idx): img = self.read() filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) cv2.imwrite(filename, img) if show_progress: track_progress(write_frame, range(file_start, file_start + task_num)) else: for i in range(task_num): img = self.read() if img is None: break filename = osp.join(frame_dir, filename_tmpl.format(i + file_start)) cv2.imwrite(filename, img)
Convert a video to frame images Args: frame_dir (str): Output directory to store all the frame images. file_start (int): Filenames will start from the specified number. filename_tmpl (str): Filename template with the index as the placeholder. start (int): The starting frame index. max_num (int): Maximum number of frames to be written. show_progress (bool): Whether to show a progress bar.
juraj-google-style
def __init__(self, x=0, y=0, w=0, h=0): self._ptr = ffi.new('SDL_Rect *', [x, y, w, h])
Construct a new Rect with the given position and size. Args: x (int): The x position of the upper left corner of the rectangle. y (int): The y position of the upper left corner of the rectangle. w (int): The width of the rectangle. h (int): The height of the rectangle.
juraj-google-style
def run(self, source, **kwargs):
    kwargs['output'] = self.__graph__()
    if isinstance(source, str):
        import json
        source = json.loads(source)
    self.source = source
    super(JSONProcessor, self).run(**kwargs)
    self.output = kwargs['output']
    return self.output
Method takes a JSON source and any keywords and transforms from JSON to Lean BIBFRAME 2.0 triples Args: ---- source: str, dict
codesearchnet
def process_and_frame(self, doc: Document): nested_docs = self.process_ems(doc) parent_kg = doc.cdr_document.get('knowledge_graph', None) if parent_kg: if nested_docs and len(nested_docs) > 0: for nested_doc in nested_docs: json_doc = nested_doc.cdr_document doc_id = json_doc['doc_id'] if doc_id != doc.doc_id: for field_name in list(parent_kg): field_extractions = parent_kg[field_name] if not isinstance(field_extractions, list): field_extractions = [field_extractions] for i in range(0, len(field_extractions)): field_extraction = field_extractions[i] if 'value' in field_extraction and field_extraction['value'] == doc_id: del field_extractions[i] field_extractions.append( {'value': json_doc, 'key': field_extraction['key'], 'is_nested': True})
Processes a document and if it has child docs, embeds them in the parent document. Only works for 1 level of nesting. Kind of hack, will implement properly later Args: doc: input document to be run etk modules on Returns:
juraj-google-style
def deployment_groups(self):
    if not self.__deployment_groups:
        self.__deployment_groups = DeploymentGroups(self.__connection)
    return self.__deployment_groups
Gets the Deployment Groups API client. Returns: DeploymentGroups:
codesearchnet
def reduce(x, op='sum'):
    import warnings
    warnings.warn(
        "Deprecated API. Use ``sum`` or ``mean`` instead.",
        DeprecationWarning)
    from .function_bases import reduce_sum, reduce_mean
    if op == 'sum':
        return reduce_sum(x)
    elif op == 'mean':
        return reduce_mean(x)
    raise ValueError()
Reduction function with given operation. Args: x (Variable): An input. op (str): 'sum' or 'mean'. Note: This is deprecated. Use ``mean`` or ``sum`` instead.
juraj-google-style
def filter_with_legacy_function(self, predicate) -> 'DatasetV2':
    from tensorflow.python.data.ops import filter_op
    return filter_op._FilterDataset(self, predicate, use_legacy_function=True)
Filters this dataset according to `predicate`. Note: This is an escape hatch for existing uses of `filter` that do not work with V2 functions. New uses are strongly discouraged and existing uses should migrate to `filter` as this method will be removed in V2. Args: predicate: A function mapping a (nested) structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a scalar `tf.bool` tensor. Returns: Dataset: The `Dataset` containing the elements of this dataset for which `predicate` is `True`.
github-repos
def skip(self, delta): def update_fn(v): return self._skip_single_var(v, delta) if values_util.is_saving_non_distributed(): return update_fn(self.state) if self._distribution_strategy is not None: with distribute_lib.enter_or_assert_strategy(self._distribution_strategy): if distribute_lib.in_cross_replica_context(): values_util.mark_as_unsaveable() if distribute_lib.in_cross_replica_context() or 'CentralStorage' in type(self._distribution_strategy).__name__: return distribute_lib.get_strategy().extended.update(self.state, update_fn) return update_fn(self.state)
Advance the counter of a counter-based RNG. Args: delta: the amount of advancement. The state of the RNG after `skip(n)` will be the same as that after `normal([n])` (or any other distribution). The actual increment added to the counter is an unspecified implementation detail. Returns: A `Tensor` of type `int64`.
github-repos
def transpose(self, name=None, activate_final=None):
    if name is None:
        name = self.module_name + '_transpose'
    if activate_final is None:
        activate_final = self.activate_final
    output_sizes = [(lambda l=layer: l.input_shape[1]) for layer in self._layers]
    output_sizes.reverse()
    return MLP(name=name,
               output_sizes=output_sizes,
               activation=self.activation,
               activate_final=activate_final,
               initializers=self.initializers,
               partitioners=self.partitioners,
               regularizers=self.regularizers,
               use_bias=self.use_bias,
               use_dropout=self.use_dropout)
Returns transposed `MLP`. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. Returns: Matching transposed `MLP` module.
codesearchnet
def clean_df(df, fill_nan=True, drop_empty_columns=True):
    if fill_nan:
        df = df.fillna(value=np.nan)
    if drop_empty_columns:
        df = df.dropna(axis=1, how='all')
    return df.sort_index()
Clean a pandas dataframe by: 1. Filling empty values with Nan 2. Dropping columns with all empty values Args: df: Pandas DataFrame fill_nan (bool): If any empty values (strings, None, etc) should be replaced with NaN drop_empty_columns (bool): If columns whose values are all empty should be dropped Returns: DataFrame: cleaned DataFrame
juraj-google-style
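Illustrative usage of clean_df above, assuming pandas as pd and numpy as np are imported:

df = pd.DataFrame({'a': [1, None, 3], 'b': [None, None, None]})
cleaned = clean_df(df)
# Column 'b' is dropped (all values empty); remaining empties become NaN:
#      a
# 0  1.0
# 1  NaN
# 2  3.0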
def _arguments(code, module): arg_parser = CommandParser.create('') try: builtins = {'source': _table, 'datestring': _datestring} env = {} env.update(builtins) exec(code, env) for key in env: if ((key in builtins) or (key[0] == '_')): continue val = env[key] key = ('--%s' % key) if isinstance(val, bool): if val: arg_parser.add_argument(key, default=val, action='store_true') else: arg_parser.add_argument(key, default=val, action='store_false') elif (isinstance(val, basestring) or isinstance(val, int) or isinstance(val, float) or isinstance(val, int)): arg_parser.add_argument(key, default=val) elif isinstance(val, list): arg_parser.add_argument(key, default=val, nargs='+') elif isinstance(val, tuple): arg_parser.add_argument(key, default=list(val), nargs='+') elif (isinstance(val, dict) and ('type' in val)): if (val['type'] == 'datestring'): arg_parser.add_argument(key, default='', type=_make_string_formatter(val['format'], offset=val['offset'])) elif (val['type'] == 'table'): if (val['format'] is not None): arg_parser.add_argument(key, default='', type=_make_table_formatter(val['format'], offset=val['offset'])) else: arg_parser.add_argument(key, default=val['name'], type=_make_table) else: raise Exception(('Cannot generate argument for %s of type %s' % (key, type(val)))) else: raise Exception(('Cannot generate argument for %s of type %s' % (key, type(val)))) except Exception as e: print(("%%sql arguments: %s from code '%s'" % (str(e), str(code)))) return arg_parser
Define pipeline arguments. Args: code: the Python code to execute that defines the arguments.
codesearchnet
def cost_matrix(self, set_a, set_b, time_a, time_b):
    costs = np.zeros((len(set_a), len(set_b)))
    for a, item_a in enumerate(set_a):
        for b, item_b in enumerate(set_b):
            costs[a, b] = self.total_cost_function(item_a, item_b, time_a, time_b)
    return costs
Calculates the costs (distances) between the items in set a and set b at the specified times. Args: set_a: List of STObjects set_b: List of STObjects time_a: time at which objects in set_a are evaluated time_b: time at whcih object in set_b are evaluated Returns: A numpy array with shape [len(set_a), len(set_b)] containing the cost matrix between the items in set a and the items in set b.
juraj-google-style
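Toy illustration of the cost-matrix shape, with Euclidean distance standing in for total_cost_function (an assumption; the real inputs are STObjects):

import numpy as np

set_a = [np.array([0.0, 0.0]), np.array([1.0, 1.0])]
set_b = [np.array([0.0, 3.0])]
costs = np.zeros((len(set_a), len(set_b)))
for a, item_a in enumerate(set_a):
    for b, item_b in enumerate(set_b):
        costs[a, b] = np.linalg.norm(item_a - item_b)
# costs has shape (2, 1): [[3.0], [2.236...]]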
def rebalance(self, weight, child, base=np.nan, update=True):
    if weight == 0:
        if child in self.children:
            return self.close(child)
        else:
            return

    if np.isnan(base):
        base = self.value

    if child not in self.children:
        c = SecurityBase(child)
        c.setup(self._universe)
        c.update(self.now)
        self._add_child(c)

    c = self.children[child]
    delta = weight - c.weight
    c.allocate(delta * base)
Rebalance a child to a given weight. This is a helper method to simplify code logic. This method is used when we want to se the weight of a particular child to a set amount. It is similar to allocate, but it calculates the appropriate allocation based on the current weight. Args: * weight (float): The target weight. Usually between -1.0 and 1.0. * child (str): child to allocate to - specified by name. * base (float): If specified, this is the base amount all weight delta calculations will be based off of. This is useful when we determine a set of weights and want to rebalance each child given these new weights. However, as we iterate through each child and call this method, the base (which is by default the current value) will change. Therefore, we can set this base to the original value before the iteration to ensure the proper allocations are made. * update (bool): Force update?
codesearchnet
def DeleteGRRTempFile(path): precondition.AssertType(path, Text) if not os.path.isabs(path): raise ErrorBadPath("Path must be absolute") prefix = config.CONFIG["Client.tempfile_prefix"] directories = [ GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"] ] if not _CheckIfPathIsValidForDeletion( path, prefix=prefix, directories=directories): msg = ("Can't delete temp file %s. Filename must start with %s " "or lie within any of %s.") raise ErrorNotTempFile(msg % (path, prefix, ";".join(directories))) if os.path.exists(path): files.FILE_HANDLE_CACHE.Flush() os.remove(path) else: raise ErrorNotAFile("%s does not exist." % path)
Delete a GRR temp file. To limit possible damage the path must be absolute and either the file must be within any of the Client.tempdir_roots or the file name must begin with Client.tempfile_prefix. Args: path: path string to file to be deleted. Raises: OSError: Permission denied, or file not found. ErrorBadPath: Path must be absolute. ErrorNotTempFile: Filename must start with Client.tempfile_prefix. ErrorNotAFile: File to delete does not exist.
juraj-google-style
def find_usbserial(vendor, product):
    if platform.system() == 'Linux':
        vendor, product = [('%04x' % (x)).strip() for x in (vendor, product)]
        return linux_find_usbserial(vendor, product)
    elif platform.system() == 'Darwin':
        return osx_find_usbserial(vendor, product)
    else:
        raise NotImplementedError('Cannot find serial ports on %s' %
                                  platform.system())
Find the tty device for a given usbserial devices identifiers. Args: vendor: (int) something like 0x0000 product: (int) something like 0x0000 Returns: String, like /dev/ttyACM0 or /dev/tty.usb...
juraj-google-style
def CreateStorageWriter(cls, storage_format, session, path):
    if storage_format == definitions.STORAGE_FORMAT_SQLITE:
        return sqlite_writer.SQLiteStorageFileWriter(session, path)
    return None
Creates a storage writer. Args: session (Session): session the storage changes are part of. path (str): path to the storage file. storage_format (str): storage format. Returns: StorageWriter: a storage writer or None if the storage file cannot be opened or the storage format is not supported.
juraj-google-style
def convex_hull_collide(nodes1, nodes2):
    polygon1 = _helpers.simple_convex_hull(nodes1)
    _, polygon_size1 = polygon1.shape
    polygon2 = _helpers.simple_convex_hull(nodes2)
    _, polygon_size2 = polygon2.shape

    if polygon_size1 == 2 and polygon_size2 == 2:
        return line_line_collide(polygon1, polygon2)
    else:
        return _helpers.polygon_collide(polygon1, polygon2)
Determine if the convex hulls of two curves collide. .. note:: This is a helper for :func:`from_linearized`. Args: nodes1 (numpy.ndarray): Control points of a first curve. nodes2 (numpy.ndarray): Control points of a second curve. Returns: bool: Indicating if the convex hulls collide.
juraj-google-style
def _AvgPoolAlongRows(self, input_matrix, row_seq, overlapping):
    output_image = np.zeros(input_matrix.shape[1])
    row_max = row_seq[-1]
    for i in range(row_seq.shape[0] - 1):
        row_start = row_seq[i]
        row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
        row_end = min(row_end, row_max)
        output_image = np.vstack((output_image,
                                  np.mean(input_matrix[row_start:row_end, :], axis=0)))
    return output_image[1:, :]
Perform average pool along row of a 2-D matrix based on row_seq. Args: input_matrix: A 2-D matrix. row_seq: Cumulative pooling sequence along row. overlapping: Whether or not use overlapping when pooling. Returns: A 2-D matrix, with * num_rows = len(row_seq)-1 * num_cols = input_matrix.num_cols.
github-repos
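Small numeric illustration of the pooling above (comment only):

# input_matrix = [[1, 1], [3, 3], [5, 5], [7, 7]], row_seq = [0, 2, 4]
# overlapping=False -> pools rows [0:2] and [2:4] -> [[2, 2], [6, 6]]
# overlapping=True  -> pools rows [0:3] and [2:4] -> [[3, 3], [6, 6]]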
def _format_time(self, time_per_unit, unit_name):
    formatted = ''
    if time_per_unit >= 1 or time_per_unit == 0:
        formatted += f' {time_per_unit:.0f}s/{unit_name}'
    elif time_per_unit >= 0.001:
        formatted += f' {time_per_unit * 1000.0:.0f}ms/{unit_name}'
    else:
        formatted += f' {time_per_unit * 1000000.0:.0f}us/{unit_name}'
    return formatted
format a given duration to display to the user. Given the duration, this function formats it in either milliseconds or seconds and displays the unit (i.e. ms/step or s/epoch). Args: time_per_unit: the duration to display unit_name: the name of the unit to display Returns: A string with the correctly formatted duration and units
github-repos
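Boundary behaviour of _format_time, computed from the code above (comment only):

# _format_time(2.0,    'epoch') -> ' 2s/epoch'
# _format_time(0.5,    'step')  -> ' 500ms/step'
# _format_time(0.0002, 'step')  -> ' 200us/step'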
def __init__(self, hash=None, height=None, items=None):
    self.TransactionHash = hash
    self.TransactionHeight = height
    if items is None:
        self.Items = []
    else:
        self.Items = items
Create an instance. Args: hash (UInt256): height (int): items (list):
juraj-google-style
def has_current_path(self, path, **kwargs):
    try:
        return self.assert_current_path(path, **kwargs)
    except ExpectationNotMet:
        return False
Checks if the page has the given path. Args: path (str | RegexObject): The string or regex that the current "path" should match. **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`. Returns: bool: Whether it matches.
juraj-google-style
def list_datasets(self):
    def _row_gen(attributes):
        for attr in attributes.values():
            yield attr.name, attr.display_name

    return pd.DataFrame.from_records(
        _row_gen(self.datasets), columns=['name', 'display_name'])
Lists available datasets in a readable DataFrame format. Returns: pd.DataFrame: Frame listing available datasets.
codesearchnet
def byte_swap_string_content(buffer, from_endiness, to_endiness):
    num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness)
    string_content = bytearray(buffer.data[4 * (num_of_strings + 2):])
    prefix_data = b''.join([
        int.from_bytes(buffer.data[i:i + 4], from_endiness).to_bytes(4, to_endiness)
        for i in range(0, (num_of_strings + 1) * 4 + 1, 4)
    ])
    buffer.data = prefix_data + string_content
Helper function for byte-swapping the string buffer. Args: buffer: TFLite string buffer of from_endiness format. from_endiness: The original endianness format of the string buffer. to_endiness: The destined endianness format of the string buffer.
github-repos
def get_transcript_ids_for_ensembl_gene_ids(self, gene_ids, hgnc_symbols):
    chroms = {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11",
              "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22",
              "X", "Y"}

    headers = {"content-type": "application/json"}

    transcript_ids = []
    for gene_id in gene_ids:
        self.attempt = 0
        ext = "/overlap/id/{}?feature=transcript".format(gene_id)
        r = self.ensembl_request(ext, headers)

        for item in json.loads(r):
            if item["biotype"] not in ["protein_coding", "polymorphic_pseudogene"]:
                continue

            if (item["Parent"] != gene_id
                    or item["seq_region_name"] not in chroms
                    or all([symbol not in item["external_name"] for symbol in hgnc_symbols])):
                continue

            transcript_ids.append(item["id"])

    return transcript_ids
fetch the ensembl transcript IDs for a given ensembl gene ID. Args: gene_ids: list of Ensembl gene IDs for the gene hgnc_symbols: list of possible HGNC symbols for gene
juraj-google-style
def random_inputs(num_devices,
                  input_shape=gin.REQUIRED,
                  input_dtype=np.int32,
                  input_range=(0, 255),
                  output_shape=gin.REQUIRED,
                  output_dtype=np.int32,
                  output_range=(0, 9)):
    if input_shape[0] % num_devices != 0:
        tf.logging.fatal(
            'num_devices[%d] should divide the first dimension of input_shape[%s]',
            num_devices, input_shape)
    if output_shape[0] % num_devices != 0:
        tf.logging.fatal(
            'num_devices[%d] should divide the first dimension of output_shape[%s]',
            num_devices, output_shape)

    def random_minibatches():
        """Generate a stream of random mini-batches."""
        if input_dtype in [np.float16, np.float32, np.float64]:
            rand = np.random.uniform
        else:
            rand = np.random.random_integers
        while True:
            inp = rand(input_range[0], input_range[1], input_shape)
            inp = inp.astype(input_dtype)
            out = rand(output_range[0], output_range[1], output_shape)
            out = out.astype(output_dtype)
            yield inp, out

    input_shape_without_batch = list(input_shape)[1:]
    return Inputs(train_stream=random_minibatches,
                  train_eval_stream=random_minibatches,
                  eval_stream=random_minibatches,
                  input_shape=input_shape_without_batch)
Make random Inputs for debugging. Args: num_devices: how many devices to build the inputs for. input_shape: the shape of inputs (including batch dimension). input_dtype: the type of the inputs (int32 by default). input_range: the range of inputs (defaults to (0, 255)). output_shape: the shape of outputs (including batch dimension). output_dtype: the type of the outputs (int32 by default). output_range: the range of outputs (defaults to (0, 9)). Returns: trax.inputs.Inputs
codesearchnet
def occurrence(self, file_name=None, path=None, date=None):
    if self._indicator_data.get('type') != 'File':
        return None
    occurrence_obj = FileOccurrence(file_name, path, date)
    self._occurrences.append(occurrence_obj)
    return occurrence_obj
Add a file Occurrence. Args: file_name (str, optional): The file name for this occurrence. path (str, optional): The file path for this occurrence. date (str, optional): The datetime expression for this occurrence. Returns: obj: An instance of Occurrence.
juraj-google-style
def _ReadUUIDDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False): return self._ReadFixedSizeDataTypeDefinition(definitions_registry, definition_values, data_types.UUIDDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, default_size=16, is_member=is_member, supported_size_values=(16,))
Reads an UUID data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: UUIDDataTypeDefinition: UUID data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
codesearchnet
def get_recipe(self, recipe_name):
    if recipe_name.endswith('.yaml'):
        recipe = self._recipes.get(
            RecipeObject.FromFile(recipe_name, self._recipe_actions,
                                  self._recipe_resources).name)
    else:
        recipe = self._recipes.get(recipe_name)

    if recipe is None:
        raise RecipeNotFoundError('Could not find recipe',
                                  recipe_name=recipe_name,
                                  known_recipes=[x for x in self._recipes.keys()])

    return recipe
Get a recipe by name. Args: recipe_name (str): The name of the recipe to fetch. Can be either the yaml file name or the name of the recipe.
codesearchnet
def connection_required(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.target_connected():
            raise errors.JLinkException('Target is not connected.')
        return func(self, *args, **kwargs)
    return wrapper
Decorator to specify that a target connection is required in order for the given method to be used. Args: func (function): function being decorated Returns: The wrapper function.
juraj-google-style
def unsplat(f: Callable[[Iterable], A]) -> Callable[..., A]:
    def unsplatted(*args):
        return f(args)
    return unsplatted
Convert a function taking a single iterable argument into a function taking multiple arguments. Args: f: Any function taking a single iterable argument Returns: A function that accepts multiple arguments. Each argument of this function is passed as an element of an iterable to ``f``. Example: $ def f(a): $ return a[0] + a[1] + a[2] $ $ f([1, 2, 3]) # 6 $ g = unsplat(f) $ g(1, 2, 3) # 6
juraj-google-style
def assert_not_visible(self, selector, testid=None, **kwargs): self.info_log( "Assert not visible selector(%s) testid(%s)" % (selector, testid) ) highlight = kwargs.get( 'highlight', BROME_CONFIG['highlight']['highlight_on_assertion_failure'] ) self.debug_log("effective highlight: %s" % highlight) wait_until_not_visible = kwargs.get( 'wait_until_not_visible', BROME_CONFIG['proxy_driver']['wait_until_not_visible_before_assert_not_visible'] ) self.debug_log( "effective wait_until_not_visible: %s" % wait_until_not_visible ) if wait_until_not_visible: self.wait_until_not_visible(selector, raise_exception=False) element = self.find( selector, raise_exception=False, wait_until_visible=False, wait_until_present=False ) if element and element.is_displayed(raise_exception=False): data = self.execute_script( "return arguments[0].getBoundingClientRect();", element._element ) if highlight: element.highlight( style=BROME_CONFIG['highlight']['style_on_assertion_failure'] ) if testid is not None: self.create_test_result(testid, False, extra_data={ 'bounding_client_rect': data, 'video_x_offset': self.browser_config.get('video_x_offset', 0), 'video_y_offset': self.browser_config.get('video_y_offset', 0) }) return False else: if testid is not None: self.create_test_result(testid, True) return True
Assert that the element is not visible in the dom Args: selector (str): the selector used to find the element test_id (str): the test_id or a str Kwargs: wait_until_not_visible (bool) highlight (bool) Returns: bool: True is the assertion succeed; False otherwise.
juraj-google-style
def to_step_result(func):
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        res = func(*args, **kwargs)
        if not res:
            res = [StepResult.OK]
        if not hasattr(res, "__iter__"):
            res = [res]
        return res
    return wrapper
Convert a function return to a list of StepResults. All Step subclasses automatically wrap their __call__ method's result with this wrapper. If the result is empty, a default result of `[StepResult.OK]` is used; if it is not already iterable, it is converted into a list. Args: func: The function to wrap.
juraj-google-style
def greater(x, y): return math_ops.greater(x, y)
Element-wise truth value of (x > y). Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.
github-repos
def register_file_reader(*args):
    def do_registration(file_reader_fn, is_readable_fn):
        if file_reader_fn not in list(zip(*_FILE_READERS))[0]:
            _FILE_READERS.append((file_reader_fn, is_readable_fn))

    if len(args) == 1:
        return functools.partial(do_registration, is_readable_fn=args[0])
    elif len(args) == 2:
        do_registration(*args)
    else:
        err_str = 'register_file_reader() takes 1 or 2 arguments ({} given)'
        raise TypeError(err_str.format(len(args)))
Register a file reader for use in parse_config_file. Registered file readers will be used to try reading files passed to `parse_config_file`. All file readers (beginning with the default `open`) will be tried until one of them succeeds at opening the file. This function may also be be used used as a decorator. For example: @register_file_reader(IOError) def exotic_data_source(filename): ... Args: *args: (When used as a decorator, only the existence check is supplied.) - file_reader_fn: The file reader function to register. This should be a function that can be used as a context manager to open a file and provide a file-like object, similar to Python's built-in `open`. - is_readable_fn: A function taking the file path and returning a boolean indicating whether the file can be read by `file_reader_fn`. Returns: `None`, or when used as a decorator, a function that will perform the registration using the supplied readability predicate.
codesearchnet
def _validate_config(config): if not isinstance(config, list): raise TypeError('Config must be a list') for config_dict in config: if not isinstance(config_dict, dict): raise TypeError('Config must be a list of dictionaries') label = config_dict.keys()[0] cfg = config_dict[label] if not isinstance(cfg, dict): raise TypeError('Config structure is broken') if 'host' not in cfg: raise TypeError('Config entries must have a value for host') if not isinstance(cfg['host'], str) and not isinstance(cfg['host'], list): raise TypeError('Host must be a string or a list.') if 'port' not in cfg: raise TypeError('Config entries must have a value for port') if not isinstance(cfg['port'], int): raise TypeError('Port must be an int') if 'dbpath' not in cfg: raise TypeError('Config entries must have a value for dbpath') if not isinstance(cfg['dbpath'], str): if not isinstance(cfg['dbpath'], list): raise TypeError('Dbpath must either a string or a list of ' 'strings') for dbpath in cfg['dbpath']: if not isinstance(dbpath, str): raise TypeError('Dbpath must either a string or a list ' 'of strings') if ('read_preference' in cfg and not isinstance(cfg['read_preference'], str)): raise TypeError('Read_preference must be a string') if ('replicaSet' in cfg and not isinstance(cfg['replicaSet'], str)): raise TypeError('replicaSet must be a string')
Validate that the provided configurtion is valid. Each dictionary in the configuration list must have the following mandatory entries : {label: {host(string), port(int), dbpath(string|list of strings)}} It can also contain 1 optional key: {read_preference(string)} Args: config: the list of configurations provided at instantiation Raises: TypeError: a fault in the configurations is found
juraj-google-style
def push_error_to_driver(worker, error_type, message, driver_id=None):
    if driver_id is None:
        driver_id = ray.DriverID.nil()
    worker.raylet_client.push_error(driver_id, error_type, message, time.time())
Push an error message to the driver to be printed in the background. Args: worker: The worker to use. error_type (str): The type of the error. message (str): The message that will be printed in the background on the driver. driver_id: The ID of the driver to push the error message to. If this is None, then the message will be pushed to all drivers.
juraj-google-style
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
    columns = ['bond', 'angle', 'dihedral']
    C_dist = zmat_dist.loc[:, columns].values.T
    try:
        C_dist = C_dist.astype('f8')
        C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
    except (TypeError, AttributeError):
        C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
    cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
    return Cartesian(atoms=zmat_dist['atom'],
                     coords=cart_dist,
                     index=zmat_dist.index)
Apply the gradient for transformation to cartesian space onto zmat_dist. Args: grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array. The mathematical details of the index layout is explained in :meth:`~chemcoord.Cartesian.get_grad_zmat()`. zmat_dist (:class:`~chemcoord.Zmat`): Distortions in Zmatrix space. Returns: :class:`~chemcoord.Cartesian`: Distortions in cartesian space.
juraj-google-style
def has_no_narrow_start(neuron, frac=0.9):
    bad_ids = [(neurite.root_node.id, [neurite.root_node.points[1]])
               for neurite in neuron.neurites
               if neurite.root_node.points[1][COLS.R] < frac * neurite.root_node.points[2][COLS.R]]
    return CheckResult(len(bad_ids) == 0, bad_ids)
Check if neurites have a narrow start Arguments: neuron(Neuron): The neuron object to test frac(float): Ratio that the second point must be smaller than the first Returns: CheckResult with a list of all first segments of neurites with a narrow start
juraj-google-style
def _get_args(cls, args):
    if not isinstance(args, tuple) or not len(args) == 2:
        raise TypeError(
            "{}[...] takes exactly two arguments.".format(cls.__name__)
        )
    return super(_LengthBoundedMeta, cls)._get_args(args + (len,))
Return the parameters necessary to check type boundaries. Args: args: A tuple with two parameters: a type, and a slice representing the minimum and maximum lengths allowed for values of that type. Returns: A tuple with three parameters: a type, a slice, and the len function.
juraj-google-style
def write_content(self, content, destination):
    directory = os.path.dirname(destination)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)

    with io.open(destination, 'w', encoding='utf-8') as f:
        f.write(content)

    return destination
Write given content to destination path. It will create needed directory structure first if it contain some directories that does not allready exists. Args: content (str): Content to write to target file. destination (str): Destination path for target file. Returns: str: Path where target file has been written.
codesearchnet
def find_from(path):
    realpath = os.path.realpath(path)
    config_path = os.path.join(realpath, '.ensime')
    if os.path.isfile(config_path):
        return config_path
    elif realpath == os.path.abspath('/'):
        return None
    else:
        dirname = os.path.dirname(realpath)
        return ProjectConfig.find_from(dirname)
Find path of an .ensime config, searching recursively upward from path. Args: path (str): Path of a file or directory from where to start searching. Returns: str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
codesearchnet
def enable_argscope_for_module(module, log_shape=True):
    if is_tfv2() and module == tf.layers:
        module = tf.compat.v1.layers
    for name, obj in getmembers(module):
        if isfunction(obj):
            setattr(module, name,
                    enable_argscope_for_function(obj, log_shape=log_shape))
Overwrite all functions of a given module to support argscope. Note that this function monkey-patches the module and therefore could have unexpected consequences. It has been only tested to work well with ``tf.layers`` module. Example: .. code-block:: python import tensorflow as tf enable_argscope_for_module(tf.layers) Args: log_shape (bool): print input/output shapes of each function.
juraj-google-style
def now_playing(self, **kwargs):
    path = self._get_path('now_playing')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the list of movies playing in theatres. This list refreshes every day. The maximum number of items this list will include is 100. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def _from_any_pb(pb_type, any_pb):
    msg = pb_type()
    if not any_pb.Unpack(msg):
        raise TypeError(
            "Could not convert {} to {}".format(
                any_pb.__class__.__name__, pb_type.__name__
            )
        )
    return msg
Converts an Any protobuf to the specified message type Args: pb_type (type): the type of the message that any_pb stores an instance of. any_pb (google.protobuf.any_pb2.Any): the object to be converted. Returns: pb_type: An instance of the pb_type message. Raises: TypeError: if the message could not be converted.
juraj-google-style
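A hedged usage sketch of _from_any_pb with well-known protobuf types (illustrative only):

from google.protobuf import any_pb2, duration_pb2

any_pb = any_pb2.Any()
any_pb.Pack(duration_pb2.Duration(seconds=30))

duration = _from_any_pb(duration_pb2.Duration, any_pb)  # ok: the packed type matches
# Passing a mismatched message type makes Unpack() return False, so TypeError is raised.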
def _convert_service_account_credentials(credentials):
    info = credentials.serialization_data.copy()
    info['token_uri'] = credentials.token_uri
    return google.oauth2.service_account.Credentials.from_service_account_info(
        info)
Converts to :class:`google.oauth2.service_account.Credentials`. Args: credentials (Union[ oauth2client.service_account.ServiceAccountCredentials, oauth2client.service_account._JWTAccessCredentials]): The credentials to convert. Returns: google.oauth2.service_account.Credentials: The converted credentials.
juraj-google-style
class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder): def __init__(self, config: PerceiverConfig, output_shape: List[int], position_encoding_type: str, **decoder_kwargs) -> None: super().__init__() if len(output_shape) != 4: raise ValueError(f'Expected rank 4 output_shape, got {output_shape}.') self.output_shape = output_shape self.output_num_channels = decoder_kwargs['output_num_channels'] self.decoder = PerceiverBasicDecoder(config, output_index_dims=self.output_shape[1:4], position_encoding_type=position_encoding_type, **decoder_kwargs) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return self.decoder.decoder_query(inputs, modality_sizes=modality_sizes, inputs_without_pos=inputs_without_pos, subsampled_points=subsampled_points) def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z) logits = decoder_outputs.logits logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]]) return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video reshaping logic. Args: config ([*PerceiverConfig*]): Model configuration. output_shape (`List[int]`): Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension. position_encoding_type (`str`): The type of position encoding to use. Can be either "trainable", "fourier", or "none".
github-repos
def get_hash_of_dirs(directory):
    import hashlib
    sha = hashlib.sha512()
    if not os.path.exists(directory):
        return -1

    for root, _, files in os.walk(directory):
        for name in files:
            filepath = local.path(root) / name
            if filepath.exists():
                with open(filepath, 'rb') as next_file:
                    for line in next_file:
                        sha.update(line)

    return sha.hexdigest()
Recursively hash the contents of the given directory. Args: directory (str): The root directory we want to hash. Returns: A hash of all the contents in the directory.
codesearchnet
def add_device(self, device, container):
    if self.findtext("is_smart") == "false":
        self.add_object_to_path(device, container)
    else:
        raise ValueError("Devices may not be added to smart groups.")
Add a device to a group. Wraps JSSObject.add_object_to_path. Args: device: A JSSObject to add (as list data), to this object. location: Element or a string path argument to find()
juraj-google-style
def get_object(self, object_ids): for object_id in object_ids: if not isinstance(object_id, ObjectID): raise TypeError( "Attempting to call `get` on the value {}, " "which is not an ray.ObjectID.".format(object_id)) plain_object_ids = [ plasma.ObjectID(object_id.binary()) for object_id in object_ids ] for i in range(0, len(object_ids), ray._config.worker_fetch_request_size()): self.raylet_client.fetch_or_reconstruct( object_ids[i:(i + ray._config.worker_fetch_request_size())], True) final_results = self.retrieve_and_deserialize(plain_object_ids, 0) unready_ids = { plain_object_ids[i].binary(): i for (i, val) in enumerate(final_results) if val is plasma.ObjectNotAvailable } if len(unready_ids) > 0: while len(unready_ids) > 0: object_ids_to_fetch = [ plasma.ObjectID(unready_id) for unready_id in unready_ids.keys() ] ray_object_ids_to_fetch = [ ObjectID(unready_id) for unready_id in unready_ids.keys() ] fetch_request_size = ray._config.worker_fetch_request_size() for i in range(0, len(object_ids_to_fetch), fetch_request_size): self.raylet_client.fetch_or_reconstruct( ray_object_ids_to_fetch[i:(i + fetch_request_size)], False, self.current_task_id, ) results = self.retrieve_and_deserialize( object_ids_to_fetch, max([ ray._config.get_timeout_milliseconds(), int(0.01 * len(unready_ids)), ]), ) for i, val in enumerate(results): if val is not plasma.ObjectNotAvailable: object_id = object_ids_to_fetch[i].binary() index = unready_ids[object_id] final_results[index] = val unready_ids.pop(object_id) self.raylet_client.notify_unblocked(self.current_task_id) assert len(final_results) == len(object_ids) return final_results
Get the value or values in the object store associated with the IDs. Return the values from the local object store for object_ids. This will block until all the values for object_ids have been written to the local object store. Args: object_ids (List[object_id.ObjectID]): A list of the object IDs whose values should be retrieved.
juraj-google-style
def v4_int_to_packed(address):
    if address > _BaseV4._ALL_ONES:
        raise ValueError('Address too large for IPv4')
    return Bytes(struct.pack('!I', address))
The binary representation of this address. Args: address: An integer representation of an IPv4 IP address. Returns: The binary representation of this address. Raises: ValueError: If the integer is too large to be an IPv4 IP address.
codesearchnet
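Concrete example of the packing above (comment only):

# 192.168.0.1 as an integer:
#   v4_int_to_packed(0xC0A80001) -> the module's Bytes wrapper around b'\xc0\xa8\x00\x01'
# A value above 2**32 - 1 raises ValueError.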
def get_embedded_tweet(tweet):
    if tweet.retweeted_tweet is not None:
        return tweet.retweeted_tweet
    elif tweet.quoted_tweet is not None:
        return tweet.quoted_tweet
    else:
        return None
Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary Args: tweet (Tweet): A Tweet object (not simply a dict) Returns: dict (or None, if the Tweet is neither a quote tweet or a Retweet): a dictionary representing the quote Tweet or the Retweet
juraj-google-style
def Serialize(self, writer): writer.WriteByte(self.Usage) if isinstance(self.Data, UIntBase): self.Data = self.Data.Data length = len(self.Data) if length > self.MAX_ATTR_DATA_SIZE: raise Exception("Invalid transaction attribute") if self.Usage == TransactionAttributeUsage.ContractHash or self.Usage == TransactionAttributeUsage.Vote or \ (self.Usage >= TransactionAttributeUsage.Hash1 and self.Usage <= TransactionAttributeUsage.Hash15): writer.WriteBytes(self.Data) elif self.Usage == TransactionAttributeUsage.ECDH02 or self.Usage == TransactionAttributeUsage.ECDH03: writer.WriteBytes(self.Data[1:33]) elif self.Usage == TransactionAttributeUsage.Script: writer.WriteBytes(self.Data) elif self.Usage == TransactionAttributeUsage.DescriptionUrl: writer.WriteVarString(self.Data) elif self.Usage == TransactionAttributeUsage.Description or self.Usage >= TransactionAttributeUsage.Remark: writer.WriteVarString(self.Data) else: logger.error("format error!!!")
Serialize object. Args: writer (neo.IO.BinaryWriter): Raises: Exception: if the length exceeds the maximum allowed number of attributes in a transaction.
juraj-google-style
def shutdown_tpu_system(cluster_resolver=None): tpu_strategy_util.shutdown_tpu_system_impl(cluster_resolver, TPUClusterResolver)
Shuts down the TPU devices. This will clear all caches, even those that are maintained through sequential calls to tf.tpu.experimental.initialize_tpu_system, such as the compilation cache. Args: cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. Raises: RuntimeError: If no TPU devices found for eager execution or if run in a tf.function.
github-repos
def start(self, device):
    super(NativeBLEVirtualInterface, self).start(device)
    self.set_advertising(True)
Start serving access to this VirtualIOTileDevice Args: device (VirtualIOTileDevice): The device we will be providing access to
juraj-google-style
def register_subclass(cls, typeid):
    def decorator(subclass):
        cls._subcls_lookup[typeid] = subclass
        subclass.typeid = typeid
        return subclass
    return decorator
Register a subclass so from_dict() works Args: typeid (str): Type identifier for subclass
juraj-google-style
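A standalone sketch of the same registry-decorator pattern; Shape, Circle and the minimal from_dict below are hypothetical stand-ins for the library's serializable classes, which do more than a plain typeid lookup.

class Shape:
    _subcls_lookup = {}
    typeid = None

    @classmethod
    def register_subclass(cls, typeid):
        def decorator(subclass):
            cls._subcls_lookup[typeid] = subclass
            subclass.typeid = typeid
            return subclass
        return decorator

    @classmethod
    def from_dict(cls, d):
        # Dispatch on the stored type identifier, mirroring how the registry
        # is typically consumed.
        return cls._subcls_lookup[d['typeid']]()

@Shape.register_subclass('circle:1.0')
class Circle(Shape):
    pass

assert isinstance(Shape.from_dict({'typeid': 'circle:1.0'}), Circle)
assert Circle.typeid == 'circle:1.0'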
def _expand_place_ids(self, terms): place_vids = [] first_type = None for result in self.backend.identifier_index.search(terms): if not first_type: first_type = result.type if result.type != first_type: continue place_vids.append(result.vid) if place_vids: all_set = set(itertools.chain.from_iterable(iallval(GVid.parse(x)) for x in place_vids)) place_vids += list(str(x) for x in all_set) return place_vids else: return terms
Looks up all of the place identifiers to get gvids.

Args:
    terms (str or unicode): terms to lookup

Returns:
    str or list: given terms if no identifiers found, otherwise list of identifiers.
juraj-google-style
def _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes):
    nmr_params = parameters.shape[1]
    initial_step = np.zeros_like(parameters)
    if (max_step_sizes is None):
        max_step_sizes = 0.1
    if isinstance(max_step_sizes, Number):
        max_step_sizes = ([max_step_sizes] * nmr_params)
    max_step_sizes = np.array(max_step_sizes)
    for ind in range(parameters.shape[1]):
        minimum_allowed_step = np.minimum(np.abs((parameters[:, ind] - lower_bounds[ind])), np.abs((upper_bounds[ind] - parameters[:, ind])))
        initial_step[:, ind] = np.minimum(minimum_allowed_step, max_step_sizes[ind])
    return (initial_step / 2.0)
Get an initial step size to use for every parameter.

This chooses the step sizes based on the maximum step size and the lower and upper bounds.

Args:
    parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems and p parameters.
    lower_bounds (list): lower bounds
    upper_bounds (list): upper bounds
    max_step_sizes (list or None): the maximum step size, or the maximum step size per parameter. Defaults to 0.1

Returns:
    ndarray: for every problem instance the vector with the initial step size for each parameter.
codesearchnet
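A small worked example of the step-size rule above, assuming only numpy; the bounds and parameter values are made up for illustration.

import numpy as np

parameters = np.array([[0.05, 5.0],
                       [0.90, 1.0]])        # 2 problems, 2 parameters
lower_bounds = [0.0, 0.0]
upper_bounds = [1.0, 10.0]
max_step = 0.1

initial_step = np.zeros_like(parameters)
for ind in range(parameters.shape[1]):
    # The distance to the nearest bound limits the step, capped by max_step.
    to_bound = np.minimum(np.abs(parameters[:, ind] - lower_bounds[ind]),
                          np.abs(upper_bounds[ind] - parameters[:, ind]))
    initial_step[:, ind] = np.minimum(to_bound, max_step)
initial_step /= 2.0

# The first problem's first parameter sits 0.05 from its lower bound,
# so its step is min(0.05, 0.1) / 2 = 0.025.
assert np.isclose(initial_step[0, 0], 0.025)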
def inverse(self, name=None):
    if (self._num_coeff != 6):
        raise tf.errors.UnimplementedError('AffineGridWarper currently supports inversion only for the 2D case.')

    def _affine_grid_warper_inverse(inputs):
        """Assembles network to compute inverse affine transformation.

        Each `inputs` row potentially contains [a, b, tx, c, d, ty]
        corresponding to an affine matrix:

            A = [a, b, tx],
                [c, d, ty]

        We want to generate a tensor containing the coefficients of the
        corresponding inverse affine transformation in a constraints-aware
        fashion. Calling M:

            M = [a, b]
                [c, d]

        the affine matrix for the inverse transform is:

            A_in = [M^(-1), M^(-1) * [-tx, -ty]^T]

        where

            M^(-1) = (ad - bc)^(-1) * [ d, -b]
                                      [-c,  a]

        Args:
            inputs: Tensor containing a batch of transformation parameters.

        Returns:
            A tensorflow graph performing the inverse affine transformation
            parametrized by the input coefficients.
        """
        batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
        constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)
        index = iter(range(6))

        def get_variable(constraint):
            if (constraint is None):
                i = next(index)
                return inputs[:, i:(i + 1)]
            else:
                return tf.fill(constant_shape, tf.constant(constraint, dtype=inputs.dtype))

        constraints = chain.from_iterable(self.constraints)
        (a, b, tx, c, d, ty) = (get_variable(constr) for constr in constraints)
        det = ((a * d) - (b * c))
        a_inv = (d / det)
        b_inv = ((- b) / det)
        c_inv = ((- c) / det)
        d_inv = (a / det)
        m_inv = basic.BatchReshape([2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))
        txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)
        txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))
        tx_inv = txy_inv[:, 0:1]
        ty_inv = txy_inv[:, 1:2]
        inverse_gw_inputs = tf.concat([a_inv, b_inv, (- tx_inv), c_inv, d_inv, (- ty_inv)], 1)
        agw = AffineGridWarper(self.output_shape, self.source_shape)
        return agw(inverse_gw_inputs)

    if (name is None):
        name = (self.module_name + '_inverse')
    return base.Module(_affine_grid_warper_inverse, name=name)
Returns a `sonnet` module to compute inverse affine transforms. The function first assembles a network that given the constraints of the current AffineGridWarper and a set of input parameters, retrieves the coefficients of the corresponding inverse affine transform, then feeds its output into a new AffineGridWarper setup to correctly warp the `output` space into the `source` space. Args: name: Name of module implementing the inverse grid transformation. Returns: A `sonnet` module performing the inverse affine transform of a reference grid of points via an AffineGridWarper module. Raises: tf.errors.UnimplementedError: If the function is called on a non 2D instance of AffineGridWarper.
codesearchnet
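A numpy sketch of the 2x2 inversion math the module wires into the graph, for a single unconstrained parameter row [a, b, tx, c, d, ty]; the function name and the example parameters are hypothetical.

import numpy as np

def invert_affine_params(params):
    a, b, tx, c, d, ty = params
    det = a * d - b * c
    m_inv = np.array([[d, -b], [-c, a]]) / det
    # The inverse translation is -M^{-1} [tx, ty]^T.
    t_inv = -m_inv @ np.array([tx, ty])
    return np.array([m_inv[0, 0], m_inv[0, 1], t_inv[0],
                     m_inv[1, 0], m_inv[1, 1], t_inv[1]])

params = np.array([2.0, 0.0, 1.0, 0.0, 2.0, -3.0])   # scale by 2, then translate
inv = invert_affine_params(params)

# Applying the inverse to a transformed point recovers the original point.
x = np.array([0.5, 0.25])
y = np.array([[2.0, 0.0], [0.0, 2.0]]) @ x + np.array([1.0, -3.0])
x_back = inv[[0, 1, 3, 4]].reshape(2, 2) @ y + inv[[2, 5]]
assert np.allclose(x_back, x)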
def multi_choice_spec(self) -> Optional['DecisionPoint']: self._ensure_dna_spec() multi_choice_spec = None if self.children: child_spec = self.children[0].spec if child_spec.is_subchoice: multi_choice_spec = child_spec.parent_spec return multi_choice_spec
Returns the multi-choice spec for child DNAs. Returns: If the children of this DNA are decisions of a multi-choice's subchoices, return the multi-choice spec (`pg.geno.Choices`). Otherwise returns None.
github-repos
def live_processes(self): result = [] for (process_type, process_infos) in self.all_processes.items(): for process_info in process_infos: if (process_info.process.poll() is None): result.append((process_type, process_info.process)) return result
Return a list of the live processes.

Returns:
    A list of (process_type, process) tuples for the processes that are still running.
codesearchnet
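A simplified standalone sketch of the poll()-based liveness check; here each table entry holds the Popen object directly instead of a ProcessInfo wrapper, and the process groups are made up for illustration.

import subprocess
import sys
import time

all_processes = {
    'worker': [subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(30)'])],
    'short_lived': [subprocess.Popen([sys.executable, '-c', 'pass'])],
}

def live_processes_sketch(all_processes):
    result = []
    for process_type, processes in all_processes.items():
        for process in processes:
            # poll() returns None while the child is still running.
            if process.poll() is None:
                result.append((process_type, process))
    return result

time.sleep(1)  # give the short-lived child time to exit
live = live_processes_sketch(all_processes)
print([ptype for ptype, _ in live])  # typically ['worker']
for _, proc in live:
    proc.terminate()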
def variants(self, case_id, skip=0, count=1000, filters=None): filters = (filters or {}) case_obj = self.case(case_id=case_id) limit = (count + skip) genes = set() if filters.get('gene_ids'): genes = set([gene_id.strip() for gene_id in filters['gene_ids']]) frequency = None if filters.get('frequency'): frequency = float(filters['frequency']) cadd = None if filters.get('cadd'): cadd = float(filters['cadd']) genetic_models = None if filters.get('genetic_models'): genetic_models = set(filters['genetic_models']) sv_len = None if filters.get('sv_len'): sv_len = float(filters['sv_len']) impact_severities = None if filters.get('impact_severities'): impact_severities = set(filters['impact_severities']) vcf_file_path = case_obj.variant_source self.head = get_header(vcf_file_path) self.vep_header = self.head.vep_columns self.snpeff_header = self.head.snpeff_columns variants = self._get_filtered_variants(vcf_file_path, filters) result = [] skip_index = 0 for (index, variant) in enumerate(variants): index += 1 if (skip_index >= skip): variant_obj = self._format_variants(variant=variant, index=index, case_obj=case_obj) if (genes and variant_obj): if (not set(variant_obj['gene_symbols']).intersection(genes)): variant_obj = None if (impact_severities and variant_obj): if (not (variant_obj['impact_severity'] in impact_severities)): variant_obj = None if (frequency and variant_obj): if (variant_obj.max_freq > frequency): variant_obj = None if (cadd and variant_obj): if (variant_obj['cadd_score'] < cadd): variant_obj = None if (genetic_models and variant_obj): models = set(variant_obj.genetic_models) if (not models.intersection(genetic_models)): variant_obj = None if (sv_len and variant_obj): if (variant_obj.sv_len < sv_len): variant_obj = None if variant_obj: skip_index += 1 if (skip_index <= limit): result.append(variant_obj) else: break else: skip_index += 1 return Results(result, len(result))
Return all variants in the VCF.

This function will apply the given filter and return the 'count' first variants. If 'skip' is given, the first 'skip' variants are not considered.

Args:
    case_id (str): Path to a vcf file (for this adapter)
    skip (int): Skip first variants
    count (int): The number of variants to return
    filters (dict): A dictionary with filters. Currently this will look like: {
        gene_list: [] (list of hgnc ids),
        frequency: None (float),
        cadd: None (float),
        sv_len: None (float),
        consequence: [] (list of consequences),
        is_lof: None (Bool),
        genetic_models [] (list of genetic models)
        sv_type: List (list of sv types),
    }

Returns:
    puzzle.constants.Results : Named tuple with variants and nr_of_variants
codesearchnet
def to_python(self, value: Union[dict, str, None]) -> LocalizedValue: try: deserialized_value = super(LocalizedField, self).to_python(value) except json.JSONDecodeError: deserialized_value = value if not deserialized_value: return self.attr_class() return self.attr_class(deserialized_value)
Turns the specified database value into its Python equivalent. Arguments: value: The value that is stored in the database and needs to be converted to its Python equivalent. Returns: A :see:LocalizedValue instance containing the data extracted from the database.
juraj-google-style
def maybe_get_static_value(x, dtype=None): if x is None: return x try: x_ = tensor_util.constant_value(x) except TypeError: x_ = x if x_ is None or dtype is None: return x_ return np.array(x_, dtype)
Helper which tries to return a static value.

Given `x`, extract its value statically, optionally casting to a specific dtype. If this is not possible, None is returned.

Args:
    x: `Tensor` for which to extract a value statically.
    dtype: Optional dtype to cast to.

Returns:
    Statically inferred value if possible, otherwise None.
github-repos
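A hedged usage sketch, assuming TensorFlow 2.x is installed and that tf.get_static_value is the public counterpart of the constant_value machinery used above; treat that mapping as an assumption rather than a statement about this module's API.

import numpy as np
import tensorflow as tf

x = tf.constant([1, 2, 3])
static = tf.get_static_value(x)
# The value is known at graph-construction time, so it can be cast eagerly.
print(np.array(static, np.float64))   # [1. 2. 3.]

@tf.function
def traced(y):
    # Inside a traced function the argument is symbolic, so no static value
    # is available and the lookup returns None.
    print('static during tracing:', tf.get_static_value(y))
    return y + 1

traced(tf.constant([4, 5, 6]))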
def build_metagraph_list(self): ops = [] self.ignore_unknown_dtypes = True for key in sorted(self.meta_params): value = self.convert_data_to_string(self.meta_params[key]) if (len(value) == 0): continue if isinstance(value, str): ops.append(tf.contrib.summary.generic(name=key, tensor=tf.convert_to_tensor(str(value)))) else: ops.append(tf.contrib.summary.generic(name=key, tensor=tf.as_string(tf.convert_to_tensor(value)))) return ops
Convert MetaParams into TF Summary Format and create summary_op. Returns: Merged TF Op for TEXT summary elements, should only be executed once to reduce data duplication.
codesearchnet
def _make_unique_slug(slug: str, language: str, is_unique: Callable[([str], bool)]) -> str: index = 1 unique_slug = slug while (not is_unique(unique_slug, language)): unique_slug = ('%s-%d' % (slug, index)) index += 1 return unique_slug
Guarantees that the specified slug is unique by appending a number until it is unique.

Arguments:
    slug: The slug to make unique.
    is_unique: Function that can be called to verify whether the generated slug is unique.

Returns:
    A guaranteed unique slug.
codesearchnet
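A standalone sketch of the uniqueness loop; the is_unique callback here checks an in-memory set instead of issuing a database query, which is an assumption made purely for illustration.

existing = {('about', 'en'), ('about-1', 'en')}

def is_unique(slug, language):
    return (slug, language) not in existing

def make_unique_slug(slug, language, is_unique):
    index = 1
    unique_slug = slug
    # Keep appending an increasing suffix until the callback accepts the slug.
    while not is_unique(unique_slug, language):
        unique_slug = '%s-%d' % (slug, index)
        index += 1
    return unique_slug

assert make_unique_slug('about', 'en', is_unique) == 'about-2'
assert make_unique_slug('contact', 'en', is_unique) == 'contact'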
def original(self, index=None): if (index is None): try: return next(self.select(Original, None, False, False)) except StopIteration: raise NoSuchAnnotation else: for e in self.select(Original, None, False, False): return e[index] raise NoSuchAnnotation
Get the old annotation prior to correction.

This returns only one annotation if multiple exist; use `index` to select another in the sequence.

Returns:
    an annotation element (:class:`AbstractElement`)

Raises:
    :class:`NoSuchAnnotation`
codesearchnet
def get_psd(self, omega): w = np.asarray(omega) (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients p = get_psd_value( alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, w.flatten(), ) return p.reshape(w.shape)
Compute the PSD of the term for an array of angular frequencies Args: omega (array[...]): An array of frequencies where the PSD should be evaluated. Returns: The value of the PSD for each ``omega``. This will have the same shape as ``omega``.
juraj-google-style
def read_cifar10(filename_queue): class CIFAR10Record(object): pass result = CIFAR10Record() label_bytes = 1 result.height = 32 result.width = 32 result.depth = 3 image_bytes = ((result.height * result.width) * result.depth) record_bytes = (label_bytes + image_bytes) reader = tf.FixedLengthRecordReader(record_bytes=record_bytes) (result.key, value) = reader.read(filename_queue) record_bytes = tf.decode_raw(value, tf.uint8) result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32) depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes], [(label_bytes + image_bytes)]), [result.depth, result.height, result.width]) result.uint8image = tf.transpose(depth_major, [1, 2, 0]) return result
Reads and parses examples from CIFAR10 data files. Recommendation: if you want N-way read parallelism, call this function N times. This will give you N independent Readers reading different files & positions within those files, which will give better mixing of examples. Args: filename_queue: A queue of strings with the filenames to read from. Returns: An object representing a single example, with the following fields: height: number of rows in the result (32) width: number of columns in the result (32) depth: number of color channels in the result (3) key: a scalar string Tensor describing the filename & record number for this example. label: an int32 Tensor with the label in the range 0..9. uint8image: a [height, width, depth] uint8 Tensor with the image data
codesearchnet
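A numpy sketch of the same 3073-byte record layout (1 label byte followed by a 3x32x32 image stored depth-major), parsed from a synthetic record instead of the real CIFAR-10 binary files.

import numpy as np

height, width, depth = 32, 32, 3
label_bytes = 1
image_bytes = height * width * depth
record = np.random.randint(0, 256, size=label_bytes + image_bytes, dtype=np.uint8).tobytes()

raw = np.frombuffer(record, dtype=np.uint8)
label = int(raw[0])
# The file stores channels first; transpose to height x width x depth just as
# the reader above does with tf.transpose(depth_major, [1, 2, 0]).
depth_major = raw[label_bytes:label_bytes + image_bytes].reshape(depth, height, width)
image = depth_major.transpose(1, 2, 0)

assert image.shape == (32, 32, 3)
assert 0 <= label <= 255  # real CIFAR-10 labels fall in 0..9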
def CreateFile(filename): with gcs.open(filename, 'w') as f: f.write('abcde\n') blobstore_filename = '/gs' + filename return blobstore.create_gs_key(blobstore_filename)
Create a GCS file with GCS client lib. Args: filename: GCS filename. Returns: The corresponding string blobkey for this GCS file.
juraj-google-style
def parse_selinux(parts): (owner, group) = parts[:2] selinux = parts[2].split(':') lsel = len(selinux) (path, link) = parse_path(parts[(- 1)]) result = {'owner': owner, 'group': group, 'se_user': selinux[0], 'se_role': (selinux[1] if (lsel > 1) else None), 'se_type': (selinux[2] if (lsel > 2) else None), 'se_mls': (selinux[3] if (lsel > 3) else None), 'name': path} if link: result['link'] = link return result
Parse part of an ls output line that is selinux.

Args:
    parts (list): A four-element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner, group, selinux info, and the path.

Returns:
    A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included.
codesearchnet
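A standalone sketch of the same field split, assuming the helper receives the four trailing fields of an `ls -Z` style line; parse_path is inlined here as a simple " -> " split, which is an assumption about that helper's behavior.

def parse_path(path):
    path, _, link = path.partition(' -> ')
    return path, link or None

def parse_selinux_sketch(parts):
    owner, group = parts[:2]
    selinux = parts[2].split(':')
    lsel = len(selinux)
    path, link = parse_path(parts[-1])
    result = {
        'owner': owner,
        'group': group,
        'se_user': selinux[0],
        'se_role': selinux[1] if lsel > 1 else None,
        'se_type': selinux[2] if lsel > 2 else None,
        'se_mls': selinux[3] if lsel > 3 else None,
        'name': path,
    }
    if link:
        result['link'] = link
    return result

parts = ['root', 'root', 'system_u:object_r:etc_t:s0', '/etc/hosts']
print(parse_selinux_sketch(parts)['se_type'])  # etc_t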
def poisson_ll(data, means):
    if sparse.issparse(data):
        return sparse_poisson_ll(data, means)
    (genes, cells) = data.shape
    clusters = means.shape[1]
    ll = np.zeros((cells, clusters))
    for i in range(clusters):
        means_i = np.tile(means[:, i], (cells, 1))
        means_i = (means_i.transpose() + eps)
        ll[:, i] = np.sum((xlogy(data, means_i) - means_i), 0)
    return ll
Calculates the Poisson log-likelihood. Args: data (array): 2d numpy array of genes x cells means (array): 2d numpy array of genes x k Returns: cells x k array of log-likelihood for each cell/cluster pair
codesearchnet
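A small dense worked example of the log-likelihood above, assuming numpy and scipy; eps is defined locally here, whereas in the source it is a module-level constant, and the counts and means are made up.

import numpy as np
from scipy.special import xlogy

eps = 1e-10
data = np.array([[3, 0], [1, 4]])           # 2 genes x 2 cells
means = np.array([[2.0, 0.5], [1.0, 3.0]])  # 2 genes x 2 clusters

genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
for i in range(clusters):
    means_i = np.tile(means[:, i], (cells, 1)).transpose() + eps
    # Poisson log-likelihood up to the data-dependent constant: x*log(mu) - mu.
    ll[:, i] = np.sum(xlogy(data, means_i) - means_i, 0)

print(ll.shape)        # (2, 2): one score per cell/cluster pair
print(ll.argmax(1))    # most likely cluster for each cell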
def _MergeField(self, tokenizer, message): message_descriptor = message.DESCRIPTOR if (hasattr(message_descriptor, 'syntax') and message_descriptor.syntax == 'proto3'): self._allow_multiple_scalars = True if tokenizer.TryConsume('['): name = [tokenizer.ConsumeIdentifier()] while tokenizer.TryConsume('.'): name.append(tokenizer.ConsumeIdentifier()) name = '.'.join(name) if not message_descriptor.is_extendable: raise tokenizer.ParseErrorPreviousToken( 'Message type "%s" does not have extensions.' % message_descriptor.full_name) field = message.Extensions._FindExtensionByName(name) if not field: if self.allow_unknown_extension: field = None else: raise tokenizer.ParseErrorPreviousToken( 'Extension "%s" not registered.' % name) elif message_descriptor != field.containing_type: raise tokenizer.ParseErrorPreviousToken( 'Extension "%s" does not extend message type "%s".' % (name, message_descriptor.full_name)) tokenizer.Consume(']') else: name = tokenizer.ConsumeIdentifierOrNumber() if self.allow_field_number and name.isdigit(): number = ParseInteger(name, True, True) field = message_descriptor.fields_by_number.get(number, None) if not field and message_descriptor.is_extendable: field = message.Extensions._FindExtensionByNumber(number) else: field = message_descriptor.fields_by_name.get(name, None) if not field: field = message_descriptor.fields_by_name.get(name.lower(), None) if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP: field = None if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and field.message_type.name != name): field = None if not field: raise tokenizer.ParseErrorPreviousToken( 'Message type "%s" has no field named "%s".' % (message_descriptor.full_name, name)) if field: if not self._allow_multiple_scalars and field.containing_oneof: which_oneof = message.WhichOneof(field.containing_oneof.name) if which_oneof is not None and which_oneof != field.name: raise tokenizer.ParseErrorPreviousToken( 'Field "%s" is specified along with field "%s", another member ' 'of oneof "%s" for message type "%s".' % (field.name, which_oneof, field.containing_oneof.name, message_descriptor.full_name)) if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: tokenizer.TryConsume(':') merger = self._MergeMessageField else: tokenizer.Consume(':') merger = self._MergeScalarField if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and tokenizer.TryConsume('[')): while True: merger(tokenizer, message, field) if tokenizer.TryConsume(']'): break tokenizer.Consume(',') else: merger(tokenizer, message, field) else: assert self.allow_unknown_extension _SkipFieldContents(tokenizer) if not tokenizer.TryConsume(','): tokenizer.TryConsume(';')
Merges a single protocol message field into a message. Args: tokenizer: A tokenizer to parse the field name and values. message: A protocol message to record the data. Raises: ParseError: In case of text parsing problems.
juraj-google-style
def convert_seeded_answers(answers): converted = {} for (index, answer) in enumerate(answers): converted.setdefault(answer['answer'], {}) converted[answer['answer']][('seeded' + str(index))] = answer['rationale'] return converted
Convert seeded answers into the format that can be merged into student answers.

Args:
    answers (list): seeded answers

Returns:
    dict: seeded answers with student answers format:
    {
        0: {
            'seeded0': 'rationaleA'
        },
        1: {
            'seeded1': 'rationaleB'
        }
    }
codesearchnet
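A worked example of the conversion with hypothetical seeded answers, showing how the index becomes the 'seededN' key under each answer choice.

def convert_seeded_answers_sketch(answers):
    converted = {}
    for index, answer in enumerate(answers):
        converted.setdefault(answer['answer'], {})
        converted[answer['answer']]['seeded' + str(index)] = answer['rationale']
    return converted

seeded = [
    {'answer': 0, 'rationale': 'rationaleA'},
    {'answer': 1, 'rationale': 'rationaleB'},
]
assert convert_seeded_answers_sketch(seeded) == {
    0: {'seeded0': 'rationaleA'},
    1: {'seeded1': 'rationaleB'},
}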