Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes: codesearchnet, github-repos, juraj-google-style)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    local_buffer = utils.BytearrayStream()
    if self._object_type:
        self._object_type.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField('The Create request payload is missing the object type field.')
    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        if self._template_attribute:
            self._template_attribute.write(local_buffer, kmip_version=kmip_version)
        else:
            raise exceptions.InvalidField('The Create request payload is missing the template attribute field.')
    elif self._template_attribute:
        attributes = objects.convert_template_attribute_to_attributes(self._template_attribute)
        attributes.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField('The Create request payload is missing the template attribute field.')
    self.length = local_buffer.length()
    super(CreateRequestPayload, self).write(output_buffer, kmip_version=kmip_version)
    output_buffer.write(local_buffer.buffer)
Write the data encoding the Create request payload to a buffer. Args: output_buffer (stream): A data buffer in which to encode object data, supporting a write method. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: InvalidField: Raised if the object type attribute or template attribute is not defined.
codesearchnet
def __init__(self, name, description, *labels):
    super(Counter, self).__init__('Counter', _counter_methods, len(labels), name, description, *labels)
Creates a new Counter. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric.
github-repos
def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows, window_cols, row_stride, col_stride, padding):
    return gen_nn_ops.max_pool_grad_grad(
        orig_input, orig_output, grad,
        [1, window_rows, window_cols, 1],
        [1, row_stride, col_stride, 1],
        padding)
Max Pooling Second-Order Gradient. Args: orig_input: A float Tensor. The original input tensor. orig_output: A float Tensor. The original output tensor. grad: A float Tensor. The 4D (batch x out_rows x out_cols x depth) output backprop. window_rows: integer. Kernel size along rows dimension. window_cols: integer. Kernel size along cols dimension. row_stride: integer. Stride along rows dimension col_stride: integer. Stride along cols dimension padding: PoolingOpDef.Padding. Padding type. Returns: A Tensor.
github-repos
def in_port(self):
    in_port = self.match.get_field(OxmOfbMatchField.OFPXMT_OFB_IN_PORT)
    return int.from_bytes(in_port, 'big')
Retrieve the 'in_port' that generated the PacketIn. This method will look for the OXM_TLV with type OFPXMT_OFB_IN_PORT on the `oxm_match_fields` field from `match` field and return its value, if the OXM exists. Returns: The integer number of the 'in_port' that generated the PacketIn if it exists. Otherwise return None.
codesearchnet
def _poll_server_till_success(max_attempts, sleep_per_poll_sec, debug_server_url, dump_dir, server, gpu_memory_fraction=1.0):
    poll_count = 0
    config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction))
    with session.Session(config=config) as sess:
        for poll_count in range(max_attempts):
            server.clear_data()
            print('Polling: poll_count = %d' % poll_count)

            x_init_name = 'x_init_%d' % poll_count
            x_init = constant_op.constant([42.0], shape=[1], name=x_init_name)
            x = variables.Variable(x_init, name=x_init_name)

            run_options = config_pb2.RunOptions()
            debug_utils.add_debug_tensor_watch(run_options, x_init_name, 0, debug_urls=[debug_server_url])
            try:
                sess.run(x.initializer, options=run_options)
            except errors.FailedPreconditionError:
                pass

            if dump_dir:
                if os.path.isdir(dump_dir) and debug_data.DebugDumpDir(dump_dir).size > 0:
                    file_io.delete_recursively(dump_dir)
                    print('Poll succeeded.')
                    return True
                else:
                    print('Poll failed. Sleeping for %f s' % sleep_per_poll_sec)
                    time.sleep(sleep_per_poll_sec)
            elif server.debug_tensor_values:
                print('Poll succeeded.')
                return True
            else:
                print('Poll failed. Sleeping for %f s' % sleep_per_poll_sec)
                time.sleep(sleep_per_poll_sec)
        return False
Poll server until success or exceeding max polling count. Args: max_attempts: (int) How many times to poll at maximum sleep_per_poll_sec: (float) How many seconds to sleep for after each unsuccessful poll. debug_server_url: (str) gRPC URL to the debug server. dump_dir: (str) Dump directory to look for files in. If None, will directly check data from the server object. server: The server object. gpu_memory_fraction: (float) Fraction of GPU memory to be allocated for the Session used in server polling. Returns: (bool) Whether the polling succeeded within max_polls attempts.
github-repos
def get_session(region, profile=None):
    if profile is None:
        logger.debug("No AWS profile explicitly provided. "
                     "Falling back to default.")
        profile = default_profile
    logger.debug("Building session using profile \"%s\" in region \"%s\"" % (profile, region))
    session = boto3.Session(region_name=region, profile_name=profile)
    c = session._session.get_component('credential_provider')
    provider = c.get_provider('assume-role')
    provider.cache = credential_cache
    provider._prompter = ui.getpass
    return session
Creates a boto3 session with a cache Args: region (str): The region for the session profile (str): The profile for the session Returns: :class:`boto3.session.Session`: A boto3 session with credential caching
juraj-google-style
def __init__(self, action_type=None, nw_tos=None):
    super().__init__(action_type, length=8)
    self.nw_tos = nw_tos
Create an ActionNWTos with the optional parameters below. Args: action_type (:class:`~pyof.v0x01.common.action.ActionType`): :attr:`~ActionType.OFPAT_SET_NW_SRC` or :attr:`~ActionType.OFPAT_SET_NW_DST`. nw_tos (int): IP ToS (DSCP field, 6 bits).
juraj-google-style
def wait_until(what, times=-1):
    while times:
        logger.info('Waiting times left %d', times)
        try:
            if what() is True:
                return True
        except:
            logger.exception('Wait failed')
        else:
            logger.warning('Trial[%d] failed', times)
        times -= 1
        time.sleep(1)
    return False
Wait until `what` returns True. Args: what (Callable[[], bool]): Called again and again until it returns True times (int): Maximum number of trials before giving up Returns: True on success, False if the times threshold is reached
codesearchnet
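A minimal, self-contained usage sketch of the polling pattern in the record above; the logger setup and the lambda condition are illustrative assumptions, not part of the original source.

import time
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def wait_until(what, times=-1):
    # Poll `what` until it returns True, at most `times` attempts, sleeping 1s between tries.
    while times:
        logger.info('Waiting times left %d', times)
        try:
            if what() is True:
                return True
        except Exception:
            logger.exception('Wait failed')
        else:
            logger.warning('Trial[%d] failed', times)
        times -= 1
        time.sleep(1)
    return False

# Example: wait for a time-based condition to become true.
deadline = time.time() + 2
print(wait_until(lambda: time.time() > deadline, times=5))  # True after roughly 3 polls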
def _sendPostDict(post_dict):
    downer = Downloader()
    downer.headers["Referer"] = settings.EDEPOSIT_EXPORT_REFERER
    data = downer.download(settings.ALEPH_EXPORT_URL, post=post_dict)
    rheaders = downer.response_headers
    error_msg = rheaders.get("aleph-info", "").lower().strip()
    if "aleph-info" in rheaders and error_msg.startswith("error"):
        raise ExportRejectedException(
            "Export request was rejected by import webform: %s" % rheaders["aleph-info"]
        )
    return data
Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`. Args: post_dict (dict): dictionary from :class:`PostData.get_POST_data()` Returns: str: Response from webform.
juraj-google-style
def Incr(self, x, term=1):
    self.d[x] = self.d.get(x, 0) + term
Increments the freq/prob associated with the value x. Args: x: number value term: how much to increment by
juraj-google-style
def get_properties(self):
    names = inspect.getmembers(self, predicate=lambda x: not inspect.ismethod(x))
    return [x[0] for x in names if not x[0].startswith('_') and x[0] not in self._ignored_properties]
Get a list of all of the public data properties of this class. Returns: list of str: A list of all of the public properties in this class.
codesearchnet
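An illustrative sketch of the same introspection pattern applied to a toy class; the class and its attributes are made up for demonstration.

import inspect

class Sensor:
    _ignored_properties = ('serial',)

    def __init__(self):
        self.name = 'temp0'
        self.units = 'C'
        self.serial = 'abc123'
        self._cache = {}

    def read(self):
        return 21.5

names = inspect.getmembers(Sensor(), predicate=lambda x: not inspect.ismethod(x))
public = [n for n, _ in names if not n.startswith('_') and n not in Sensor._ignored_properties]
print(public)  # ['name', 'units']; 'serial' is ignored and 'read' is a method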
def check_params_sync(model_params, original_params):
    for mp, op in zip(model_params, original_params):
        if isinstance(mp, DTensor):
            mp = mp.to_local()
        if isinstance(op, DTensor):
            op = op.to_local()
        if not torch.allclose(mp.data, op.data, rtol=0, atol=0):
            raise RuntimeError(f'Parameters out of sync: model param {mp.data} != original param {op.data}')
    return True
Check if original_params are being updated in sync with model parameters. Args: model_params: Iterator of model parameters after update original_params: List of original parameters before DDP wrapping
github-repos
def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False):
    gdf = gpd.read_file(src_polygons)
    geom_coords = gdf['geometry']
    lines = []
    row_ids = []
    for i_row, pol in tqdm(enumerate(geom_coords), total=len(geom_coords)):
        boundary = pol.boundary
        if boundary.type == 'MultiLineString':
            for line in boundary:
                lines.append(line)
                row_ids.append(i_row)
        else:
            lines.append(boundary)
            row_ids.append(i_row)
    gdf_lines = gdf.drop('geometry', axis=1).iloc[row_ids, :]
    gdf_lines['Coordinates'] = lines
    gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs)
    if crs is not None:
        gdf_lines = gdf_lines.to_crs(crs)
    if add_allone_col:
        gdf_lines['ALLONE'] = 1
    Path(dst_lines).parent.mkdir(exist_ok=True, parents=True)
    gdf_lines.to_file(dst_lines)
    return 0
Convert polygons to lines. Arguments: src_polygons {path to geopandas-readable file} -- Filename of the polygon vector dataset to be converted to lines. dst_lines {[type]} -- Filename where to write the line vector dataset to. Keyword Arguments: crs {dict or str} -- Output projection parameters as string or in dictionary format. This will reproject the data when a crs is given (not {None}) (default: {None}). add_allone_col {bool} -- Add an additional attribute column with all ones. This is useful, e.g. in case you want to use the lines with gdal_proximity afterwards (default: {False}). Returns: int -- Exit code 0 if successful.
codesearchnet
def make_call_types(f, globals_d):
    arg_spec = getargspec(f)
    args = [k for k in arg_spec.args if k != 'self']
    defaults = {}
    if arg_spec.defaults:
        default_args = args[-len(arg_spec.defaults):]
        for a, default in zip(default_args, arg_spec.defaults):
            defaults[a] = default
    if not getattr(f, '__annotations__', None):
        annotations = make_annotations(f, globals_d)
    else:
        annotations = f.__annotations__
    call_types = OrderedDict()
    for a in args:
        anno = anno_with_default(annotations[a], defaults.get(a, NO_DEFAULT))
        assert isinstance(anno, Anno), 'Argument %r has type %r which is not an Anno' % (a, anno)
        call_types[a] = anno
    return_type = anno_with_default(annotations.get('return', None))
    if return_type is Any:
        return_type = Anno('Any return value', Any, 'return')
    assert return_type is None or isinstance(return_type, Anno), 'Return has type %r which is not an Anno' % (return_type,)
    return call_types, return_type
Make a call_types dictionary that describes what arguments to pass to f Args: f: The function to inspect for argument names (without self) globals_d: A dictionary of globals to lookup annotation definitions in
codesearchnet
def get_profiles(self, cmd):
    if cmd not in self._views:
        raise ValueError('No autoprofiler for command: {}, was run'.format(cmd))
    return self._views[cmd]
Returns profiling results for each step at which `cmd` was run. Args: cmd: string, profiling command used in an `add_auto_profiling` call. Returns: dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which the profiling command was run. Values are the outputs of profiling. For "code" and "op" commands this will be a `MultiGraphNodeProto`, for "scope" and "graph" commands this will be a `GraphNodeProto. Raises: ValueError: if `cmd` was never run (either because no session.run call was made or because there was no `add_auto_profiling` call with the specified `cmd`.
github-repos
def guess_dir_structure(dir):
    subdir = os.listdir(dir)[0]
    if subdir.startswith('n') and \
            os.path.isdir(os.path.join(dir, subdir)):
        dir_structure = 'train'
    else:
        dir_structure = 'original'
    logger.info(
        "[ILSVRC12] Assuming directory {} has '{}' structure.".format(
            dir, dir_structure))
    return dir_structure
Return the directory structure of "dir". Args: dir(str): something like '/path/to/imagenet/val' Returns: either 'train' or 'original'
juraj-google-style
def synchronize_clock(self, offset):
    self.time_offset = offset - self.uptime
    self.is_utc = True
    if self.has_rtc:
        self.stored_offset = self.time_offset
Persistently synchronize the clock to UTC time. Args: offset (int): The number of seconds since 1/1/2000 00:00Z
codesearchnet
def get_members(cls, session, team_or_id):
    if isinstance(team_or_id, Person):
        team_or_id = team_or_id.id
    return cls('/teams/%d/members.json' % team_or_id, session=session, out_type=User)
List the members for the team. Args: team_or_id (helpscout.models.Person or int): Team or the ID of the team to get the members for. Returns: RequestPaginator(output_type=helpscout.models.Users): Users iterator.
codesearchnet
def get_pending_file_rename():
    vnames = ('PendingFileRenameOperations', 'PendingFileRenameOperations2')
    key = 'SYSTEM\\CurrentControlSet\\Control\\Session Manager'
    for vname in vnames:
        reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
        if reg_ret['success']:
            log.debug('Found key: %s', key)
            if reg_ret['vdata'] and reg_ret['vdata'] != '(value not set)':
                return True
        else:
            log.debug('Unable to access key: %s', key)
    return False
Determine whether there are pending file rename operations that require a reboot. .. versionadded:: 2016.11.0 Returns: bool: ``True`` if there are pending file rename operations, otherwise ``False`` CLI Example: .. code-block:: bash salt '*' system.get_pending_file_rename
codesearchnet
def describe(o):
    from inspect import getmodule
    from acorn.logging.decoration import _fqdn
    fqdn = _fqdn(o, False)
    if fqdn is None:
        return json_describe(o, str(type(o)))
    package = fqdn.split('.')[0]
    global _package_desc
    if package not in _package_desc:
        from acorn.config import descriptors
        spack = descriptors(package)
        if spack is None:
            _package_desc[package] = None
            return json_describe(o, fqdn)
        else:
            _package_desc[package] = spack
    if _package_desc[package] is None:
        return json_describe(o, fqdn)
    elif fqdn in _package_desc[package]:
        return json_describe(o, fqdn, _package_desc[package][fqdn])
    else:
        return json_describe(o, fqdn)
Describes the object using developer-specified attributes specific to each main object type. Returns: dict: keys are specific attributes tailored to the specific object type, though `fqdn` is common to all descriptions; values are the corresponding attribute values which are *simple* types that can easily be serialized to JSON.
codesearchnet
def cert_chain(certs):
    if len(certs) < 2:
        warnings.warn('Certificate chain contains < 3 certificates.')
        return False
    cert = certs[0]
    today = datetime.datetime.today()
    if not (today > cert.not_valid_before):
        warnings.warn('Certificate Not Before date is invalid.')
        return False
    if not (today < cert.not_valid_after):
        warnings.warn('Certificate Not After date is invalid.')
        return False
    oid_san = x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME
    ext = cert.extensions.get_extension_for_oid(oid_san)
    sans = ext.value.get_values_for_type(x509.DNSName)
    if 'echo-api.amazon.com' not in sans:
        return False
    for i in range(len(certs) - 1):
        if not certs[i].issuer == certs[i + 1].subject:
            return False
    return True
Validate PEM-encoded X.509 certificate chain. See `validate.request` for additional info. Args: certs: list. The certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates. See `validate.retrieve` to create certs obj. Returns: bool: True if valid, False otherwise.
codesearchnet
def cds_identifier_validator(record, result):
    record_external_identifiers = get_value(record, 'external_system_identifiers', [])
    result_external_identifiers = get_value(result, '_source.external_system_identifiers', [])
    record_external_identifiers = {external_id['value'] for external_id in record_external_identifiers if external_id['schema'] == 'CDS'}
    result_external_identifiers = {external_id['value'] for external_id in result_external_identifiers if external_id['schema'] == 'CDS'}
    return bool(record_external_identifiers & result_external_identifiers)
Ensure that the two records have the same CDS identifier. This is needed because the search is done only for ``external_system_identifiers.value``, which might cause false positives in case the matched record has an identifier with the same ``value`` but ``schema`` different from CDS. Args: record (dict): the given record we are trying to match with similar ones in INSPIRE. result (dict): possible match returned by the ES query that needs to be validated. Returns: bool: validation decision.
codesearchnet
def __init__(self, handle, dtype, session):
    self._handle = compat.as_str_any(handle)
    self._resource_handle = None
    self._dtype = dtype
    self._session = session
    self._auto_gc_enabled = True
Constructs a new tensor handle. A tensor handle for a persistent tensor is a python string that has the form of "tensor_name;unique_id;device_name". Args: handle: A tensor handle. dtype: The data type of the tensor represented by `handle`. session: The session in which the tensor is produced.
github-repos
def _create_min_max_boundaries(max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE):
    bucket_boundaries = []
    x = min_boundary
    while x < max_length:
        bucket_boundaries.append(x)
        x = max(x + 1, int(x * boundary_scale))
    buckets_min = [0] + bucket_boundaries
    buckets_max = bucket_boundaries + [max_length + 1]
    return buckets_min, buckets_max
Create min and max boundary lists up to max_length. For example, when max_length=24, min_boundary=4 and boundary_scale=2, the returned values will be: buckets_min = [0, 4, 8, 16, 24] buckets_max = [4, 8, 16, 24, 25] Args: max_length: The maximum length of example in dataset. min_boundary: Minimum length in boundary. boundary_scale: Amount to scale consecutive boundaries in the list. Returns: min and max boundary lists
codesearchnet
def reaction_formula(reaction, compound_formula):
    def multiply_formula(compound_list):
        for compound, count in compound_list:
            yield count * compound_formula[compound.name]

    for compound, _ in reaction.compounds:
        if compound.name not in compound_formula:
            return None
    else:
        left_form = reduce(operator.or_, multiply_formula(reaction.left), Formula())
        right_form = reduce(operator.or_, multiply_formula(reaction.right), Formula())
        return left_form, right_form
Calculate formula compositions for both sides of the specified reaction. If the compounds in the reaction all have formula, then calculate and return the chemical compositions for both sides, otherwise return `None`. Args: reaction: :class:`psamm.reaction.Reaction`. compound_formula: a map from compound id to formula.
codesearchnet
def reduce(x, op='sum'):
    import warnings
    warnings.warn('Deprecated API. Use ``sum`` or ``mean`` instead.', DeprecationWarning)
    from .function_bases import reduce_sum, reduce_mean
    if op == 'sum':
        return reduce_sum(x)
    elif op == 'mean':
        return reduce_mean(x)
    raise ValueError()
Reduction function with given operation. Args: x (Variable): An input. op (str): 'sum' or 'mean'. Note: This is deprecated. Use ``mean`` or ``sum`` instead.
codesearchnet
def inspect(lines):
    labels = set()
    count = 0
    exp = re.compile('>.*?<([\\w ]+)>')
    valid = False
    for line in lines:
        if line.startswith('M END\n'):
            valid = True
        elif line.startswith('$$$$'):
            count += 1
            valid = False
        else:
            result = exp.match(line)
            if result:
                labels.add(result.group(1))
    if valid:
        count += 1
    return (list(labels), count)
Inspect an SDFile given as a list of strings. Returns: tuple: (data label list, number of records)
codesearchnet
def GetFormattedEventObject(cls, event):
    time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp)

    lines_of_text = [
        '+-' * 40,
        '[Timestamp]:',
        ' {0:s}'.format(time_string)]

    pathspec = getattr(event, 'pathspec', None)
    if pathspec:
        lines_of_text.append('[Pathspec]:')
        attribute_string = pathspec.comparable.replace('\n', '\n ')
        attribute_string = ' {0:s}\n'.format(attribute_string)
        lines_of_text.append(attribute_string)

    lines_of_text.append('[Reserved attributes]:')
    out_additional = ['[Additional attributes]:']

    for attribute_name, attribute_value in sorted(event.GetAttributes()):
        if attribute_name not in definitions.RESERVED_VARIABLE_NAMES:
            attribute_string = ' {{{0!s}}} {1!s}'.format(
                attribute_name, attribute_value)
            out_additional.append(attribute_string)
        elif attribute_name not in ('pathspec', 'tag'):
            attribute_string = ' {{{0!s}}} {1!s}'.format(
                attribute_name, attribute_value)
            lines_of_text.append(attribute_string)

    lines_of_text.append('')
    out_additional.append('')
    lines_of_text.extend(out_additional)

    return '\n'.join(lines_of_text)
Retrieves a string representation of the event. Args: event (EventObject): event. Returns: str: string representation of the event.
juraj-google-style
def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs):
    graph = self.as_graph(depth=depth)
    graph.print(format=format, output=output, **kwargs)
Print the graph for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. depth (int): depth of the graph.
codesearchnet
def san_managers(self):
    if not self.__san_managers:
        self.__san_managers = SanManagers(self.__connection)
    return self.__san_managers
Gets the SanManagers API client. Returns: SanManagers:
codesearchnet
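A sketch of the same lazy-initialization pattern on a toy class for illustration; the stand-in object replaces the real SanManagers client, which is not available here.

class Client:
    def __init__(self):
        self.__san_managers = None

    @property
    def san_managers(self):
        if not self.__san_managers:
            print('creating SanManagers client once')
            self.__san_managers = object()  # stand-in for SanManagers(connection)
        return self.__san_managers

c = Client()
assert c.san_managers is c.san_managers  # second access reuses the cached instance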
def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:
    self._validate_xoxp_token()
    kwargs.update({'name': name})
    return self.api_call('usergroups.create', json=kwargs)
Create a User Group Args: name (str): A name for the User Group. Must be unique among User Groups. e.g. 'My Test Team'
codesearchnet
def convert(self, calibration_input_fn=None):
    assert not self._converted
    device_requested = array_ops.zeros([]).device
    if 'gpu' not in device_requested.lower():
        raise ValueError(f'Specified device is not a GPU: {device_requested}')
    if 'gpu:0' not in device_requested.lower():
        self._device = device_requested
        logging.info(f'Placing imported graph from `{self._input_saved_model_dir}` on device: {self._device}')
    if self._need_calibration and not calibration_input_fn:
        raise ValueError('Should specify calibration_input_fn because INT8 calibration is needed')
    if not self._need_calibration and calibration_input_fn:
        raise ValueError('Should not specify calibration_input_fn because INT8 calibration is not needed')

    self._saved_model = load.load(self._input_saved_model_dir, self._input_saved_model_tags)
    func = self._saved_model.signatures[self._input_saved_model_signature_key]
    if self.freeze:
        frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
    else:
        inlined_graph_def = _apply_inlining(func)
        _annotate_variable_ops(func, inlined_graph_def)
        frozen_func = _construct_function_from_graph_def(func, inlined_graph_def)
    frozen_graph_def = frozen_func.graph.as_graph_def()

    logging.info('Clearing prior device assignments in loaded saved model')
    for node in frozen_graph_def.node:
        node.device = ''

    if self._device is None:
        grappler_meta_graph_def = saver.export_meta_graph(graph_def=frozen_graph_def, graph=frozen_func.graph)
    else:
        with ops.Graph().as_default() as graph, ops.device(self._device):
            importer.import_graph_def(frozen_graph_def, name='')
            grappler_meta_graph_def = saver.export_meta_graph(graph_def=graph.as_graph_def(), graph=graph)

    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in frozen_func.inputs + frozen_func.outputs:
        fetch_collection.node_list.value.append(array.name)
    grappler_meta_graph_def.collection_def['train_op'].CopyFrom(fetch_collection)

    self._converted_graph_def = self._run_conversion(grappler_meta_graph_def)
    self._converted_func = _construct_function_from_graph_def(func, self._converted_graph_def, frozen_func)

    if self._need_calibration:
        if not self._need_trt_profiles():
            self._execute_calibration(calibration_input_fn)
        else:
            self._calibration_input_fn = calibration_input_fn

    self._converted = True

    graphviz_path = os.environ.get('TF_TRT_EXPORT_GRAPH_VIZ_PATH', default=None)
    if graphviz_path is not None:
        try:
            trt_utils.draw_graphdef_as_graphviz(graphdef=self._converted_func.graph.as_graph_def(add_shapes=True), dot_output_filename=graphviz_path)
        except Exception as e:
            logging.error(f'An Exception occurred during the export of the graph visualization: {e}')

    return self._converted_func
Convert the input SavedModel in 2.0 format. Args: calibration_input_fn: a generator function that yields input data as a list or tuple or dict, which will be used to execute the converted signature for calibration. All the returned input data should have the same shape. Example: `def input_fn(): yield input1, input2, input3` If dynamic_shape_mode==False, (or if the graph has static input shapes) then we run calibration and build the calibrated engine during conversion. If dynamic_shape_mode==True (and the graph has any unknown input shape), then the reference to calibration_input_fn is stored, and the calibration is actually performed when we build the engine (see build()). Raises: ValueError: if the input combination is invalid. Returns: The TF-TRT converted Function.
github-repos
def visualize_computed_pcoll(pcoll_name: str, pcoll: beam.pvalue.PCollection, max_n: int, max_duration_secs: float, dynamic_plotting_interval: Optional[int]=None, include_window_info: bool=False, display_facets: bool=False) -> None:
    pipeline = ie.current_env().user_pipeline(pcoll.pipeline)
    rm = ie.current_env().get_recording_manager(pipeline, create_if_absent=True)
    stream = rm.read(pcoll_name, pcoll, max_n=max_n, max_duration_secs=max_duration_secs)
    if stream:
        visualize(stream, dynamic_plotting_interval=dynamic_plotting_interval, include_window_info=include_window_info, display_facets=display_facets, element_type=pcoll.element_type)
A simple visualize alternative. When the pcoll_name and pcoll pair identifies a watched and computed PCollection in the current interactive environment without ambiguity, an ElementStream can be built directly from cache. Returns immediately, the visualization is asynchronous, but guaranteed to end in the near future. Args: pcoll_name: the variable name of the PCollection. pcoll: the PCollection to be visualized. max_n: the maximum number of elements to visualize. max_duration_secs: max duration of elements to read in seconds. dynamic_plotting_interval: the interval in seconds between visualization updates if provided; otherwise, no dynamic plotting. include_window_info: whether to include windowing info in the elements. display_facets: whether to display the facets widgets.
github-repos
def _batch_transpose(mat):
    n = distribution_util.prefer_static_rank(mat)
    perm = tf.range(n)
    perm = tf.concat([perm[:-2], [perm[-1], perm[-2]]], axis=0)
    return tf.transpose(a=mat, perm=perm)
Transpose a possibly batched matrix. Args: mat: A `tf.Tensor` of shape `[..., n, m]`. Returns: A tensor of shape `[..., m, n]` with matching batch dimensions.
juraj-google-style
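The same "swap the last two axes" permutation, sketched with NumPy for illustration; the record above performs this with TensorFlow ops.

import numpy as np

def batch_transpose(mat):
    perm = list(range(mat.ndim))
    perm[-2], perm[-1] = perm[-1], perm[-2]  # swap the trailing matrix axes
    return np.transpose(mat, axes=perm)

x = np.arange(24).reshape(2, 3, 4)   # a batch of two 3x4 matrices
print(batch_transpose(x).shape)      # (2, 4, 3)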
def is_profile_supported(self, conformance_clause, authentication_suite):
    return (self.is_conformance_clause_supported(conformance_clause) and
            self.is_authentication_suite_supported(authentication_suite))
Check if a profile is supported by the client. Args: conformance_clause (ConformanceClause): authentication_suite (AuthenticationSuite): Returns: bool: True if the profile is supported, False otherwise. Example: >>> client.is_profile_supported( ... ConformanceClause.DISCOVER_VERSIONS, ... AuthenticationSuite.BASIC) True
juraj-google-style
def identity(shape: Tuple[int, ...], dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True, fmt: str='quat') -> Rigid:
    return Rigid(
        Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),
        identity_trans(shape, dtype, device, requires_grad),
    )
Constructs an identity transformation. Args: shape: The desired shape dtype: The dtype of both internal tensors device: The device of both internal tensors requires_grad: Whether grad should be enabled for the internal tensors Returns: The identity transformation
github-repos
def get_defaults(path):
    defaults = {}
    if os.path.isfile(path):
        with open(path) as f:
            for line in f:
                line = line.strip()
                # The startswith() argument was truncated in the source; restoring it
                # as a comment-prefix check ('#') is an assumption.
                if '=' not in line or line.startswith('#'):
                    continue
                k, v = line.split('=', 1)
                v = v.strip('"').strip("'")
                defaults[k] = v
        return defaults
    else:
        return {}
Reads file for configuration defaults. Arguments: - path (str) Absolute filepath (usually ~/.licenser) Returns: - (dict) Defaults for name, email, license, .txt extension
codesearchnet
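A self-contained sketch of the same key=value parsing; the config content and the temporary path below are made up for illustration.

import os
import tempfile

def parse_defaults(path):
    defaults = {}
    if not os.path.isfile(path):
        return {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if '=' not in line or line.startswith('#'):  # skip comments and non key=value lines
                continue
            k, v = line.split('=', 1)
            defaults[k] = v.strip('"').strip("'")
    return defaults

with tempfile.NamedTemporaryFile('w', suffix='.licenser', delete=False) as tmp:
    tmp.write('# example config\nname="Ada Lovelace"\nlicense=MIT\n')
print(parse_defaults(tmp.name))  # {'name': 'Ada Lovelace', 'license': 'MIT'}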
def fit(self, y):
    self.ndim_ = y.ndim
    return self
Fit the transformer to a target y. Returns: TargetReshaper A reference to the current instance of TargetReshaper.
github-repos
def get_input_arrays(self):
    if self._has_valid_tensors():
        return [_get_tensor_name(tensor) for tensor in self._input_tensors]
    else:
        return [name for name, _ in self._input_arrays_with_shape]
Returns a list of the names of the input tensors. Returns: List of strings.
github-repos
def CheckSchema(self, database):
    schema_match = False
    if self.SCHEMAS:
        for schema in self.SCHEMAS:
            if database and database.schema == schema:
                schema_match = True
    return schema_match
Checks the schema of a database with that defined in the plugin. Args: database (SQLiteDatabase): database. Returns: bool: True if the schema of the database matches that defined by the plugin, or False if the schemas do not match or no schema is defined by the plugin.
codesearchnet
def Serialize(self, writer):
    writer.WriteVarBytes(self.Script)
    writer.WriteVarBytes(self.ParameterList)
    writer.WriteByte(self.ReturnType)
Serialize full object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def compile_into_spirv(raw, stage, filepath, language='glsl', optimization='size', suppress_warnings=False, warnings_as_errors=False):
    stage = stages_mapping[stage]
    lang = languages_mapping[language]
    opt = opt_mapping[optimization]

    options = lib.shaderc_compile_options_initialize()
    lib.shaderc_compile_options_set_source_language(options, lang)
    lib.shaderc_compile_options_set_optimization_level(options, opt)
    lib.shaderc_compile_options_set_target_env(options, lib.shaderc_target_env_vulkan, 0)
    lib.shaderc_compile_options_set_auto_bind_uniforms(options, False)
    lib.shaderc_compile_options_set_include_callbacks(options, lib.resolve_callback, lib.release_callback, ffi.NULL)
    if suppress_warnings:
        lib.shaderc_compile_options_set_suppress_warnings(options)
    if warnings_as_errors:
        lib.shaderc_compile_options_set_warnings_as_errors(options)

    compiler = lib.shaderc_compiler_initialize()
    result = lib.shaderc_compile_into_spv(compiler, raw, len(raw), stage, str.encode(filepath), b'main', options)

    status = lib.shaderc_result_get_compilation_status(result)
    if status != lib.shaderc_compilation_status_success:
        msg = _get_log(result)
        lib.shaderc_compile_options_release(options)
        lib.shaderc_result_release(result)
        lib.shaderc_compiler_release(compiler)
        raise CompilationError(msg)

    length = lib.shaderc_result_get_length(result)
    output_pointer = lib.shaderc_result_get_bytes(result)
    tmp = bytearray(length)
    ffi.memmove(tmp, output_pointer, length)
    spirv = bytes(tmp)

    lib.shaderc_compile_options_release(options)
    lib.shaderc_result_release(result)
    lib.shaderc_compiler_release(compiler)
    return spirv
Compile shader code into Spir-V binary. This function uses shaderc to compile your glsl or hlsl code into Spir-V code. You can refer to the shaderc documentation. Args: raw (bytes): glsl or hlsl code (bytes format, not str) stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom', 'frag', 'comp'] filepath (str): Absolute path of the file (needed for #include) language (str): 'glsl' or 'hlsl' optimization (str): 'zero' (no optimization) or 'size' (reduce size) suppress_warnings (bool): True to suppress warnings warnings_as_errors (bool): Turn warnings into errors Returns: bytes: Compiled Spir-V binary. Raises: CompilationError: If compilation fails.
codesearchnet
def get_integer_index(
    miller_index: bool, round_dp: int = 4, verbose: bool = True
) -> Tuple[int, int, int]:
    miller_index = np.asarray(miller_index)
    miller_index /= min([m for m in miller_index if m != 0])
    miller_index /= np.max(np.abs(miller_index))

    md = [Fraction(n).limit_denominator(12).denominator for n in miller_index]
    miller_index *= reduce(lambda x, y: x * y, md)
    int_miller_index = np.int_(np.round(miller_index, 1))
    miller_index /= np.abs(reduce(gcd, int_miller_index))

    miller_index = np.array([round(h, round_dp) for h in miller_index])
    int_miller_index = np.int_(np.round(miller_index, 1))
    if np.any(np.abs(miller_index - int_miller_index) > 1e-6) and verbose:
        warnings.warn("Non-integer encountered in Miller index")
    else:
        miller_index = int_miller_index

    miller_index += 0

    def n_minus(index):
        return len([h for h in index if h < 0])

    if n_minus(miller_index) > n_minus(miller_index * -1):
        miller_index *= -1
    if (
        sum(miller_index != 0) == 2
        and n_minus(miller_index) == 1
        and abs(min(miller_index)) > max(miller_index)
    ):
        miller_index *= -1
    return tuple(miller_index)
Attempt to convert a vector of floats to whole numbers. Args: miller_index (list of float): A list of Miller indexes. round_dp (int, optional): The number of decimal places to round the miller index to. verbose (bool, optional): Whether to print warnings. Returns: (tuple): The Miller index.
juraj-google-style
def target_batch_encode_plus(self, answer: List[str], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
        padding=padding, truncation=truncation, max_length=max_length,
        pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    return self._target_batch_encode_plus(
        answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy,
        truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
        return_tensors=return_tensors, return_token_type_ids=return_token_type_ids,
        return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
        return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
        return_length=return_length, verbose=verbose, **kwargs)
Prepare answer strings for the model. Args: answer `List[str]`: Corresponding answer supervision to the queries for training the model.
github-repos
def from_config(cls, config):
    return cls(**config)
Instantiates a `LearningRateSchedule` from its config. Args: config: Output of `get_config()`. Returns: A `LearningRateSchedule` instance.
github-repos
def _process_landing_page(self, item, feed_item):
    if feed_item.get(FieldMap.AD_LANDING_PAGE_ID, ''):
        landing_page = self._landing_page_dao.get(feed_item, required=True)
        item['clickThroughUrl'] = {'landingPageId': landing_page['id']}
    if feed_item.get(FieldMap.AD_URL_SUFFIX, ''):
        item['clickThroughUrlSuffixProperties'] = {
            'overrideInheritedSuffix': True,
            'clickThroughUrlSuffix': feed_item.get(FieldMap.AD_URL_SUFFIX, '')
        }
    else:
        item['clickThroughUrlSuffixProperties'] = {'overrideInheritedSuffix': False}
Configures ad landing page. Args: item: DCM ad object to update. feed_item: Feed item representing the ad from the Bulkdozer feed
github-repos
def timed_display(msg):
    def print_header(msg, newline=True):
        if sys.stdout.isatty():
            print('\r', end=Style.BRIGHT + Fore.BLUE)
        print(' {} '.format(msg).center(_ncols(), '='),
              end='\n{}'.format(Style.RESET_ALL) if newline else Style.RESET_ALL)
        sys.stdout.flush()

    def print_message(msg):
        if sys.stdout.isatty():
            print('\r', end='')
        msg = msg.ljust(_ncols())
        print(msg, end='')
        sys.stdout.flush()

    start = time.time()
    print_header(msg)
    with hidden_cursor():
        try:
            yield print_message
        finally:
            delta = time.time() - start
            print_header('completed in {:.2f}s'.format(delta), False)
A timed block to run tasks with titles and success/failure messages. Args: msg: The header message to print at the beginning of the timed block.
juraj-google-style
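A stripped-down, dependency-free sketch of the same idea: a context manager that prints a header, yields a message printer, and reports elapsed time. The decorator and plain print calls here replace the colorama/terminal handling in the record above.

import contextlib
import time

@contextlib.contextmanager
def timed_block(msg):
    print('== {} =='.format(msg))
    start = time.time()
    try:
        yield print  # caller can print progress messages
    finally:
        print('== completed in {:.2f}s =='.format(time.time() - start))

with timed_block('building package') as report:
    report('step 1: compiling')
    time.sleep(0.1)
    report('step 2: linking')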
def _from_dataframe(dataframe, default_type='STRING'):
    type_mapping = {
        'i': 'INTEGER',
        'b': 'BOOLEAN',
        'f': 'FLOAT',
        'O': 'STRING',
        'S': 'STRING',
        'U': 'STRING',
        'M': 'TIMESTAMP'
    }
    fields = []
    for column_name, dtype in dataframe.dtypes.iteritems():
        fields.append({'name': column_name, 'type': type_mapping.get(dtype.kind, default_type)})
    return fields
Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set the types of the columns in the dataframe, they may be of a type that forces coercion to STRING, so even though the fields in the dataframe themselves may be numeric, the type in the derived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed correctly. Args: dataframe: The DataFrame. default_type : The default big query type in case the type of the column does not exist in the schema. Defaults to 'STRING'. Returns: A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a BigQuery Tables resource schema.
codesearchnet
def __init__(self, my_api_key):
    super(self.__class__, self).__init__(my_api_key)
    self.sort_by_postfix = '?sortBy='
    self.boxes_suffix = 'boxes'
    self.stages_suffix = 'stages'
    self.pipelines_suffix = 'pipelines'
    self.search_suffix = 'search?query='
    self.snippets_suffix = 'snippets'
    self.fields_suffix = 'fields'
    self.newsfeed_suffix = 'newsfeed'
    self.threads_suffix = 'threads'
    self.comments_suffix = 'comments'
    self.files_suffix = 'files'
    self.file_contents_suffix = 'contents'
    self.file_link_suffix = 'link'
    self.reminders_suffix = 'reminders'
    self.detail_level_suffix = '?detailLevel='
    if DEBUG:
        print(self.api_uri)
Initializes an instance of the class with an API key. Allows multiple instances with distinct keys. Args: my_api_key: API key for this instance
juraj-google-style
def get_submission_variants(form_fields):
    clinvars = []
    if 'all_vars' in form_fields:
        for field, value in form_fields.items():
            if field.startswith('local_id'):
                clinvars.append(form_fields[field].replace('local_id@', ''))
    else:
        clinvars = [form_fields['main_var']]
    return clinvars
Extracts a list of variant ids from the clinvar submission form in blueprints/variants/clinvar.html (creation of a new clinvar submission). Args: form_fields(dict): it's the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER Returns: clinvars: A list of variant IDs
juraj-google-style
def modified_lu(q):
    q = q.assemble()
    m, b = q.shape[0], q.shape[1]
    S = np.zeros(b)
    q_work = np.copy(q)
    for i in range(b):
        S[i] = -1 * np.sign(q_work[i, i])
        q_work[i, i] -= S[i]
        q_work[(i + 1):m, i] /= q_work[i, i]
        q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i], q_work[i, (i + 1):b])
    L = np.tril(q_work)
    for i in range(b):
        L[i, i] = 1
    U = np.triu(q_work)[:b, :]
    return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S
Perform a modified LU decomposition of a matrix. This takes a matrix q with orthonormal columns, returns l, u, s such that q - s = l * u. Args: q: A two dimensional orthonormal matrix q. Returns: A tuple of a lower triangular matrix l, an upper triangular matrix u, and a vector representing a diagonal matrix s such that q - s = l * u.
codesearchnet
def is_orthogonal(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:
    return (matrix.shape[0] == matrix.shape[1] and
            np.all(np.imag(matrix) == 0) and
            np.allclose(matrix.dot(matrix.T), np.eye(matrix.shape[0]), rtol=rtol, atol=atol))
Determines if a matrix is approximately orthogonal. A matrix is orthogonal if it's square and real and its transpose is its inverse. Args: matrix: The matrix to check. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the matrix is orthogonal within the given tolerance.
codesearchnet
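Quick usage sketch with NumPy: a rotation matrix passes the check, a scaled one does not. The helper below mirrors the record above and is included only so the example is self-contained.

import numpy as np

def is_orthogonal(matrix, *, rtol=1e-05, atol=1e-08):
    return (matrix.shape[0] == matrix.shape[1]
            and np.all(np.imag(matrix) == 0)
            and np.allclose(matrix.dot(matrix.T), np.eye(matrix.shape[0]), rtol=rtol, atol=atol))

theta = np.pi / 3
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])
print(is_orthogonal(rot))      # True
print(is_orthogonal(2 * rot))  # False: columns are no longer unit length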
def insecure_channel(target, options=None, *, loop=None, executor=None, standalone_pool_for_streaming=False):
    return Channel(_grpc.insecure_channel(target, options), loop, executor, standalone_pool_for_streaming)
Creates an insecure Channel to a server. Args: target: The server address options: An optional list of key-value pairs (channel args in gRPC runtime) to configure the channel. Returns: A Channel object.
juraj-google-style
def _convert_variables_to_tensors(self):
    return self
Converts ResourceVariable components to Tensors. Override this method to explicitly convert ResourceVariables embedded in the CompositeTensor to Tensors. By default, it returns the CompositeTensor unchanged. Returns: A CompositeTensor with all its ResourceVariable components converted to Tensors.
github-repos
def recipe_dbm_to_storage(config, auth_read, dbm_report_id, auth_write, dbm_report_name, dbm_bucket, dbm_path):
    dbm(config, {
        'auth': auth_read,
        'report': {'report_id': dbm_report_id, 'name': dbm_report_name},
        'out': {'storage': {'auth': auth_write, 'bucket': dbm_bucket, 'path': dbm_path}}
    })
Move existing DV360 report into a Storage bucket. Args: auth_read (authentication) - Credentials used for reading data. dbm_report_id (integer) - DV360 report ID given in UI, not needed if name used. auth_write (authentication) - Credentials used for writing data. dbm_report_name (string) - Name of report, not needed if ID used. dbm_bucket (string) - Google cloud bucket. dbm_path (string) - Path and filename to write to.
github-repos
def __init__(self, lattice, hamiltonian):
    expected_hamiltonian_values = ['nearest-neighbour']
    if hamiltonian not in expected_hamiltonian_values:
        raise ValueError(hamiltonian)
    self.site_energies = lattice.site_energies
    self.nn_energy = lattice.nn_energy
    self.cn_energy = lattice.cn_energies
    self.connected_site_pairs = lattice.connected_site_pairs()
    self.max_coordination_per_site = lattice.max_site_coordination_numbers()
    self.site_specific_coordination_per_site = lattice.site_specific_coordination_numbers()
    if hamiltonian == 'nearest-neighbour':
        self.generate_nearest_neighbour_lookup_table()
Initialise a LookupTable object instance. Args: lattice (lattice_mc.Lattice): The lattice object, used to define the allowed jumps. hamiltonian (Str): The model Hamiltonian used to define the jump energies. Allowed values = `nearest-neighbour` Returns: None
juraj-google-style
def to_pandas(self, is_transposed=False):
    if is_transposed:
        return self.transpose().to_pandas(False).T
    else:
        retrieved_objects = [[obj.to_pandas() for obj in part] for part in self.partitions]
        if all(isinstance(part, pandas.Series) for row in retrieved_objects for part in row):
            axis = 0
        elif all(isinstance(part, pandas.DataFrame) for row in retrieved_objects for part in row):
            axis = 1
        else:
            ErrorMessage.catch_bugs_and_request_email(True)
        df_rows = [
            pandas.concat([part for part in row], axis=axis)
            for row in retrieved_objects
            if not all(part.empty for part in row)
        ]
        if len(df_rows) == 0:
            return pandas.DataFrame()
        else:
            return pandas.concat(df_rows)
Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag for telling this object that the external representation is transposed, but not the internal. Returns: A Pandas DataFrame
codesearchnet
def is_supported(cls, file=None, request=None, response=None, url_info=None):
    tests = (
        (response, cls.is_response),
        (file, cls.is_file),
        (request, cls.is_request),
        (url_info, cls.is_url)
    )
    for instance, method in tests:
        if instance:
            try:
                result = method(instance)
            except NotImplementedError:
                pass
            else:
                if result:
                    return True
                elif result is VeryFalse:
                    return VeryFalse
Given the hints, return whether the document is supported. Args: file: A file object containing the document. request (:class:`.http.request.Request`): An HTTP request. response (:class:`.http.request.Response`): An HTTP response. url_info (:class:`.url.URLInfo`): A URLInfo. Returns: bool: If True, the reader should be able to read it.
juraj-google-style
def _strip_variable_names(self, summaries):
    result = set()
    for s in summaries:
        if '/' not in s.tag:
            result.add(s)
        else:
            split_tag = s.tag.split('/')
            if 'variable' in split_tag[0]:
                result.add(s._replace(tag=split_tag[-1]))
            else:
                result.add(s)
    return result
Remove `variable_n` from summary tags. `variable_n` tag names are added with random numbers; removing them ensures deterministic tag names. Args: summaries: A `set` of `_ObservedSummary` values. Returns: A new `set` of `_ObservedSummary` values with layer prefixes removed.
github-repos
def case_to_clinVars(self, case_id):
    query = dict(case_id=case_id, csv_type='variant')
    clinvar_objs = list(self.clinvar_collection.find(query))
    submitted_vars = {}
    for clinvar in clinvar_objs:
        submitted_vars[clinvar.get('local_id')] = clinvar
    return submitted_vars
Get all variants included in clinvar submissions for a case Args: case_id(str): a case _id Returns: submission_variants(dict): keys are variant ids and values are variant submission objects
codesearchnet
def serialize_cert_to_pem(cert_obj):
    return cert_obj.public_bytes(
        encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM
    )
Serialize certificate to PEM. The certificate can be also be a Certificate Signing Request (CSR). Args: cert_obj: cryptography.Certificate Returns: bytes: PEM encoded certificate
juraj-google-style
def _get_metric_histogram(histogram_proto):
    ret = dict()
    ret['min'] = histogram_proto.min
    ret['max'] = histogram_proto.max
    ret['num'] = histogram_proto.num
    ret['sum'] = histogram_proto.sum
    bucket_limits = histogram_proto.bucket_limit
    bucket_vals = histogram_proto.bucket
    ret['histogram'] = {}
    bucket_limits.insert(0, 0)
    for lb, ub, val in zip(bucket_limits[:-1], bucket_limits[1:], bucket_vals):
        ret['histogram'][lb, ub] = val
    return ret
Convert a histogram proto into a dict. Args: histogram_proto: a proto containing a Sampler metric's result histogram. Returns: A dict containing summary statistics and the raw histogram values.
github-repos
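A plain-Python sketch of the same bucketization, using a simple namespace in place of the protobuf message; the field names mirror the record above and the sample values are made up.

from types import SimpleNamespace

def histogram_to_dict(h):
    ret = {'min': h.min, 'max': h.max, 'num': h.num, 'sum': h.sum, 'histogram': {}}
    limits = [0] + list(h.bucket_limit)  # prepend 0 so each bucket has a lower bound
    for lb, ub, val in zip(limits[:-1], limits[1:], h.bucket):
        ret['histogram'][(lb, ub)] = val
    return ret

proto_like = SimpleNamespace(min=1.0, max=9.0, num=4, sum=20.0,
                             bucket_limit=[2.0, 5.0, 10.0], bucket=[1, 2, 1])
print(histogram_to_dict(proto_like))
# {'min': 1.0, 'max': 9.0, 'num': 4, 'sum': 20.0,
#  'histogram': {(0, 2.0): 1, (2.0, 5.0): 2, (5.0, 10.0): 1}}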
def ParseDownloadsRow(self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)

    event_data = FirefoxDownloadEventData()
    event_data.full_path = self._GetRowValue(query_hash, row, 'target')
    event_data.mime_type = self._GetRowValue(query_hash, row, 'mimeType')
    event_data.name = self._GetRowValue(query_hash, row, 'name')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.received_bytes = self._GetRowValue(query_hash, row, 'currBytes')
    event_data.referrer = self._GetRowValue(query_hash, row, 'referrer')
    event_data.temporary_location = self._GetRowValue(query_hash, row, 'tempPath')
    event_data.total_bytes = self._GetRowValue(query_hash, row, 'maxBytes')
    event_data.url = self._GetRowValue(query_hash, row, 'source')

    timestamp = self._GetRowValue(query_hash, row, 'startTime')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'endTime')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_END)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a downloads row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def create(self, validated_data):
    email = validated_data.pop('email')
    password = validated_data.pop('password')

    user = get_user_model()(**validated_data)
    user.set_password(password)
    user.email = email

    email_query = models.EmailAddress.objects.filter(email=email)
    if email_query.exists():
        existing_email = email_query.get()
        existing_email.send_duplicate_notification()
    else:
        user.save()
        email_instance = models.EmailAddress.objects.create(email=email, user=user)
        email_instance.send_confirmation()
        signals.user_registered.send(sender=self.__class__, user=user)
    return user
Create a new user from the data passed to the serializer. If the provided email has not been verified yet, the user is created and a verification email is sent to the address. Otherwise we send a notification to the email address that someone attempted to register with an email that's already been verified. Args: validated_data (dict): The data passed to the serializer after it has been validated. Returns: A new user created from the provided data.
codesearchnet
def from_pymatgen_molecule(cls, molecule):
    new = cls(atoms=[el.value for el in molecule.species],
              coords=molecule.cart_coords)
    return new._to_numeric()
Create an instance of the own class from a pymatgen molecule Args: molecule (:class:`pymatgen.core.structure.Molecule`): Returns: Cartesian:
codesearchnet
def get_dense_tensor(self, transformation_cache, state_manager):
    return transformation_cache.get(self, state_manager)
Returns dense `Tensor` representing numeric feature. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Dense `Tensor` created within `transform_feature`.
github-repos
def apply(self, dataset, flat=False, expanded=None, ranges={}, all_values=False):
    dimension = self.dimension
    if expanded is None:
        expanded = not ((dataset.interface.gridded and dimension in dataset.kdims) or
                        (dataset.interface.multi and dataset.interface.isscalar(dataset, dimension)))
    if isinstance(dataset, Graph):
        if dimension in dataset.kdims and all_values:
            dimension = dataset.nodes.kdims[2]
        dataset = dataset if dimension in dataset else dataset.nodes
    data = dataset.dimension_values(dimension, expanded=expanded, flat=flat)
    for o in self.ops:
        args = o['args']
        fn_args = [data]
        for arg in args:
            if isinstance(arg, dim):
                arg = arg.apply(dataset, flat, expanded, ranges, all_values)
            fn_args.append(arg)
        args = tuple(fn_args[::-1] if o['reverse'] else fn_args)
        eldim = dataset.get_dimension(dimension)
        drange = ranges.get(eldim.name, {})
        drange = drange.get('combined', drange)
        kwargs = o['kwargs']
        if o['fn'] is norm and drange != {} and not ('min' in kwargs and 'max' in kwargs):
            data = o['fn'](data, *drange)
        else:
            data = o['fn'](*args, **kwargs)
    return data
Evaluates the transform on the supplied dataset. Args: dataset: Dataset object to evaluate the expression on flat: Whether to flatten the returned array expanded: Whether to use expanded (rather than compressed) values ranges: Dictionary of ranges for normalization all_values: Whether to evaluate on all available values; for some element types, such as Graphs, this may include values not included in the referenced column Returns: values: NumPy array computed by evaluating the expression
codesearchnet
def extend(self, trajectory):
    if self.time_step != trajectory.time_step:
        raise ValueError('Trajectory not extended: Time steps of trajectories is incompatible')
    if len(self.species) != len(trajectory.species) and self.species != trajectory.species:
        raise ValueError('Trajectory not extended: species in trajectory do not match')
    self.to_positions()
    trajectory.to_positions()
    self.frac_coords = np.concatenate((self.frac_coords, trajectory.frac_coords), axis=0)
    self.lattice, self.constant_lattice = self._combine_attribute(
        self.lattice, trajectory.lattice,
        self.frac_coords.shape[0], trajectory.frac_coords.shape[0])
    self.site_properties = self._combine_attribute(
        self.site_properties, trajectory.site_properties,
        self.frac_coords.shape[0], trajectory.frac_coords.shape[0])
Concatenate another trajectory Args: trajectory (Trajectory): Trajectory to add
juraj-google-style
def create_order(self, debtor, is_vat_included=True, due_date=None, heading='', text_line1='', text_line2='', debtor_data=None, delivery_data=None, products=None, project=None, other_reference='', model=models.Order, **extra):
    debtor_data = debtor_data or {}
    delivery_data = delivery_data or {}
    delivery_date = delivery_data.get('date', datetime.datetime.now())
    our_reference = extra.get('our_reference', debtor.our_reference)
    currency = extra.get('currency', debtor.currency)
    layout = extra.get('layout', debtor.layout)
    term_of_payment = extra.get('term_of_payment', debtor.term_of_payment)
    date = extra.get('date', datetime.datetime.now())

    order_input = {'debtor': debtor, 'number': extra.get('number', 1), 'project': project}
    for dd in ['name', 'address', 'postal_code', 'city', 'country', 'ean']:
        order_input['debtor_%s' % dd] = debtor_data.get(dd, getattr(debtor, dd))
    for dd in ['address', 'postal_code', 'city', 'country']:
        order_input['delivery_%s' % dd] = delivery_data.get(dd, getattr(debtor, dd))

    order_input.update({
        'delivery_date': delivery_date or datetime.datetime.now(),
        'heading': heading,
        'text_line1': text_line1,
        'text_line2': text_line2,
        'is_archived': extra.get('is_archived', 0),
        'is_sent': extra.get('is_sent', 0),
        'net_amount': extra.get('net_amount', 0),
        'vat_amount': extra.get('vat_amount', 0),
        'gross_amount': extra.get('gross_amount', 0),
        'margin': extra.get('margin', 0),
        'margin_as_percent': extra.get('margin_as_percent', 0),
        'date': date,
        'our_reference': our_reference,
        'other_reference': other_reference,
        'currency': currency,
        'exchange_rate': extra.get('exchange_rate', 1.0),
        'is_vat_included': is_vat_included,
        'layout': layout,
        'due_date': due_date or datetime.datetime.now(),
        'term_of_payment': term_of_payment,
    })
    order_input.update(extra)

    order = self.create(model, **order_input)
    if products:
        for product in products:
            self.create_orderline(order, product)
    return order
Create a new Order. Args: debtor (Debtor): the debtor of the order debtor_data (mapping): map of debtor data {'postal_code: .., 'city': .., 'ean': ..} defaults to values on debtor instance for missing values delivery_data (mapping): map of delivery data {'address': ..., 'postal_code': ...} defaults to values on debtor instance for missing values due_date (datetime): due date heading (string): heading to be displayed in the order pdf text_line1 (string): first order description line text_line2 (string): second order description line other_reference (string): custom string to be used for identification extra (mapping): mapping of extra values to be passed in to the server call Returns: Order instance
codesearchnet
def _IsIdentifier(cls, string):
    return (
        string and not string[0].isdigit() and
        all(character.isalnum() or character == '_' for character in string))
Checks if a string contains an identifier. Args: string (str): string to check. Returns: bool: True if the string contains an identifier, False otherwise.
juraj-google-style
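The same identifier rule as a standalone function, with a few illustrative checks; the sample strings are made up.

def is_identifier(string):
    return bool(string and not string[0].isdigit()
                and all(ch.isalnum() or ch == '_' for ch in string))

print(is_identifier('parser_mediator'))  # True
print(is_identifier('2fast'))            # False: starts with a digit
print(is_identifier('bad-name'))         # False: '-' is neither alphanumeric nor '_'
print(is_identifier(''))                 # False: empty string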
def _load_yaml_(file_name):
    if not os.path.exists(file_name):
        return dict()
    with open(file_name, 'r', encoding='utf-8') as fp:
        return YAML().load(stream=fp)
Load assets information from file Args: file_name: file name Returns: dict
juraj-google-style
def window_reverse(windows, window_size, height, width):
    num_channels = windows.shape[-1]
    # The view() arguments were truncated in the source; restoring the usual
    # windowed-attention layout (batch, h_windows, w_windows, window, window, channels)
    # is an assumption based on the matching permute/view below.
    windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
    windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
    return windows
Merges windows to produce higher resolution features. Args: windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`): Input windows window_size (`int`): Window size height (`int`): Height of the resized audio width (`int`): Width of the resized audio
github-repos
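Round-trip sketch pairing the record above with a matching window_partition; the partition helper is an assumption here (it follows the usual Swin-style layout) and is included only to show that window_reverse undoes it.

import torch

def window_partition(x, window_size):
    batch, height, width, channels = x.shape
    x = x.view(batch, height // window_size, window_size, width // window_size, window_size, channels)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, channels)

def window_reverse(windows, window_size, height, width):
    num_channels = windows.shape[-1]
    windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
    return windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)

x = torch.arange(2 * 8 * 8 * 3, dtype=torch.float32).reshape(2, 8, 8, 3)
windows = window_partition(x, window_size=4)            # shape (8, 4, 4, 3)
assert torch.equal(window_reverse(windows, 4, 8, 8), x)  # partition then reverse is lossless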
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):
    use_auth_token = kwargs.pop('use_auth_token', None)
    if use_auth_token is not None:
        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
        if kwargs.get('token', None) is not None:
            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
        kwargs['token'] = use_auth_token

    if os.path.isfile(save_directory):
        raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
    os.makedirs(save_directory, exist_ok=True)

    if push_to_hub:
        commit_message = kwargs.pop('commit_message', None)
        repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])
        repo_id = self._create_repo(repo_id, **kwargs)
        files_timestamps = self._get_files_timestamps(save_directory)

    if self._auto_class is not None:
        custom_object_save(self, save_directory, config=self)

    output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME)
    self.to_json_file(output_image_processor_file)
    logger.info(f'Image processor saved in {output_image_processor_file}')

    if push_to_hub:
        self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))

    return [output_image_processor_file]
Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the [`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the image processor JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
github-repos
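A hedged usage sketch for the image-processor save/reload cycle, assuming the transformers library is installed and that the Hub checkpoint name (used only as an example) is reachable; the local directory name is arbitrary.

from transformers import AutoImageProcessor

# Load a processor from the Hub, then serialize its configuration locally as
# a preprocessor JSON file.
processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
saved_files = processor.save_pretrained('./my_image_processor')
print(saved_files)

# Reload from the saved directory instead of the Hub.
reloaded = AutoImageProcessor.from_pretrained('./my_image_processor')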
def format_script(sensor_graph): records = [] records.append(SetGraphOnlineRecord(False, address=8)) records.append(ClearDataRecord(address=8)) records.append(ResetGraphRecord(address=8)) for node in sensor_graph.nodes: records.append(AddNodeRecord(str(node), address=8)) for streamer in sensor_graph.streamers: records.append(AddStreamerRecord(streamer, address=8)) for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()): records.append(SetConstantRecord(stream, value, address=8)) records.append(PersistGraphRecord(address=8)) records.append(ClearConfigVariablesRecord()) for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()): for config_id in sorted(sensor_graph.config_database[slot]): config_type, value = sensor_graph.config_database[slot][config_id] byte_value = _convert_to_bytes(config_type, value) records.append(SetConfigRecord(slot, config_id, byte_value)) app_tag = sensor_graph.metadata_database.get('app_tag') app_version = sensor_graph.metadata_database.get('app_version') if app_tag is not None: records.append(SetDeviceTagRecord(app_tag=app_tag, app_version=app_version)) script = UpdateScript(records) return script.encode()
Create a binary script containing this sensor graph. This function produces a repeatable script by applying a known sorting order to all constants and config variables when iterating over those dictionaries. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: bytearray: The binary script data.
juraj-google-style
def CheckDataVisiblity(self, value): if not self.data_visibility_policy: return None visible, reason = self.data_visibility_policy.IsDataVisible( DetermineType(value)) if visible: return None return { 'status': { 'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': { 'format': reason } } }
Returns a status object if the given name is not visible. Args: value: The value to check. The actual value here is not important but the value's metadata (e.g. package and type) will be checked. Returns: None if the value is visible. A variable structure with an error status if the value should not be visible.
juraj-google-style
def remove_service(self, service): url = self._url('/services/{0}', service) resp = self._delete(url) self._raise_for_status(resp) return True
Stop and remove a service. Args: service (str): Service name or ID Returns: ``True`` if successful. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
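A hedged usage example with the docker SDK's low-level client; it assumes docker-py is installed, the daemon is reachable on the default socket, the node is a swarm manager, and a service named 'web' exists (all of which are assumptions for illustration).

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')

try:
    if client.remove_service('web'):
        print('service removed')
except docker.errors.APIError as exc:
    print('removal failed:', exc.explanation)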
def tables_get(self, table_name): url = Api._ENDPOINT + (Api._TABLES_PATH % table_name) return datalab.utils.Http.request(url, credentials=self._credentials)
Issues a request to retrieve information about a table. Args: table_name: a tuple representing the full name of the table. Returns: A parsed result object. Raises: Exception if there is an error performing the operation.
juraj-google-style
def _execute_command(self, key, *args): client = self.redis_clients[key.redis_shard_hash() % len( self.redis_clients)] return client.execute_command(*args)
Execute a Redis command on the appropriate Redis shard based on key. Args: key: The object ID or the task ID that the query is about. args: The command to run. Returns: The value returned by the Redis command.
juraj-google-style
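A self-contained sketch of the shard-routing rule with stub clients standing in for real Redis shards; the key and command below are made up.

class FakeKey:
    """Stand-in for an object/task ID with a stable shard hash."""

    def __init__(self, value):
        self.value = value

    def redis_shard_hash(self):
        return hash(self.value)


class FakeShard:
    def __init__(self, name):
        self.name = name

    def execute_command(self, *args):
        return '%s ran: %s' % (self.name, ' '.join(map(str, args)))


shards = [FakeShard('shard-0'), FakeShard('shard-1'), FakeShard('shard-2')]


def execute_command(key, *args):
    # Same routing rule as above: hash the key, take it modulo the shard count.
    client = shards[key.redis_shard_hash() % len(shards)]
    return client.execute_command(*args)


print(execute_command(FakeKey('task-42'), 'HGETALL', 'task-42'))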
def sanity_check_states(states_spec): states = copy.deepcopy(states_spec) is_unique = ('shape' in states) if is_unique: states = dict(state=states) for name, state in states.items(): if isinstance(state['shape'], int): state['shape'] = (state['shape'],) if 'type' not in state: state['type'] = 'float' return states, is_unique
Sanity checks a states dict, used to define the state space for an MDP. Throws an error or warns if mismatches are found. Args: states_spec (Union[None,dict]): The spec-dict to check (or None). Returns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space.
juraj-google-style
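A self-contained copy of the normalization logic with two example specs, showing how a bare shape becomes a single 'state' entry and how defaults are filled in.

import copy


def sanity_check_states(states_spec):
    states = copy.deepcopy(states_spec)
    is_unique = 'shape' in states
    if is_unique:
        states = dict(state=states)
    for name, state in states.items():
        if isinstance(state['shape'], int):
            state['shape'] = (state['shape'],)
        if 'type' not in state:
            state['type'] = 'float'
    return states, is_unique


# Single-component spec: the int shape becomes a tuple and the type defaults to float.
print(sanity_check_states({'shape': 10}))
# Multi-component spec keyed by name.
print(sanity_check_states({'pixels': {'shape': (64, 64, 3)}, 'speed': {'shape': 1, 'type': 'int'}}))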
def inside_function() -> bool: return get_default_graph().building_function
Indicates whether the caller code is executing inside a `tf.function`. Returns: Boolean, True if the caller code is executing inside a `tf.function` rather than eagerly. Example: >>> tf.inside_function() False >>> @tf.function ... def f(): ... print(tf.inside_function()) >>> f() True
github-repos
def key_exists(self, namespace, key): return ((namespace in self.__data) and (key in self.__data[namespace]))
Checks a namespace for the existence of a specific key Args: namespace (str): Namespace to check in key (str): Name of the key to check for Returns: `True` if key exists in the namespace, else `False`
codesearchnet
def __init__(self, reactants, products): self._input_reactants = reactants self._input_products = products self._all_comp = reactants + products els = set() for c in self.all_comp: els.update(c.elements) els = sorted(els) rp_mat = np.array([[c[el] for el in els] for c in self._all_comp]) f_mat = np.concatenate([np.zeros((len(rp_mat), 1)), rp_mat], axis=1) f_mat[len(reactants), 0] = 1 b = np.zeros(len(els) + 1) b[0] = 1 coeffs, res, _, s = np.linalg.lstsq(f_mat.T, b, rcond=None) if sum(np.abs(s) > 1e-12) == len(f_mat): if res.size > 0 and res[0] > self.TOLERANCE ** 2: raise ReactionError("Reaction cannot be balanced.") else: ok = True else: ok = False n_constr = len(rp_mat) - np.linalg.matrix_rank(rp_mat) f_mat = np.concatenate([np.zeros((len(rp_mat), n_constr)), rp_mat], axis=1) b = np.zeros(f_mat.shape[1]) b[:n_constr] = 1 for inds in itertools.combinations(range(len(reactants), len(f_mat)), n_constr): f_mat[:, :n_constr] = 0 for j, i in enumerate(inds): f_mat[i, j] = 1 coeffs, res, _, s = np.linalg.lstsq(f_mat.T, b, rcond=None) if sum(np.abs(s) > 1e-12) == len(self._all_comp) and \ (res.size == 0 or res[0] < self.TOLERANCE ** 2): ok = True break if not ok: r_mat = np.array([[c[el] for el in els] for c in reactants]) reactants_underdetermined = ( np.linalg.lstsq(r_mat.T, np.zeros(len(els)), rcond=None)[2] != len(reactants)) if reactants_underdetermined: raise ReactionError("Reaction cannot be balanced. " "Reactants are underdetermined.") raise ReactionError("Reaction cannot be balanced. " "Unknown error, please report.") self._els = els self._coeffs = coeffs
Reactants and products to be specified as list of pymatgen.core.structure.Composition. e.g., [comp1, comp2] Args: reactants ([Composition]): List of reactants. products ([Composition]): List of products.
juraj-google-style
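A hedged usage example, assuming pymatgen is installed and that this Reaction class is the one exposed via pymatgen.analysis.reaction_calculator; the thermite reaction is used as sample input.

from pymatgen.core import Composition
from pymatgen.analysis.reaction_calculator import Reaction

reactants = [Composition('Fe2O3'), Composition('Al')]
products = [Composition('Al2O3'), Composition('Fe')]
reaction = Reaction(reactants, products)
print(reaction)  # expected: Fe2O3 + 2 Al -> Al2O3 + 2 Fe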
def validate(self): if (self.value is not None): if (not isinstance(self.value, six.integer_types)): raise TypeError('expected (one of): {0}, observed: {1}'.format(six.integer_types, type(self.value))) elif (self.value > LongInteger.MAX): raise ValueError('long integer value greater than accepted max') elif (self.value < LongInteger.MIN): raise ValueError('long integer value less than accepted min')
Verify that the value of the LongInteger is valid. Raises: TypeError: if the value is not of type int or long ValueError: if the value cannot be represented by a signed 64-bit integer
codesearchnet
def run_coroutine(self, cor, *args, **kwargs): if self.stopping: raise LoopStoppingError(('Could not launch coroutine because loop is shutting down: %s' % cor)) self.start() cor = _instaniate_coroutine(cor, args, kwargs) if self.inside_loop(): raise InternalError('BackgroundEventLoop.run_coroutine called from inside event loop, would have deadlocked.') future = self.launch_coroutine(cor) return future.result()
Run a coroutine to completion and return its result. This method may only be called outside of the event loop. Attempting to call it from inside the event loop would deadlock and will raise InternalError instead. Args: cor (coroutine): The coroutine that we wish to run in the background and wait until it finishes. Returns: object: Whatever the coroutine cor returns.
codesearchnet
def _check_for_definition(iface, cls, tag, defines):
    # Abstract methods on the interface that carry the requested tag.
    attributes = (
        attr
        for attr in iface.__abstractmethods__
        if hasattr(getattr(iface, attr), tag)
    )
    for attribute in attributes:
        for node in cls.__mro__:
            if hasattr(node, attribute) and defines(getattr(node, attribute)):
                return True
    # If the generator never yielded, 'attribute' was never bound; the NameError
    # below therefore means there was nothing tagged to define, which passes.
    try:
        attribute
        return False
    except NameError:
        return True
Check for a valid definition of a value. Args: iface (Iface): An Iface specification. cls (type): Some type to check for a definition. tag (str): The name of the tag attribute used to mark the abstract methods. defines (callable): A callable that accepts an attribute and returns True if the attribute is a valid definition. Returns: bool: Whether or not the definition is found.
juraj-google-style
def setup(self, *args): self.setup_formatters(*args) if self.columns: self.print_header() elif self.border and not self.csv: self.print_line(self.make_horizontal_border())
Do preparations before printing the first row Args: *args: first row cells
juraj-google-style
def _AssignTimestamps(pcoll, timestamp: Union[str, dict[str, str]], language: Optional[str]=None): timestamp_fn = _as_callable_for_pcoll(pcoll, timestamp, 'timestamp', language) T = TypeVar('T') return pcoll | beam.Map(lambda x: TimestampedValue(x, timestamp_fn(x))).with_input_types(T).with_output_types(T)
Assigns a new timestamp to each element of its input. This can be useful when reading records that have the timestamp embedded in them, for example with various file types or other sources that by default set all timestamps to the infinite past. Note that the timestamp should only be set forward, as setting it backwards may not cause it to hold back an already advanced watermark and the data could become late enough to be dropped. Args: timestamp: A field, callable, or expression giving the new timestamp. language: The language of the timestamp expression. error_handling: Whether and how to handle errors during timestamp evaluation.
github-repos
def get_category(self, column): result = pd.Series(index=column.index) for (category, stats) in self.probability_map.items(): (start, end) = stats[0] result[((start < column) & (column < end))] = category return result
Returns categories for the specified numeric values Args: column(pandas.Series): Values to transform into categories Returns: pandas.Series
codesearchnet
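A self-contained sketch of the interval lookup with a hypothetical probability_map; the category boundaries and input values are invented for the example.

import pandas as pd

# Hypothetical probability_map: category -> ((interval start, interval end), ...).
probability_map = {
    'low': ((0.0, 0.33), None),
    'medium': ((0.33, 0.66), None),
    'high': ((0.66, 1.0), None),
}

column = pd.Series([0.10, 0.50, 0.90, 0.40])
result = pd.Series(index=column.index, dtype=object)
for category, stats in probability_map.items():
    start, end = stats[0]
    result[(start < column) & (column < end)] = category

print(result.tolist())  # ['low', 'medium', 'high', 'medium']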
def GetRpcServer(options): rpc_server_class = HttpRpcServer def GetUserCredentials(): 'Prompts the user for a username and password.' global global_status st = global_status global_status = None email = options.email if (email is None): email = GetEmail(('Email (login for uploading to %s)' % options.server)) password = getpass.getpass(('Password for %s: ' % email)) global_status = st return (email, password) host = (options.host or options.server).lower() if ((host == 'localhost') or host.startswith('localhost:')): email = options.email if (email is None): email = 'test@example.com' logging.info(('Using debug user %s. Override with --email' % email)) server = rpc_server_class(options.server, (lambda : (email, 'password')), host_override=options.host, extra_headers={'Cookie': ('dev_appserver_login="%s:False"' % email)}, save_cookies=options.save_cookies) server.authenticated = True return server return rpc_server_class(options.server, GetUserCredentials, host_override=options.host, save_cookies=options.save_cookies)
Returns an instance of an AbstractRpcServer. Returns: A new AbstractRpcServer, on which RPC calls can be made.
codesearchnet
def reduce_mat(mat, mag, r_matrix): max_j = abs(int(round(np.linalg.det(mat) / mag))) reduced = False for h in range(3): k = h + 1 if h + 1 < 3 else abs(2 - h) l = h + 2 if h + 2 < 3 else abs(1 - h) j = np.arange(-max_j, max_j + 1) for j1, j2 in itertools.product(j, repeat=2): temp = mat[h] + j1 * mat[k] + j2 * mat[l] if all([np.round(x, 5).is_integer() for x in list(temp / mag)]): mat_copy = mat.copy() mat_copy[h] = np.array([int(round(ele / mag)) for ele in temp]) new_mat = np.dot(mat_copy, np.linalg.inv(r_matrix.T)) if all([np.round(x, 5).is_integer() for x in list(np.ravel(new_mat))]): reduced = True mat[h] = np.array([int(round(ele / mag)) for ele in temp]) break if reduced: break if not reduced: warnings.warn("Matrix reduction not performed, may lead to non-primitive gb cell.") return mat
Reduce integer array mat's determinant mag times by linear combination of its row vectors, so that the new array after rotation (r_matrix) is still an integer array Args: mat (3 by 3 array): input matrix mag (integer): reduce times for the determinant r_matrix (3 by 3 array): rotation matrix Returns: the reduced integer array
juraj-google-style
def get(self, catID, includeRelationships=False): url = '%(base_url)s/record/%(catID)s' % { 'base_url': self.base_url, 'catID': catID } r = self.gbdx_connection.get(url) r.raise_for_status() return r.json()
Retrieves the strip footprint WKT string given a cat ID. Args: catID (str): The source catalog ID from the platform catalog. includeRelationships (bool): whether to include graph links to related objects. Default False. Returns: record (dict): A dict object identical to the json representation of the catalog record
juraj-google-style
def enable_logging(log_level): root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) logfile_handler = logging.StreamHandler(_LOGFILE_STREAM) logfile_handler.setLevel(logging.DEBUG) logfile_handler.setFormatter(logging.Formatter('%(levelname)s [%(asctime)s][%(name)s] %(message)s')) root_logger.addHandler(logfile_handler) if (signal.getsignal(signal.SIGTERM) == signal.SIG_DFL): signal.signal(signal.SIGTERM, _logfile_sigterm_handler) if log_level: handler = logging.StreamHandler() handler.setFormatter(_LogColorFormatter()) root_logger.setLevel(log_level) root_logger.addHandler(handler)
Configure the root logger and a logfile handler. Args: log_level: The logging level to set the logger handler.
codesearchnet
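A minimal stdlib-only sketch of the same dual-handler pattern: an always-verbose file log plus an optional console handler; the file name and levels are arbitrary choices for the example.

import logging


def enable_logging(log_level=None, logfile='debug.log'):
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    # Always-on verbose handler that writes everything to the log file.
    logfile_handler = logging.FileHandler(logfile)
    logfile_handler.setLevel(logging.DEBUG)
    logfile_handler.setFormatter(
        logging.Formatter('%(levelname)s [%(asctime)s][%(name)s] %(message)s'))
    root_logger.addHandler(logfile_handler)

    # Optional console handler, only attached when a level is requested.
    if log_level:
        console = logging.StreamHandler()
        console.setLevel(log_level)
        root_logger.addHandler(console)


enable_logging(logging.INFO)
logging.getLogger('demo').info('shown on the console and written to debug.log')
logging.getLogger('demo').debug('written to debug.log only')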
def ParseRecord(self, parser_mediator, key, structure): if key not in self._SUPPORTED_KEYS: raise errors.ParseError( 'Unable to parse record, unknown structure: {0:s}'.format(key)) self._ParseLogLine(parser_mediator, key, structure)
Parses a log record structure and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the parsed structure. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
juraj-google-style
def section(self, regex, config='running_config'): if config in ['running_config', 'startup_config']: config = getattr(self, config) match = re.search(regex, config, re.M) if not match: raise TypeError('config section not found') block_start, line_end = match.regs[0] match = re.search(r'^[^\s]', config[line_end:], re.M) if not match: raise TypeError('could not find end block') _, block_end = match.regs[0] block_end = line_end + block_end return config[block_start:block_end]
Returns a section of the config Args: regex (str): A valid regular expression used to select sections of configuration to return config (str): The configuration to return. Valid values for config are "running_config" or "startup_config". The default value is "running_config" Returns: The configuration section as a string object.
juraj-google-style
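A self-contained sketch of the block-extraction idea on a sample EOS-style config string; it is slightly simplified in that it ends the section just before the next top-level line rather than reproducing the exact index arithmetic above.

import re

running_config = (
    'hostname switch01\n'
    '!\n'
    'interface Ethernet1\n'
    '   description uplink\n'
    '   no shutdown\n'
    'interface Ethernet2\n'
    '   shutdown\n'
)


def get_section(regex, config):
    match = re.search(regex, config, re.M)
    if not match:
        raise TypeError('config section not found')
    block_start, line_end = match.start(), match.end()
    # The block ends just before the next line that starts in column 0.
    next_block = re.search(r'^[^\s]', config[line_end:], re.M)
    if not next_block:
        return config[block_start:]
    return config[block_start:line_end + next_block.start()]


print(get_section(r'^interface Ethernet1$', running_config))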
def get_string(self, sort_keys=False, pretty=False): keys = self.keys() if sort_keys: keys = sorted(keys) lines = [] for k in keys: if k == "MAGMOM" and isinstance(self[k], list): value = [] if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \ (self.get("LSORBIT") or self.get("LNONCOLLINEAR")): value.append(" ".join(str(i) for j in self[k] for i in j)) elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"): for m, g in itertools.groupby(self[k]): value.append("3*{}*{}".format(len(tuple(g)), m)) else: for m, g in itertools.groupby(self[k], lambda x: float(x)): value.append("{}*{}".format(len(tuple(g)), m)) lines.append([k, " ".join(value)]) elif isinstance(self[k], list): lines.append([k, " ".join([str(i) for i in self[k]])]) else: lines.append([k, self[k]]) if pretty: return str(tabulate([[l[0], "=", l[1]] for l in lines], tablefmt="plain")) else: return str_delimited(lines, None, " = ") + "\n"
Returns a string representation of the INCAR. The reason why this method is different from the __str__ method is to provide options for pretty printing. Args: sort_keys (bool): Set to True to sort the INCAR parameters alphabetically. Defaults to False. pretty (bool): Set to True for pretty aligned output. Defaults to False.
juraj-google-style
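A self-contained demo of just the MAGMOM run-length grouping used above, built on itertools.groupby and independent of pymatgen; the magnetic moments are example values.

import itertools

magmom = [2.0, 2.0, 2.0, -2.0, -2.0, 0.0]

# Collapse consecutive equal moments into VASP-style 'count*value' tokens,
# the same grouping applied above when writing the MAGMOM tag.
tokens = []
for moment, group in itertools.groupby(magmom, lambda x: float(x)):
    tokens.append('{}*{}'.format(len(tuple(group)), moment))

print('MAGMOM = ' + ' '.join(tokens))  # MAGMOM = 3*2.0 2*-2.0 1*0.0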
async def setup_swiss_points(self, match_win: float = None, match_tie: float = None, game_win: float = None, game_tie: float = None, bye: float = None):
    params = {}
    if match_win is not None:
        params['pts_for_match_win'] = match_win
    if match_tie is not None:
        params['pts_for_match_tie'] = match_tie
    if game_win is not None:
        params['pts_for_game_win'] = game_win
    if game_tie is not None:
        params['pts_for_game_tie'] = game_tie
    if bye is not None:
        params['pts_for_bye'] = bye
    assert_or_raise(len(params) > 0, ValueError, 'At least one of the points must be given')
    await self.update(**params)
|methcoro| Args: match_win match_tie game_win game_tie bye Raises: APIException
juraj-google-style
def __init__(self, custom_op_registerers=None, **kwargs): self._custom_op_registerers = custom_op_registerers or [] super(InterpreterWithCustomOps, self).__init__(**kwargs)
Constructor. Args: custom_op_registerers: List of str (symbol names) or functions that take a pointer to a MutableOpResolver and register a custom op. When passing functions, use a pybind function that takes a uintptr_t that can be recast as a pointer to a MutableOpResolver. **kwargs: Additional arguments passed to Interpreter. Raises: ValueError: If the interpreter was unable to create.
github-repos
def signature(self, name, file_name, file_type, file_text, **kwargs): group_obj = Signature(name, file_name, file_type, file_text, **kwargs) return self._group(group_obj)
Add Signature data to Batch object. Valid file_types: + Snort ® + Suricata + YARA + ClamAV ® + OpenIOC + CybOX ™ + Bro + Regex + SPL - Splunk ® Search Processing Language Args: name (str): The name for this Group. file_name (str): The name for the attached signature for this Group. file_type (str): The signature type for this Group. file_text (str): The signature content for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Signature.
codesearchnet
def get_dofn_specs(dofn: 'DoFn') -> tuple[set[StateSpec], set[TimerSpec]]: from apache_beam.runners.common import MethodWrapper from apache_beam.transforms.core import _DoFnParam from apache_beam.transforms.core import _StateDoFnParam from apache_beam.transforms.core import _TimerDoFnParam all_state_specs = set() all_timer_specs = set() for method_name in dir(dofn): if not isinstance(getattr(dofn, method_name, None), types.MethodType): continue method = MethodWrapper(dofn, method_name) param_ids = [d.param_id for d in method.defaults if isinstance(d, _DoFnParam)] if len(param_ids) != len(set(param_ids)): raise ValueError('DoFn %r has duplicate %s method parameters: %s.' % (dofn, method_name, param_ids)) for d in method.defaults: if isinstance(d, _StateDoFnParam): all_state_specs.add(d.state_spec) elif isinstance(d, _TimerDoFnParam): all_timer_specs.add(d.timer_spec) return (all_state_specs, all_timer_specs)
Gets the state and timer specs for a DoFn, if any. Args: dofn (apache_beam.transforms.core.DoFn): The DoFn instance to introspect for timer and state specs.
github-repos
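A hedged example of a stateful DoFn whose specs this helper would collect; it assumes Apache Beam is installed and that get_dofn_specs is importable from apache_beam.transforms.userstate, where it normally lives.

import apache_beam as beam
from apache_beam.coders import VarIntCoder
from apache_beam.transforms.userstate import (
    ReadModifyWriteStateSpec, TimerSpec, TimeDomain, on_timer, get_dofn_specs)


class CountingDoFn(beam.DoFn):
    COUNT = ReadModifyWriteStateSpec('count', VarIntCoder())
    FLUSH = TimerSpec('flush', TimeDomain.WATERMARK)

    def process(self, element,
                count=beam.DoFn.StateParam(COUNT),
                flush=beam.DoFn.TimerParam(FLUSH)):
        count.write((count.read() or 0) + 1)

    @on_timer(FLUSH)
    def on_flush(self, count=beam.DoFn.StateParam(COUNT)):
        yield count.read()


state_specs, timer_specs = get_dofn_specs(CountingDoFn())
print(state_specs)   # e.g. {ReadModifyWriteStateSpec(count)}
print(timer_specs)   # e.g. {TimerSpec(flush)}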
def update(self, *names: str) -> 'ListTree': for name in names: parts = name.split(self._delimiter) self._root.add(*parts) return self
Add all the mailbox names to the tree, filling in any missing nodes. Args: names: The names of the mailboxes.
juraj-google-style