Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def parse_keys(self, sn: "DataNode") -> Dict[InstanceName, ScalarValue]: res = {} for k in self.keys: knod = sn.get_data_child(*k) if knod is None: raise NonexistentSchemaNode(sn.qual_name, *k) kval = knod.type.parse_value(self.keys[k]) if kval is None: raise InvalidKeyValue(self.keys[k]) res[knod.iname()] = kval return res
Parse key dictionary in the context of a schema node. Args: sn: Schema node corresponding to a list.
juraj-google-style
def render_pipeline_graph(self, pipeline_graph: 'PipelineGraph') -> str: raise NotImplementedError
Renders the pipeline graph in HTML-compatible format. Args: pipeline_graph: (pipeline_graph.PipelineGraph) the graph to be rendered. Returns: unicode, str or bytes that can be expressed as HTML.
github-repos
def commutes( m1: np.ndarray, m2: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8) -> bool: return (m1.shape[0] == m1.shape[1] and m1.shape == m2.shape and np.allclose(m1.dot(m2), m2.dot(m1), rtol=rtol, atol=atol))
Determines if two matrices approximately commute. Two matrices A and B commute if they are square and have the same size and AB = BA. Args: m1: One of the matrices. m2: The other matrix. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the two matrices have compatible sizes and a commutator equal to zero within tolerance.
juraj-google-style
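A quick numerical check of the commutation test above, using only NumPy; the Pauli matrices here are just illustrative inputs:

import numpy as np

X = np.array([[0, 1], [1, 0]])   # Pauli-X
Z = np.array([[1, 0], [0, -1]])  # Pauli-Z

# X and Z anticommute (XZ = -ZX), so their commutator is nonzero.
print(np.allclose(X @ Z, Z @ X))  # False
# Any square matrix commutes with itself.
print(np.allclose(Z @ Z, Z @ Z))  # True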
def decode_jpeg(image_buffer, scope=None): with tf.name_scope(values=[image_buffer], name=scope, default_name='decode_jpeg'): image = tf.image.decode_jpeg(image_buffer, channels=3) image = tf.image.convert_image_dtype(image, dtype=tf.float32) return image
Decode a JPEG string into one 3-D float image Tensor. Args: image_buffer: scalar string Tensor. scope: Optional scope for name_scope. Returns: 3-D float Tensor with values ranging from [0, 1).
codesearchnet
def score_one(self, x: beam.Row) -> Optional[float]: raise NotImplementedError
Scores a single data instance for anomalies. Args: x: A `beam.Row` representing the data instance. Returns: The outlier score as a float. None if an exception occurs during scoring, and NaN if the model is not ready.
github-repos
def reverse_bettertransformer(self): if not is_optimum_available(): raise ImportError('The package `optimum` is required to use Better Transformer.') from optimum.version import __version__ as optimum_version if version.parse(optimum_version) < version.parse('1.7.0'): raise ImportError(f'Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found.') from optimum.bettertransformer import BetterTransformer return BetterTransformer.reverse(self)
Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is used, for example in order to save the model. Returns: [`PreTrainedModel`]: The model converted back to the original modeling.
github-repos
def not_evaluator(conditions, leaf_evaluator): if (not (len(conditions) > 0)): return None result = evaluate(conditions[0], leaf_evaluator) return (None if (result is None) else (not result))
Evaluates a list of conditions as if the evaluator had been applied to a single entry and NOT was applied to the result. Args: conditions: List of conditions ex: [operand_1, operand_2]. leaf_evaluator: Function which will be called to evaluate leaf condition values. Returns: Boolean: - True if the operand evaluates to False. - False if the operand evaluates to True. None: if conditions is empty or condition couldn't be evaluated.
codesearchnet
def context(name=None): def _context(cls): annotated(cls, name) cls.context = True return cls return _context
Declare that a class defines a context. Contexts are for use with HierarchicalShell for discovering and using functionality from the command line. Args: name (str): Optional name for this context if you don't want to just use the class name.
juraj-google-style
def get_task(config): path = os.path.join(config['work_dir'], 'task.json') message = "Can't read task from {}!\n%(exc)s".format(path) contents = load_json_or_yaml(path, is_path=True, message=message) return contents
Read the task.json from work_dir. Args: config (dict): the running config, to find work_dir. Returns: dict: the contents of task.json Raises: ScriptWorkerTaskException: on error.
codesearchnet
def _ReadStructureFamilyDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False): if is_member: error_message = 'data type not supported as member' raise errors.DefinitionReaderError(definition_name, error_message) definition_object = self._ReadLayoutDataTypeDefinition(definitions_registry, definition_values, data_types.StructureFamilyDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_STRUCTURE_FAMILY) runtime = definition_values.get('runtime', None) if (not runtime): error_message = 'missing runtime' raise errors.DefinitionReaderError(definition_name, error_message) runtime_data_type_definition = definitions_registry.GetDefinitionByName(runtime) if (not runtime_data_type_definition): error_message = 'undefined runtime: {0:s}.'.format(runtime) raise errors.DefinitionReaderError(definition_name, error_message) if runtime_data_type_definition.family_definition: error_message = 'runtime: {0:s} already part of a family.'.format(runtime) raise errors.DefinitionReaderError(definition_name, error_message) definition_object.AddRuntimeDefinition(runtime_data_type_definition) members = definition_values.get('members', None) if (not members): error_message = 'missing members' raise errors.DefinitionReaderError(definition_name, error_message) for member in members: member_data_type_definition = definitions_registry.GetDefinitionByName(member) if (not member_data_type_definition): error_message = 'undefined member: {0:s}.'.format(member) raise errors.DefinitionReaderError(definition_name, error_message) if member_data_type_definition.family_definition: error_message = 'member: {0:s} already part of a family.'.format(member) raise errors.DefinitionReaderError(definition_name, error_message) definition_object.AddMemberDefinition(member_data_type_definition) return definition_object
Reads a structure family data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: StructureDefinition: structure data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
codesearchnet
def predict_proba(self, a, b, device=None): device = SETTINGS.get_default(device=device) if self.model is None: print('Model has to be trained before doing any predictions') raise ValueError if len(np.array(a).shape) == 1: a = np.array(a).reshape((-1, 1)) b = np.array(b).reshape((-1, 1)) m = np.hstack((a, b)) m = scale(m) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) if th.cuda.is_available(): m = m.cuda() return (self.model(m).data.cpu().numpy()-.5) * 2
Infer causal directions using the trained NCC pairwise model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 device (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``) Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object): mft_metadata_file = pyfsntfs.mft_metadata_file() try: mft_metadata_file.open_file_object(file_object) except IOError as exception: parser_mediator.ProduceExtractionWarning( 'unable to open file with error: {0!s}'.format(exception)) for entry_index in range(0, mft_metadata_file.number_of_file_entries): try: mft_entry = mft_metadata_file.get_file_entry(entry_index) self._ParseMFTEntry(parser_mediator, mft_entry) except IOError as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse MFT entry: {0:d} with error: {1!s}').format( entry_index, exception)) mft_metadata_file.close()
Parses a NTFS $MFT metadata file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
juraj-google-style
def __user_location(__pkg: str, type_) -> str: if (ALLOW_DARWIN and (sys.platform == 'darwin')): user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0]) else: user_dir = getenv('XDG_{}_HOME'.format(type_.upper()), path.sep.join([getenv('HOME', ''), __LOCATIONS[type_][1]])) return path.expanduser(path.sep.join([user_dir, __pkg]))
Utility function to look up XDG basedir locations. Args: __pkg: Package name type_: Location type
codesearchnet
def __init__(self, paths, ignore_list, path_segment_separator='/'): super(_PathFilterTable, self).__init__() self._path_segment_separator = path_segment_separator self.path_segments_per_index = {} self.paths = list(paths) for path in self.paths: self._AddPathSegments(path, ignore_list)
Initializes and builds the path filter table from a list of paths. Args: paths: a list of strings containing the paths. ignore_list: a list of path segment indexes to ignore, where 0 is the index of the first path segment relative from the root. path_segment_separator: optional string containing the path segment separator.
juraj-google-style
def Columns(iterable): columns = sorted(iterable) return "({})".format(", ".join("`{}`".format(col) for col in columns))
Returns a string of column names for MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> Columns({"password": "foo", "name": "bar"}) u'(`name`, `password`)' Args: iterable: The iterable of strings to be used as column names. Returns: A string containing a tuple of sorted comma-separated column names.
juraj-google-style
def ready_size(self, name=None): if name is None: name = '%s_BarrierReadySize' % self._name return gen_data_flow_ops.barrier_ready_size(self._barrier_ref, name=name)
Compute the number of complete elements in the given barrier. Args: name: A name for the operation (optional). Returns: A single-element tensor containing the number of complete elements in the given barrier.
github-repos
def send_password_reset_link(self, username): response = self._post(self.rest_url + "/user/mail/password", params={"username": username}) if response.ok: return True return False
Sends the user a password reset link (by email) Args: username: The account username. Returns: True: Succeeded False: If unsuccessful
juraj-google-style
def tf_preprocess(self, states, actions, reward): for name in sorted(self.states_preprocessing): states[name] = self.states_preprocessing[name].process(tensor=states[name]) if self.reward_preprocessing is not None: reward = self.reward_preprocessing.process(tensor=reward) return states, actions, reward
Applies preprocessing ops to the raw states/action/reward inputs. Args: states (dict): Dict of raw state tensors. actions (dict): Dict of raw action tensors. reward: 1D (float) raw rewards tensor. Returns: The preprocessed versions of the input tensors.
juraj-google-style
def runCmd(cls, cmd): cit.echo(cmd, "command") result = os.system(cmd) cls.checkResult(result)
Run a command and show whether it succeeded or failed. Args: cmd (str): the command to run.
juraj-google-style
def count(self, event_str, inc_int=1): self._event_dict.setdefault(event_str, 0) self._event_dict[event_str] += inc_int
Count an event. Args: event_str: The name of an event to count. Used as a key in the event dict. The same name will also be used in the summary. inc_int: int Optional argument to increase the count for the event by more than 1.
juraj-google-style
def import_file(source, use_32bit_registry=False): cache_path = __salt__['cp.cache_file'](source) if (not cache_path): error_msg = "File/URL '{0}' probably invalid.".format(source) raise ValueError(error_msg) if use_32bit_registry: word_sz_txt = '32' else: word_sz_txt = '64' cmd = 'reg import "{0}" /reg:{1}'.format(cache_path, word_sz_txt) cmd_ret_dict = __salt__['cmd.run_all'](cmd, python_shell=True) retcode = cmd_ret_dict['retcode'] if (retcode != 0): raise CommandExecutionError('reg.exe import failed', info=cmd_ret_dict) return True
Import registry settings from a Windows ``REG`` file by invoking ``REG.EXE``. .. versionadded:: 2018.3.0 Args: source (str): The full path of the ``REG`` file. This can be either a local file path or a URL type supported by salt (e.g. ``salt://salt_master_path``) use_32bit_registry (bool): If the value of this parameter is ``True`` then the ``REG`` file will be imported into the Windows 32 bit registry. Otherwise the Windows 64 bit registry will be used. Returns: bool: True if successful, otherwise an error is raised Raises: ValueError: If the value of ``source`` is an invalid path or otherwise causes ``cp.cache_file`` to return ``False`` CommandExecutionError: If ``reg.exe`` exits with a non-0 exit code CLI Example: .. code-block:: bash salt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg
codesearchnet
def longest_one_seg_prefix(self, word): for i in range(self.longest_seg, 0, (- 1)): if (word[:i] in self.seg_dict): return word[:i] return ''
Return longest Unicode IPA prefix of a word Args: word (unicode): input word as Unicode IPA string Returns: unicode: longest single-segment prefix of `word` in database
codesearchnet
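A self-contained sketch of the same greedy longest-prefix idea, with a made-up segment inventory standing in for the object's seg_dict:

# Toy segment inventory; the real class loads this from an IPA segment database.
seg_dict = {"t", "ts", "a"}
longest_seg = max(len(s) for s in seg_dict)

def longest_prefix(word):
    # Try the longest possible prefix first, then shrink.
    for i in range(longest_seg, 0, -1):
        if word[:i] in seg_dict:
            return word[:i]
    return ""

print(longest_prefix("tsa"))  # 'ts' -- prefers the 2-character segment
print(longest_prefix("xyz"))  # ''   -- no known prefix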
def map_part_function(fn: PartFn, match_fn: MatchFn | None=None) -> StreamFn: match_fn = match_fn or (lambda _: True) return functools.partial(_apply_part_function, (fn, match_fn))
Converts a part function to a function taking a stream of parts. Adds a context if missing to ensure error propagation. Args: fn: a function that can be applied on a single part. match_fn: a function that returns True if the part should be processed by the part function. When the part should not be processed, the part processor will not be called and the part will be passed as is. Returns: A function that is applied concurrently across the parts of the input stream.
github-repos
def is_match(self, subject: Union[Expression, FlatTerm]) -> bool: try: next(self.match(subject)) except StopIteration: return False return True
Check if the given subject matches any pattern in the net. Args: subject: The subject that is matched. Must be constant. Returns: True, if any pattern matches the subject.
juraj-google-style
def logsumexp(x, axis=None, keepdims=None): return tf_np.asarray(math_ops.reduce_logsumexp(input_tensor=x, axis=axis, keepdims=keepdims))
Computes log(sum(exp(elements across dimensions of a tensor))). Reduces `x` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. Args: x: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(x), rank(x))`. keepdims: If true, retains reduced dimensions with length 1. Returns: The reduced tensor.
github-repos
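The numerical-stability claim in the docstring is easy to demonstrate with plain NumPy; the shift-by-max trick below is the standard log-sum-exp identity, not the TF implementation itself:

import numpy as np

x = np.array([1000.0, 1000.0])

# Naive evaluation overflows: exp(1000) is inf in float64.
naive = np.log(np.sum(np.exp(x)))            # inf (RuntimeWarning: overflow)

# Subtracting the max keeps every exponent <= 0.
m = np.max(x)
stable = m + np.log(np.sum(np.exp(x - m)))   # 1000.6931... == 1000 + log(2)
print(naive, stable)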
def extractSchedule(self, schedule, period): ret = namedtuple("ret", ["Hour", "Min", "Tariff", "Period", "Schedule"]) work_table = self.m_schd_1_to_4 if Schedules.Schedule_5 <= schedule <= Schedules.Schedule_6: work_table = self.m_schd_5_to_6 period += 1 schedule += 1 ret.Period = str(period) ret.Schedule = str(schedule) if (schedule < 1) or (schedule > Extents.Schedules) or (period < 0) or (period > Extents.Periods): ekm_log("Out of bounds: tariff " + str(period) + " for schedule " + str(schedule)) ret.Hour = ret.Min = ret.Tariff = str(0) return ret idxhr = "Schedule_" + str(schedule) + "_Period_" + str(period) + "_Hour" idxmin = "Schedule_" + str(schedule) + "_Period_" + str(period) + "_Min" idxrate = "Schedule_" + str(schedule) + "_Period_" + str(period) + "_Tariff" if idxhr not in work_table: ekm_log("Incorrect index: " + idxhr) ret.Hour = ret.Min = ret.Tariff = str(0) return ret if idxmin not in work_table: ekm_log("Incorrect index: " + idxmin) ret.Hour = ret.Min = ret.Tariff = str(0) return ret if idxrate not in work_table: ekm_log("Incorrect index: " + idxrate) ret.Hour = ret.Min = ret.Tariff = str(0) return ret ret.Hour = work_table[idxhr][MeterData.StringValue] ret.Min = work_table[idxmin][MeterData.StringValue].zfill(2) ret.Tariff = work_table[idxrate][MeterData.StringValue] return ret
Read a single schedule tariff from meter object buffer. Args: schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extent.Schedules). tariff (int): A :class:`~ekmmeters.Tariffs` value or in range(Extent.Tariffs). Returns: bool: True on completion.
juraj-google-style
def build(self, client, nobuild=False, usecache=True, pull=False): if not nobuild: self.update_source_images(client, usecache=usecache, pull=pull) width = utils.get_console_width() cprint('\n' + '='*width, color='white', attrs=['bold']) line = 'STARTING BUILD for "%s" (image definition "%s" from %s)\n' % ( self.targetname, self.imagename, self.steps[-1].sourcefile) cprint(_centered(line, width), color='blue', attrs=['bold']) for istep, step in enumerate(self.steps): print(colored('* Step','blue'), colored('%d/%d' % (istep+1, len(self.steps)), 'blue', attrs=['bold']), colored('for image', color='blue'), colored(self.imagename, color='blue', attrs=['bold'])) if not nobuild: if step.bust_cache: stackkey = self._get_stack_key(istep) if stackkey in _rebuilt: step.bust_cache = False step.build(client, usecache=usecache) print(colored("* Created intermediate image", 'green'), colored(step.buildname, 'green', attrs=['bold']), end='\n\n') if step.bust_cache: _rebuilt.add(stackkey) finalimage = step.buildname if not nobuild: self.finalizenames(client, finalimage) line = 'FINISHED BUILDING "%s" (image definition "%s" from %s)'%( self.targetname, self.imagename, self.steps[-1].sourcefile) cprint(_centered(line, width), color='green', attrs=['bold']) cprint('=' * width, color='white', attrs=['bold'], end='\n\n')
Drives the build of the final image - get the list of steps and execute them. Args: client (docker.Client): docker client object that will build the image nobuild (bool): just create dockerfiles, don't actually build the image usecache (bool): use docker cache, or rebuild everything from scratch? pull (bool): try to pull new versions of repository images?
juraj-google-style
def Webhook(self, request, global_params=None): config = self.GetMethodConfig('Webhook') return self._RunMethod(config, request, global_params=global_params)
ReceiveTriggerWebhook [Experimental] is called when the API receives a webhook request targeted at a specific trigger. Args: request: (CloudbuildProjectsLocationsTriggersWebhookRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ReceiveTriggerWebhookResponse) The response message.
github-repos
def sample(self, n): row_total_count = 0 row_counts = [] for file in self.files: with _util.open_local_or_gcs(file, 'r') as f: num_lines = sum(1 for line in f) row_total_count += num_lines row_counts.append(num_lines) names = None dtype = None if self._schema: _MAPPINGS = { 'FLOAT': np.float64, 'INTEGER': np.int64, 'TIMESTAMP': np.datetime64, 'BOOLEAN': np.bool, } names = [x['name'] for x in self._schema] dtype = {x['name']: _MAPPINGS.get(x['type'], object) for x in self._schema} skip_count = row_total_count - n skip_all = sorted(random.sample(range(0, row_total_count), skip_count)) dfs = [] for file, row_count in zip(self.files, row_counts): skip = [x for x in skip_all if x < row_count] skip_all = [x - row_count for x in skip_all if x >= row_count] with _util.open_local_or_gcs(file, 'r') as f: dfs.append(pd.read_csv(f, skiprows=skip, names=names, dtype=dtype, header=None)) return pd.concat(dfs, axis=0, ignore_index=True)
Samples data into a Pandas DataFrame. Args: n: number of sampled counts. Returns: A dataframe containing sampled data. Raises: Exception if n is larger than number of rows.
juraj-google-style
def compile_state_cpfs(self, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[Noise] = None) -> List[CPFPair]: next_state_fluents = [] with self.graph.as_default(): with tf.name_scope('state_cpfs'): for cpf in self.rddl.domain.state_cpfs: cpf_noise = noise.get(cpf.name, None) if noise is not None else None name_scope = utils.identifier(cpf.name) with tf.name_scope(name_scope): t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise) next_state_fluents.append((cpf.name, t)) key = lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0]) next_state_fluents = sorted(next_state_fluents, key=key) return next_state_fluents
Compiles the next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
juraj-google-style
def evaluate(self, index): if self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE: self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index))) return None condition_match = self.condition_data[index][3] if condition_match is None: condition_match = ConditionMatchTypes.EXACT if condition_match not in self.EVALUATORS_BY_MATCH_TYPE: self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index))) return None if condition_match != ConditionMatchTypes.EXISTS: attribute_key = self.condition_data[index][0] if attribute_key not in self.attributes: self.logger.debug(audience_logs.MISSING_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key)) return None if self.attributes.get(attribute_key) is None: self.logger.debug(audience_logs.NULL_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key)) return None return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index)
Given a custom attribute audience condition and user attributes, evaluate the condition against the attributes. Args: index: Index of the condition to be evaluated. Returns: Boolean: - True if the user attributes match the given condition. - False if the user attributes don't match the given condition. None: if the user attributes and condition can't be evaluated.
juraj-google-style
def is_mergeable_with(self, timeslots: 'TimeslotCollection') -> bool: for slot in timeslots.timeslots: for interval in self._table[slot.channel]: if slot.interval.has_overlap(interval): return False return True
Return if self is mergeable with `timeslots`. Args: timeslots: TimeslotCollection to be checked
codesearchnet
def mark_backward(output_tensor, used_node_names): op = output_tensor.op if (op.name in used_node_names): return used_node_names.add(op.name) for input_tensor in op.inputs: mark_backward(input_tensor, used_node_names) for control_input_op in op.control_inputs: used_node_names.add(control_input_op.name) for input_tensor in control_input_op.inputs: mark_backward(input_tensor, used_node_names)
Function to propagate backwards in the graph and mark nodes as used. Traverses recursively through the graph from the end tensor, through the op that generates the tensor, and then to the input tensors that feed the op. Nodes encountered are stored in used_node_names. Args: output_tensor: A Tensor from which we start the propagation. used_node_names: A set of strings, stores the names of nodes we've marked as visited.
codesearchnet
def auto_to_manual_spmd_partition(tensor, manual_sharding, single_dim=-1, unspecified_dims=None): return tf2xla.spmd_full_to_shard_shape(tensor, manual_sharding=manual_sharding, dim=single_dim, unspecified_dims=unspecified_dims or [])
Switches from automatic SPMD partitioning to manual partitioning. Converts a full-shaped tensor (to be automatically partitioned by SPMD partitioner) to a shard-shaped tensor to be consumed by manually partitioned ops. Args: tensor: A tf.Tensor in full shape. manual_sharding: A serialized string of OpSharding to be used in manual partitioning. single_dim: If >= 0, the conversion will happen only on this dim in subgroups. unspecified_dims: An optional list of dimensions unspecified. Returns: A shard-shaped tensor to be consumed by manually partitioned ops.
github-repos
def get_equivalent_kpoints(self, index): if self.kpoints[index].label is None: return [index] list_index_kpoints = [] for i in range(len(self.kpoints)): if self.kpoints[i].label == self.kpoints[index].label: list_index_kpoints.append(i) return list_index_kpoints
Returns the list of kpoint indices equivalent (meaning they are the same frac coords) to the given one. Args: index: the kpoint index Returns: a list of equivalent indices TODO: now it uses the label we might want to use coordinates instead (in case there was a mislabel)
juraj-google-style
class DatasetInitializer(lookup_ops.TableInitializerBase): def __init__(self, dataset): self.dataset = dataset elem_spec = self.dataset.element_spec _check_table_initializer_element_spec(elem_spec) key_type = elem_spec[0].dtype value_type = elem_spec[1].dtype super(DatasetInitializer, self).__init__(key_type, value_type) def initialize(self, table): lookup_ops.check_table_dtypes(table, self._key_dtype, self._value_dtype) init_op = ged_ops.initialize_table_from_dataset(table.resource_handle, self.dataset._variant_tensor) ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op) return init_op
Creates a table initializer from a `tf.data.Dataset`. Sample usage: >>> keys = tf.data.Dataset.range(100) >>> values = tf.data.Dataset.range(100).map( ... lambda x: tf.strings.as_string(x * 2)) >>> ds = tf.data.Dataset.zip((keys, values)) >>> init = tf.data.experimental.DatasetInitializer(ds) >>> table = tf.lookup.StaticHashTable(init, "") >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy() array([b'0', b'2', b'4'], dtype=object) Attributes: dataset: A `tf.data.Dataset` object that produces tuples of scalars. The first scalar is treated as a key and the second as value. Raises: ValueError if `dataset` doesn't conform to specifications.
github-repos
def halt(self): res = int(self._dll.JLINKARM_Halt()) if (res == 0): time.sleep(1) return True return False
Halts the CPU Core. Args: self (JLink): the ``JLink`` instance Returns: ``True`` if halted, ``False`` otherwise.
codesearchnet
def retransmit(self, data): if (data['method'] == 'REGISTER'): if ((not self.registered) and (self.register_retries < self.max_retries)): logger.debug((('<%s> Timeout exceeded. ' % str(self.cuuid)) + 'Retransmitting REGISTER request.')) self.register_retries += 1 self.register(data['address'], retry=False) else: logger.debug(('<%s> No need to retransmit.' % str(self.cuuid))) if (data['method'] == 'EVENT'): if (data['euuid'] in self.event_uuids): self.event_uuids[data['euuid']]['retry'] += 1 if (self.event_uuids[data['euuid']]['retry'] > self.max_retries): logger.debug(('<%s> Max retries exceeded. Timed out waiting for server for event: %s' % (data['cuuid'], data['euuid']))) logger.debug(('<%s> <euuid:%s> Deleting event from currently processing event uuids' % (data['cuuid'], str(data['euuid'])))) del self.event_uuids[data['euuid']] else: self.listener.send_datagram(serialize_data(data, self.compression, self.encryption, self.server_key), self.server) logger.debug(('<%s> <euuid:%s> Scheduling to retry in %s seconds' % (data['cuuid'], str(data['euuid']), str(self.timeout)))) self.listener.call_later(self.timeout, self.retransmit, data) else: logger.debug(('<%s> <euuid:%s> No need to retransmit.' % (str(self.cuuid), str(data['euuid']))))
Processes messages that have been delivered from the transport protocol. Args: data (dict): A dictionary containing the packet data to resend. Returns: None Examples: >>> data {'method': 'REGISTER', 'address': ('192.168.0.20', 40080)}
codesearchnet
def _convert_to_sparse_tensor(sp_input): if isinstance(sp_input, sparse_tensor.SparseTensorValue): return sparse_tensor.SparseTensor.from_value(sp_input) if not isinstance(sp_input, sparse_tensor.SparseTensor): raise TypeError('Input must be a SparseTensor.') return sp_input
Convert `sp_input` to `SparseTensor` and return it. Args: sp_input: `SparseTensor` or `SparseTensorValue`. Returns: `sp_input` converted to `SparseTensor`. Raises: ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
github-repos
def not_evaluator(conditions, leaf_evaluator): if not len(conditions) > 0: return None result = evaluate(conditions[0], leaf_evaluator) return None if result is None else not result
Evaluates a list of conditions as if the evaluator had been applied to a single entry and NOT was applied to the result. Args: conditions: List of conditions ex: [operand_1, operand_2]. leaf_evaluator: Function which will be called to evaluate leaf condition values. Returns: Boolean: - True if the operand evaluates to False. - False if the operand evaluates to True. None: if conditions is empty or condition couldn't be evaluated.
juraj-google-style
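A minimal mirror of the NOT logic above, with the leaf evaluator stubbed so the separate evaluate() helper (not shown in this entry) is not needed:

def not_of_first(conditions, leaf_evaluator):
    # Same shape as not_evaluator: NOT of the first operand, None-propagating.
    if not conditions:
        return None
    result = leaf_evaluator(conditions[0])
    return None if result is None else not result

identity = lambda cond: cond
print(not_of_first([True], identity))   # False
print(not_of_first([None], identity))   # None -- unknown stays unknown
print(not_of_first([], identity))       # None -- empty condition list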
def getCard(self, name): cards = self.projectCards for card in cards: if (card.name.upper() == name.upper()): return card return None
Retrieve card object for given card name. Args: name (str): Name of card to be retrieved. Returns: :class:`.ProjectCard` or None: Project card object. Will return None if the card is not available.
codesearchnet
def function_completions( completion_text: str, bel_spec: BELSpec, function_list: list, bel_fmt: str, size: int, ) -> list: if isinstance(function_list, list): if bel_fmt in ["short", "medium"]: function_list = [ bel_spec["functions"]["to_short"][fn] for fn in function_list ] else: function_list = [ bel_spec["functions"]["to_long"][fn] for fn in function_list ] elif bel_fmt in ["short", "medium"]: function_list = bel_spec["functions"]["primary"]["list_short"] else: function_list = bel_spec["functions"]["primary"]["list_long"] matches = [] for f in function_list: escaped_completion_text = completion_text.replace(r"(", r"\(").replace( r")", r"\)" ) log.debug(f"Completion match: {escaped_completion_text} F: {f}") if re.match(escaped_completion_text, f): matches.append(f) replace_list = [] for match in matches: if completion_text: highlight = match.replace(completion_text, f"<em>{completion_text}</em>") else: highlight = completion_text replace_list.append( { "replacement": match, "label": f"{match}()", "highlight": highlight, "type": "Function", } ) return replace_list[:size]
Filter BEL functions by prefix Args: prefix: completion string bel_fmt: short, medium, long BEL formats spec: BEL specification Returns: list: list of BEL functions that match prefix
juraj-google-style
def input_selector_schema(config_cls): config_type = resolve_config_cls_arg(config_cls) check.param_invariant(config_type.is_selector, 'config_cls') def _wrap(func): def _selector(context, config_value): (selector_key, selector_value) = single_item(config_value) return func(context, selector_key, selector_value) return _create_input_schema(config_type, _selector) return _wrap
A decorator for annotating a function that can take the selected properties from a ``config_value`` in to an instance of a custom type. Args: config_cls (Selector)
codesearchnet
def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message): try: if (message is None): raise ValueError('message was none') encoded_message = bytes(message, 'utf-8') if (encoded_message is None): raise ValueError('utf-8 encoding of message failed') if domain_name: try: UDP_IP = socket.gethostbyname(domain_name) except Exception: pass if (UDP_IP is None): raise Exception('UDP_IP is None') if (UDP_PORT is None): raise Exception('UDP_PORT is None') sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.settimeout(sock_timeout) sock.sendto(bytes(message, 'utf-8'), (UDP_IP, UDP_PORT)) sock.close() except socket.timeout: logger.debug('Failed to send usage tracking data: socket timeout') except OSError as e: logger.debug('Failed to send usage tracking data: OSError: {}'.format(e)) except Exception as e: logger.debug('Failed to send usage tracking data: Exception: {}'.format(e))
Send UDP messages to usage tracker asynchronously This multiprocessing based messenger was written to overcome the limitations of signalling/terminating a thread that is blocked on a system call. This messenger is created as a separate process, and initialized with 2 queues, to_send to receive messages to be sent to the internet. Args: - domain_name (str) : Domain name string - UDP_IP (str) : IP address YYY.YYY.YYY.YYY - UDP_PORT (int) : UDP port to send out on - sock_timeout (int) : Socket timeout - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet
codesearchnet
def _encode_primitive_regex(self, builder: expressions.Builder, element: ElementDefinition) -> List[validation_pb2.SqlRequirement]: name = builder.fhir_path.split('.')[-1] if _is_disabled(element): return [] if not _is_elem_supported(element): return [] assert not builder.return_type.returns_polymorphic(), f'Polymorphic element builder {builder.fhir_path} not expected in _encode_primitive_regex.' primitive_regex_info = self._get_regex_from_element(builder, element) if primitive_regex_info is None: return [] primitive_regex = primitive_regex_info.regex regex_type_code = primitive_regex_info.type_code constraint_key = f'{name}-matches-{regex_type_code}-regex' if constraint_key in self._options.skip_keys: return [] element_is_repeated = _utils.is_repeated_element(element) fhir_path_builder = builder.matches(primitive_regex) if regex_type_code == 'positiveInt': fhir_path_builder = builder > 0 elif regex_type_code == 'unsignedInt': fhir_path_builder = builder >= 0 if element_is_repeated: fhir_path_builder = builder.all(fhir_path_builder) context_builder = builder.get_parent_builder() if context_builder.return_type.returns_polymorphic(): context_builder = context_builder.get_parent_builder() result = self._encode_fhir_path_builder_constraint(fhir_path_builder, context_builder) if result is None: return [] constraint_key_column_name: str = _key_to_sql_column_name(_path_to_sql_column_name(constraint_key)) column_name_base: str = _path_to_sql_column_name(self._abs_path_invocation(builder.get_parent_builder())) column_name = f'{column_name_base}_{constraint_key_column_name}' if column_name in self._regex_columns_generated: return [] self._regex_columns_generated.add(column_name) return [validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result.sql, fhir_path_sql_expression=result.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_PRIMITIVE_REGEX, element_path=self._abs_path_invocation(context_builder), description=f'{name} needs to match regex of {regex_type_code}.', fhir_path_key=constraint_key, fhir_path_expression=result.builder.fhir_path, fields_referenced_by_expression=[name])]
Returns regex `SqlRequirement`s for primitive `ElementDefinition`. Args: builder: The current builder to encode regexes for. element: The ElementDefinition at this location Returns: A list of `SqlRequirement`s representing requirements generated from primitive fields on the element that have regexes .
github-repos
def index_library_datasets(self, tick_f=None): dataset_n = 0 partition_n = 0 def tick(d, p): if tick_f: tick_f('datasets: {} partitions: {}'.format(d, p)) for dataset in self.library.datasets: if self.backend.dataset_index.index_one(dataset): dataset_n += 1 tick(dataset_n, partition_n) for partition in dataset.partitions: self.backend.partition_index.index_one(partition) partition_n += 1 tick(dataset_n, partition_n) else: pass
Indexes all datasets of the library. Args: tick_f (callable, optional): callable of one argument. Gets string with index state.
codesearchnet
def parse_time_indices(s): if (not s.startswith('[')): s = (('[' + s) + ']') parsed = command_parser._parse_slices(s) if (len(parsed) != 1): raise ValueError(('Invalid number of slicing objects in time indices (%d)' % len(parsed))) else: return parsed[0]
Parse a string as time indices. Args: s: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10' Returns: A slice object. Raises: ValueError: If `s` does not represent valid time indices.
codesearchnet
def MakeCdfFromItems(items, name=''): runsum = 0 xs = [] cs = [] for value, count in sorted(items): runsum += count xs.append(value) cs.append(runsum) total = float(runsum) ps = [c / total for c in cs] cdf = Cdf(xs, ps, name) return cdf
Makes a cdf from an unsorted sequence of (value, frequency) pairs. Args: items: unsorted sequence of (value, frequency) pairs name: string name for this CDF Returns: cdf: list of (value, fraction) pairs
juraj-google-style
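The cumulative computation is easy to follow without the Cdf class; a plain-Python sketch of the same accumulation:

items = [("b", 2), ("a", 1), ("c", 1)]   # unsorted (value, frequency) pairs

runsum, pairs = 0, []
total = sum(count for _, count in items)
for value, count in sorted(items):
    runsum += count
    pairs.append((value, runsum / total))

print(pairs)  # [('a', 0.25), ('b', 0.75), ('c', 1.0)]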
def rpm_name(self, name, python_version=None, pkg_name=False): if pkg_name: return super(DandifiedNameConvertor, self).rpm_name( name, python_version) original_name = name converted = super(DandifiedNameConvertor, self).rpm_name( name, python_version) python_query = self.query.filter(name__substr=[ 'python', 'py', original_name, canonical_form(original_name)]) if converted in [pkg.name for pkg in python_query]: logger.debug("Converted name exists") return converted logger.debug("Converted name not found, searches for correct form") not_versioned_name = NameVariants(self.base_name(original_name), '') versioned_name = NameVariants(self.base_name(original_name), python_version) if self.base_name(original_name).startswith("py"): nonpy_name = NameVariants(self.base_name( original_name)[2:], python_version) for pkg in python_query: versioned_name.find_match(pkg.name) not_versioned_name.find_match(pkg.name) if 'nonpy_name' in locals(): nonpy_name.find_match(pkg.name) if 'nonpy_name' in locals(): versioned_name = versioned_name.merge(nonpy_name) correct_form = versioned_name.merge(not_versioned_name).best_matching logger.debug("Most likely correct form of the name {0}.".format( correct_form)) return correct_form or converted
Checks if name converted using superclass rpm_name_method match name of package in the query. Searches for correct name if it doesn't. Args: name: name to convert python_version: python version for which to retrieve the name of the package pkg_name: flag to perform conversion of rpm package name (foo -> python-foo)
juraj-google-style
def read_graph_execution_traces_event(self, locator): file_index, offset = locator graph_execution_traces_path = self._graph_execution_traces_paths[file_index] with self._reader_read_locks[graph_execution_traces_path]: proto_string = self._get_reader(graph_execution_traces_path).read(offset)[0] return debug_event_pb2.DebugEvent.FromString(proto_string)
Read DebugEvent at given offset from given .graph_execution_traces file. Args: locator: A (file_index, offset) tuple that locates the DebugEvent containing the graph execution trace. Returns: A DebugEventProto. Raises: `errors.DataLossError` if offset is at a wrong location. `IndexError` if offset is out of range of the file.
github-repos
def create_iam_role(self, account): try: iam = self.session.client('iam') trust = get_template('vpc_flow_logs_iam_role_trust.json').render() policy = get_template('vpc_flow_logs_role_policy.json').render() newrole = iam.create_role( Path='/', RoleName=self.role_name, AssumeRolePolicyDocument=trust )['Role']['Arn'] iam.put_role_policy( RoleName=self.role_name, PolicyName='VpcFlowPolicy', PolicyDocument=policy ) self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name)) auditlog( event='vpc_flow_logs.create_iam_role', actor=self.ns, data={ 'account': account.account_name, 'roleName': self.role_name, 'trustRelationship': trust, 'inlinePolicy': policy } ) return newrole except Exception: self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))
Create a new IAM role. Returns the ARN of the newly created role Args: account (:obj:`Account`): Account where to create the IAM role Returns: `str`
juraj-google-style
def combine(specs): new_specs = {} for spec in specs: if new_specs.get(spec, None) is None: new_specs[spec] = spec else: new_specs[spec].add(spec) return list(new_specs.values())
Combine package specifications' limitations. Args: specs (list of PackageSpec): the package specifications. Returns: list of PackageSpec: the new, merged list of PackageSpec.
juraj-google-style
def get_tensor_name(tensor): parts = tensor.name.split(':') if len(parts) > 2: raise ValueError('Tensor name invalid. Expect 0 or 1 colon, got {0}'.format(len(parts) - 1)) if len(parts) > 1 and parts[1] != '0': return tensor.name return parts[0]
Returns name of the input tensor. Args: tensor: tf.Tensor Returns: str
github-repos
def _logfile_sigterm_handler(signum, *_): logging.error('Received SIGTERM.') write_logfile() print('Received signal. Please see the log file for more information.', file=sys.stderr) sys.exit(signum)
Handle exit signals and write out a log file. Raises: SystemExit: Contains the signal as the return code.
codesearchnet
def probability_density(self, X): self.check_fit() U, V = self.split_matrix(X) if self.theta == 0: return np.multiply(U, V) else: num = np.multiply(np.multiply(-self.theta, self._g(1)), 1 + self._g(np.add(U, V))) aux = np.multiply(self._g(U), self._g(V)) + self._g(1) den = np.power(aux, 2) return num / den
Compute density function for given copula family. Args: X: `np.ndarray` Returns: np.array: probability density
juraj-google-style
def RowWith(self, column, value): for row in self._table[1:]: if row[column] == value: return row return None
Retrieves the first non header row with the column of the given value. Args: column: str, the name of the column to check. value: str, The value of the column to check. Returns: A Row() of the first row found, None otherwise. Raises: IndexError: The specified column does not exist.
juraj-google-style
def appliance_device_snmp_v1_trap_destinations(self): if (not self.__appliance_device_snmp_v1_trap_destinations): self.__appliance_device_snmp_v1_trap_destinations = ApplianceDeviceSNMPv1TrapDestinations(self.__connection) return self.__appliance_device_snmp_v1_trap_destinations
Gets the ApplianceDeviceSNMPv1TrapDestinations API client. Returns: ApplianceDeviceSNMPv1TrapDestinations:
codesearchnet
def _in_gce_environment(): if (SETTINGS.env_name is not None): return (SETTINGS.env_name == 'GCE_PRODUCTION') if ((NO_GCE_CHECK != 'True') and _detect_gce_environment()): SETTINGS.env_name = 'GCE_PRODUCTION' return True return False
Detect if the code is running in the Compute Engine environment. Returns: True if running in the GCE environment, False otherwise.
codesearchnet
def _InternalUnpackAny(msg): from google.protobuf import symbol_database factory = symbol_database.Default() type_url = msg.type_url if (not type_url): return None type_name = type_url.split('/')[(- 1)] descriptor = factory.pool.FindMessageTypeByName(type_name) if (descriptor is None): return None message_class = factory.GetPrototype(descriptor) message = message_class() message.ParseFromString(msg.value) return message
Unpacks Any message and returns the unpacked message. This internal method is different from public Any Unpack method which takes the target message as argument. _InternalUnpackAny method does not have target message type and need to find the message type in descriptor pool. Args: msg: An Any message to be unpacked. Returns: The unpacked message.
codesearchnet
def _write_source_file_content(self, file_path): if file_path in self._source_file_paths: return self._source_file_paths.index(file_path) with self._source_file_paths_lock: if file_path not in self._source_file_paths: lines = None if source_utils.is_extension_uncompiled_python_source(file_path): try: lines, _ = source_utils.load_source(file_path) except IOError as e: logging.warn('Failed to read source code from path: %s. Reason: %s', file_path, e) writer = self.get_writer() writer.WriteSourceFile(debug_event_pb2.SourceFile(file_path=file_path, host_name=self._hostname, lines=lines)) self._source_file_paths.append(file_path) return self._source_file_paths.index(file_path)
Send the content of a source file via debug-events writer. Args: file_path: Path to the source file. Returns: An int index for the file.
github-repos
def clean(decrypted: bytes) -> str: last = decrypted[(- 1)] if isinstance(last, int): return decrypted[:(- last)].decode('utf8') return decrypted[:(- ord(last))].decode('utf8')
r"""Strip padding from decrypted value. Remove number indicated by padding e.g. if last is '\x0e' then ord('\x0e') == 14, so take off 14. Args: decrypted: decrypted value Returns: Decrypted stripped of junk padding
codesearchnet
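The padding rule described in the docstring (PKCS#7-style: the last byte encodes how many bytes to strip) in a tiny standalone example:

padded = b"hello world\x05\x05\x05\x05\x05"  # 11 data bytes + 5 padding bytes of value 5
pad_len = padded[-1]                         # indexing bytes on Python 3 gives an int: 5
print(padded[:-pad_len].decode("utf8"))      # 'hello world'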
def get_forward_rate(self, start_date, maturity_date, daycount_fraction=None): start_date = dates.convert_to_date_tensor(start_date) maturity_date = dates.convert_to_date_tensor(maturity_date) if daycount_fraction is None: daycount_fraction = dates.daycount_actual_365_fixed(start_date=start_date, end_date=maturity_date, dtype=self._dtype) else: daycount_fraction = tf.convert_to_tensor(daycount_fraction, self._dtype) dfstart = self.get_discount_factor(start_date) dfmaturity = self.get_discount_factor(maturity_date) return (dfstart / dfmaturity - 1.0) / daycount_fraction
Returns the simply accrued forward rate between [start_dt, maturity_dt]. Args: start_date: A `DateTensor` specifying the start of the accrual period for the forward rate. maturity_date: A `DateTensor` specifying the end of the accrual period for the forward rate. The shape of `maturity_date` must be the same as the shape of the `DateTensor` `start_date`. daycount_fraction: An optional `Tensor` of real dtype specifying the time between `start_date` and `maturity_date` in years computed using the forward rate's day count basis. The shape of the input should be the same as that of `start_date` and `maturity_date`. Default value: `None`, in which case the daycount fraction is computed using `ACTUAL_365` convention. Returns: A real tensor of same shape as the inputs containing the simply compounded forward rate.
github-repos
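The rate formula reduces to (df_start / df_maturity - 1) / daycount_fraction; a numeric sketch with made-up discount factors:

df_start, df_maturity = 0.99, 0.97   # hypothetical discount factors
tau = 0.5                            # accrual period in years

forward = (df_start / df_maturity - 1.0) / tau
print(forward)  # ~0.04124, i.e. about a 4.12% simply compounded forward rate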
def launch_external_file(filename: str, raise_if_fails: bool=False) -> None: log.info('Launching external file: {!r}', filename) try: if sys.platform.startswith('linux'): cmdargs = ['xdg-open', filename] subprocess.call(cmdargs) else: os.startfile(filename) except Exception as e: log.critical('Error launching {!r}: error was {}.\n\n{}', filename, str(e), traceback.format_exc()) if raise_if_fails: raise
Launches a file using the operating system's standard launcher. Args: filename: file to launch raise_if_fails: raise any exceptions from ``subprocess.call(["xdg-open", filename])`` (Linux) or ``os.startfile(filename)`` (otherwise)? If not, exceptions are suppressed.
codesearchnet
def clean(exclude): pretend = context.get('pretend', False) exclude = (list(exclude) + conf.get('clean.exclude', [])) clean_patterns = conf.get('clean.patterns', ['*__pycache__*', '*.py[cod]', '*.swp']) num_files = 0 with util.timed_block() as t: files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude) for path in files: try: num_files += 1 if (not isdir(path)): log.info(' <91>[file] <90>{}', path) ((not pretend) and os.remove(path)) else: log.info(' <91>[dir] <90>{}', path) ((not pretend) and rmtree(path)) except OSError: log.info('<33>Failed to remove <90>{}', path) if pretend: msg = 'Would delete <33>{}<32> files. Took <33>{}<32>s' else: msg = 'Deleted <33>{}<32> files in <33>{}<32>s' log.info(msg.format(num_files, t.elapsed_s))
Remove all unnecessary files. Args: pretend (bool): If set to **True**, do not delete any files, just show what would be deleted. exclude (list[str]): A list of path patterns to exclude from deletion.
codesearchnet
def response_data_to_model_instance(self, response_data): response_data['datetime_created'] = dateutil.parser.parse(response_data['datetime_created']) if response_data['datetime_finished']: response_data['datetime_finished'] = dateutil.parser.parse(response_data['datetime_finished']) return super(BaseTaskInstanceManager, self).response_data_to_model_instance(response_data)
Convert response data to a task instance model. Args: response_data (dict): The data from the request's response. Returns: :class:`saltant.models.base_task_instance.BaseTaskInstance`: A task instance model instance representing the task instance from the reponse data.
codesearchnet
def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: output_size = size['shortest_edge'] return center_crop(image, size=(output_size, output_size), data_format=data_format, input_data_format=input_data_format, **kwargs)
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Args: image (`np.ndarray`): Image to center crop. size (`Dict[str, int]`): Size of the output image in the form `{"height": h, "width": w}`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image.
github-repos
def GetOpeningBracket(node): return getattr(node, _NODE_ANNOTATION_PREFIX + 'container_bracket', None)
Get opening bracket value from a node. Arguments: node: the node. Returns: The opening bracket node or None if it couldn't find one.
github-repos
def as_dict(self, verbosity=1, fmt=None, **kwargs): if (fmt == 'abivars'): 'Returns a dictionary with the ABINIT variables.' from pymatgen.io.abinit.abiobjects import structure_to_abivars return structure_to_abivars(self, **kwargs) latt_dict = self._lattice.as_dict(verbosity=verbosity) del latt_dict['@module'] del latt_dict['@class'] d = {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'charge': self._charge, 'lattice': latt_dict, 'sites': []} for site in self: site_dict = site.as_dict(verbosity=verbosity) del site_dict['lattice'] del site_dict['@module'] del site_dict['@class'] d['sites'].append(site_dict) return d
Dict representation of Structure. Args: verbosity (int): Verbosity level. Default of 1 includes both direct and cartesian coordinates for all sites, lattice parameters, etc. Useful for reading and for insertion into a database. Set to 0 for an extremely lightweight version that only includes sufficient information to reconstruct the object. fmt (str): Specifies a format for the dict. Defaults to None, which is the default format used in pymatgen. Other options include "abivars". **kwargs: Allow passing of other kwargs needed for certain formats, e.g., "abivars". Returns: JSON serializable dict representation.
codesearchnet
def __init__(self, auth, **kwargs): self.auth = auth self.protocol = kwargs.get('protocol', 'https') self.domain = kwargs.get('domain', 'api.sumologic.com') self.api = kwargs.get('api', '/api/v1') api_path = '%s' % self.api self.url = '%s://%s%s' % (self.protocol, self.domain, api_path) self._debug_mode = kwargs.get('debug', False) self.log = logging.getLogger(__name__) self.log.addHandler(logging.StreamHandler()) self.log.setLevel(get_logging_level(self._debug_mode))
Initializes Client object. Args: auth (tuple): Authentication object api (str): API endpoint path
juraj-google-style
def remove_send_last_message(self, connection): if connection in self._send_last_message: del self._send_last_message[connection] LOGGER.debug("Removed send_last_message function " "for connection %s", connection) else: LOGGER.warning("Attempted to remove send_last_message " "function for connection %s, but no " "send_last_message function was registered", connection)
Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages.
juraj-google-style
def split(node, stack): (node, defined, reaching) = _fix(node) node = store_state(node, reaching, defined, stack) anno.clearanno(node) return node
Carry over the state from the primal to the adjoint. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. stack: The stack node to use for storing and restoring state. Returns: func: A `Module` node with two function definitions containing the primal and adjoint respectively.
codesearchnet
def gated_grpc_debug_watches(self): return list(self._gated_grpc_debug_watches)
Get the list of debug watches with attribute gated_grpc=True. Since the server receives `GraphDef` from the debugged runtime, it can only return such debug watches that it has received so far. Returns: A `list` of `DebugWatch` `namedtuples` representing the debug watches with gated_grpc=True. Each `namedtuple` element has the attributes: `node_name` as a `str`, `output_slot` as an `int`, `debug_op` as a `str`.
github-repos
def norm(self, limits=None): kwargs = {} if limits is not None: kwargs = {'min': limits[0], 'max': limits[1]} return dim(self, norm, **kwargs)
Unity-based normalization to scale data into 0-1 range. (values - min) / (max - min) Args: limits: tuple of (min, max) defining the normalization range
juraj-google-style
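The unity-based normalization in the docstring, shown directly with NumPy; explicit limits behave the same way as the data min/max used here:

import numpy as np

values = np.array([3.0, 7.0, 11.0])
lo, hi = values.min(), values.max()   # or an explicit limits=(lo, hi) pair
print((values - lo) / (hi - lo))      # [0.  0.5 1. ]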
def _init_obj_attrs(self, obj, user=False): for attr in obj.__class__._tx_attrs.values(): if user: attr_name = "_txa_%s" % attr.name else: attr_name = attr.name if attr.mult in [MULT_ZEROORMORE, MULT_ONEORMORE]: setattr(obj, attr_name, []) elif attr.cls.__name__ in BASE_TYPE_NAMES: if self.auto_init_attributes: setattr(obj, attr_name, python_type(attr.cls.__name__)()) else: if attr.bool_assignment: setattr(obj, attr_name, False) else: setattr(obj, attr_name, None) else: setattr(obj, attr_name, None)
Initialize obj attributes. Args: obj(object): A python object to set attributes to. user(bool): If this object is a user object mangle attribute names.
juraj-google-style
def conf(self): return self.env.get_template('conf.py.j2').render(metadata=self.metadata, package=self.package)
Generate the Sphinx `conf.py` configuration file Returns: (str): the contents of the `conf.py` file.
codesearchnet
def _on_join_leader(self, response): try: group_assignment = self._perform_assignment(response.leader_id, response.group_protocol, response.members) except Exception as e: return Future().failure(e) version = (0 if (self.config['api_version'] < (0, 11, 0)) else 1) request = SyncGroupRequest[version](self.group_id, self._generation.generation_id, self._generation.member_id, [(member_id, (assignment if isinstance(assignment, bytes) else assignment.encode())) for (member_id, assignment) in six.iteritems(group_assignment)]) log.debug('Sending leader SyncGroup for group %s to coordinator %s: %s', self.group_id, self.coordinator_id, request) return self._send_sync_group_request(request)
Perform leader synchronization and send back the assignment for the group via SyncGroupRequest Arguments: response (JoinResponse): broker response to parse Returns: Future: resolves to member assignment encoded-bytes
codesearchnet
def find_common_root(elements): if (not elements): raise UserWarning("Can't find common root - no elements suplied.") root_path = el_to_path_vector(elements.pop()) for el in elements: el_path = el_to_path_vector(el) root_path = common_vector_root(root_path, el_path) if (not root_path): raise UserWarning(('Vectors without common root:\n%s' % str(el_path))) return root_path
Find root which is common for all `elements`. Args: elements (list): List of double-linked HTMLElement objects. Returns: list: Vector of HTMLElement containing path to common root.
codesearchnet
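The common-root reduction is just a longest-common-prefix over path vectors; a sketch on plain lists of node names (the real code walks HTMLElement objects):

def common_vector_root(v1, v2):
    # Keep elements while the two paths agree, stop at the first mismatch.
    root = []
    for a, b in zip(v1, v2):
        if a != b:
            break
        root.append(a)
    return root

p1 = ["html", "body", "div", "p"]
p2 = ["html", "body", "div", "ul", "li"]
print(common_vector_root(p1, p2))  # ['html', 'body', 'div']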
def _L2LossGrad(op: ops.Operation, grad): return op.inputs[0] * grad
Return the gradients for L2Loss. Args: op: The L2LossOp for which we need to generate gradients. grad: Tensor containing a single number. Returns: The gradient, which is (x * grad).
github-repos
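A quick sanity check of the stated gradient (d/dx of sum(x**2)/2 is x), assuming TensorFlow 2.x eager mode:

import tensorflow as tf

x = tf.constant([3.0, 4.0])
with tf.GradientTape() as tape:
    tape.watch(x)                  # constants are not watched by default
    loss = tf.nn.l2_loss(x)        # sum(x**2) / 2
print(tape.gradient(loss, x))      # [3. 4.] -- equals x * grad with grad = 1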
def add_query(self, query, join_with=AND): if (not isinstance(query, DomainCondition)): query = DomainCondition.from_tuple(query) if len(self.query): self.query.append(join_with) self.query.append(query)
Join a new query to existing queries on the stack. Args: query (tuple or list or DomainCondition): The condition for the query. If a ``DomainCondition`` object is not provided, the input should conform to the interface defined in :func:`~.domain.DomainCondition.from_tuple`. join_with (str): The join string to apply, if other queries are already on the stack.
codesearchnet
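A sketch of the intended call pattern, assuming a domain object exposing this method and the (field, operator, value) tuple form accepted by `DomainCondition.from_tuple`; the field names and `OR` constant are illustrative:
# Hypothetical usage; tuple layout follows the from_tuple contract in the docstring.
domain.add_query(('state', '=', 'done'))
domain.add_query(('amount', '>', 100), join_with=OR)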
def cleanup(context):
    for name in 'work_dir', 'artifact_dir', 'task_log_dir':
        path = context.config[name]
        if os.path.exists(path):
            log.debug("rm({})".format(path))
            rm(path)
        makedirs(path)
Clean up the work_dir and artifact_dir between task runs, then recreate. Args: context (scriptworker.context.Context): the scriptworker context.
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return cls + token_ids_0 + sep
    return cls + token_ids_0 + sep + token_ids_1 + sep
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. An FNet sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
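A brief illustration with a Hugging Face FNet tokenizer; the checkpoint name is only an example and the resulting token IDs depend on the vocabulary:
from transformers import FNetTokenizer

tok = FNetTokenizer.from_pretrained("google/fnet-base")  # example checkpoint
ids_a = tok.convert_tokens_to_ids(tok.tokenize("Hello world"))
ids_b = tok.convert_tokens_to_ids(tok.tokenize("How are you?"))
single = tok.build_inputs_with_special_tokens(ids_a)         # [CLS] A [SEP]
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)    # [CLS] A [SEP] B [SEP]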
def InjectString(self, codestring, wait_for_completion=True):
    if self.inferior.is_running and self.inferior.gdb.IsAttached():
        try:
            self.inferior.gdb.InjectString(
                self.inferior.position,
                codestring,
                wait_for_completion=wait_for_completion)
        except RuntimeError:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
    else:
        logging.error('Not attached to any process.')
Try to inject python code into current thread. Args: codestring: Python snippet to execute in inferior. (may contain newlines) wait_for_completion: Block until execution of snippet has completed.
juraj-google-style
def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'):
    if n is None:
        n = __READ_TAIL_N__
    verbose = _rectify_verb_read(verbose)
    if verbose:
        print('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n))
    try:
        if not util_path.checkpath(fpath, verbose=verbose, n=n):
            raise IOError('[io] * FILE DOES NOT EXIST!')
        with open(fpath, 'rb') as file_:
            # Decoding is identical on Python 2 and 3, so the six.PY2 branches
            # from the original are collapsed here without changing behavior.
            if aslines:
                text = [line.decode('utf8', errors=errors)
                        for line in file_.readlines()]
            else:
                text = file_.read().decode('utf8', errors=errors)
        return text
    except IOError as ex:
        from utool import util_dbg
        if verbose or strict:
            util_dbg.printex(ex, ' * Error reading fpath=%r' %
                             util_path.tail(fpath, n=n), '[io]')
        if strict:
            raise
Reads text from a file. Automatically returns utf8. Args: fpath (str): file path aslines (bool): if True returns list of lines verbose (bool): verbosity flag Returns: str: text from fpath (this is unicode) Ignore: x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n''' ut.writeto('foo.txt', x) y = ut.readfrom('foo.txt') y.encode('utf8') == x
juraj-google-style
def update_if_absent(self, **kwargs):
    for arg in kwargs:
        if hasattr(self, arg):
            if getattr(self, arg) is None:
                setattr(self, arg, kwargs[arg])
        else:
            raise ValueError(
                'Invalid RayParams parameter in update_if_absent: %s' % arg)
    self._check_usage()
Update the settings when the target fields are None. Args: kwargs: The keyword arguments to set corresponding fields.
codesearchnet
def libdmtx_function(fname, restype, *args):
    prototype = CFUNCTYPE(restype, *args)
    return prototype((fname, load_libdmtx()))
Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: cddl.CFunctionType: A wrapper around the function.
juraj-google-style
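A hedged sketch of how such a wrapper is typically used to bind a `libdmtx` export; the export name, its argument types, and the `image_ptr` value below are assumptions for illustration, not a guaranteed part of the library ABI:
from ctypes import c_void_p, c_int

# Assumed export: dmtxDecodeCreate(DmtxImage*, int shrink) -> DmtxDecode*
dmtx_decode_create = libdmtx_function('dmtxDecodeCreate', c_void_p, c_void_p, c_int)
decoder = dmtx_decode_create(image_ptr, 1)   # image_ptr obtained elsewhere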
def is_module_function(obj, prop):
    python_version = sys.version_info[0]
    if python_version == 3:
        unicode = str
    if prop and (isinstance(prop, str) or isinstance(prop, unicode)):
        if prop in dir(obj):
            if (isinstance(getattr(obj, prop), FunctionType)
                    or isinstance(getattr(obj, prop), BuiltinFunctionType)
                    or inspect.ismethod(getattr(obj, prop))):
                return True
            else:
                ErrorHandler.prop_is_func_error(obj, prop)
        else:
            ErrorHandler.prop_in_obj_error(obj, prop)
    elif prop:
        ErrorHandler.prop_type_error(prop)
    return False
Checking and setting type to MODULE_FUNCTION Args: obj: ModuleType prop: FunctionType Return: Boolean Raise: prop_type_error: When the type of prop is not valid prop_in_obj_error: When prop is not in the obj(module/class) prop_is_func_error: When prop is not a callable stuff
juraj-google-style
def _kl_von_mises_von_mises(d1, d2, name=None):
    with tf.name_scope(name or "kl_von_mises_von_mises"):
        i0e_concentration1 = tf.math.bessel_i0e(d1.concentration)
        i1e_concentration1 = tf.math.bessel_i1e(d1.concentration)
        i0e_concentration2 = tf.math.bessel_i0e(d2.concentration)
        return ((d2.concentration - d1.concentration)
                + tf.math.log(i0e_concentration2 / i0e_concentration1)
                + (d1.concentration - d2.concentration * tf.cos(d1.loc - d2.loc))
                * (i1e_concentration1 / i0e_concentration1))
Batchwise KL divergence KL(d1 || d2) with d1 and d2 von Mises. Args: d1: instance of a von Mises distribution object. d2: instance of a a von Mises distribution object. name: (optional) Name to use for created operations. default is "kl_von_mises_von_mises". Returns: Batchwise KL(d1 || d2)
juraj-google-style
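The same quantity is normally reached through the public divergence API; a short sketch, assuming TensorFlow Probability is installed (parameter values are illustrative):
import tensorflow_probability as tfp

d1 = tfp.distributions.VonMises(loc=0.0, concentration=1.0)
d2 = tfp.distributions.VonMises(loc=0.5, concentration=2.0)
kl = tfp.distributions.kl_divergence(d1, d2)   # dispatches to the registered kernel above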
def normalize_name(name, overrides=None):
    normalized_name = name.title()
    if overrides:
        override_map = dict([(name.title(), name) for name in overrides])
        return override_map.get(normalized_name, normalized_name)
    else:
        return normalized_name
Normalize the key name to title case. For example, ``normalize_name('content-id')`` will become ``Content-Id`` Args: name (str): The name to normalize. overrides (set, sequence): A set or sequence containing keys that should be cased to themselves. For example, passing ``set('WARC-Type')`` will normalize any key named "warc-type" to ``WARC-Type`` instead of the default ``Warc-Type``. Returns: str
juraj-google-style
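A quick illustration of the override behaviour described above:
normalize_name('content-id')                              # 'Content-Id'
normalize_name('warc-type', overrides={'WARC-Type'})      # 'WARC-Type'
normalize_name('content-type', overrides={'WARC-Type'})   # 'Content-Type'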
def wait(self, **kwargs): return self.client.api.wait(self.id, **kwargs)
Block until the container stops, then return its exit code. Similar to the ``docker wait`` command. Args: timeout (int): Request timeout condition (str): Wait until a container state reaches the given condition, either ``not-running`` (default), ``next-exit``, or ``removed`` Returns: (dict): The API's response as a Python dictionary, including the container's exit code under the ``StatusCode`` attribute. Raises: :py:class:`requests.exceptions.ReadTimeout` If the timeout is exceeded. :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
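A short docker-py usage sketch; the image name, command, and timeout are illustrative:
import docker

client = docker.from_env()
container = client.containers.run("alpine", "sleep 1", detach=True)
result = container.wait(timeout=30)
print(result["StatusCode"])   # 0 on a clean exit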
class IncSlidingStdevTracker(IncStdevTracker):

    def __init__(self, window_size):
        super().__init__(window_mode=WindowMode.SLIDING, window_size=window_size)
Sliding window standard deviation tracker using incremental calculation. Args: window_size: The size of the sliding window.
github-repos
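A minimal sketch of feeding observations into such a tracker; the `push`/`get` method names follow the common univariate-tracker interface in this code base and are an assumption here, as are the sample values:
# Illustrative; assumes the base tracker exposes push()/get().
tracker = IncSlidingStdevTracker(window_size=100)
for x in [1.0, 2.0, 4.0, 7.0]:
    tracker.push(x)
print(tracker.get())   # stdev over the most recent (up to 100) values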
def perform(self, agent_indices, observ):
    with tf.name_scope('perform/'):
        observ = self._observ_filter.transform(observ)
        if self._last_state is None:
            state = None
        else:
            state = tools.nested.map(
                lambda x: tf.gather(x, agent_indices), self._last_state)
        with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
            output = self._network(
                observ[:, None], tf.ones(observ.shape[0]), state)
        action = tf.cond(
            self._is_training, output.policy.sample, output.policy.mode)
        logprob = output.policy.log_prob(action)[:, 0]
        summary = tf.cond(self._should_log, lambda: tf.summary.merge([
            tf.summary.histogram('mode', output.policy.mode()[:, 0]),
            tf.summary.histogram('action', action[:, 0]),
            tf.summary.histogram('logprob', logprob)]), str)
        if self._last_state is None:
            assign_state = tf.no_op()
        else:
            assign_state = utility.assign_nested_vars(
                self._last_state, output.state, agent_indices)
        remember_last_action = tf.scatter_update(
            self._last_action, agent_indices, action[:, 0])
        policy_params = tools.nested.filter(
            lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
        assert policy_params, 'Policy has no parameters to store.'
        remember_last_policy = tools.nested.map(
            lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
            self._last_policy, policy_params, flatten=True)
        with tf.control_dependencies(
                (assign_state, remember_last_action) + remember_last_policy):
            return action[:, 0], tf.identity(summary)
Compute batch of actions and a summary for a batch of observation. Args: agent_indices: Tensor containing current batch indices. observ: Tensor of a batch of observations for all agents. Returns: Tuple of action batch tensor and summary tensor.
juraj-google-style
def check_for_lane_permission(self):
    if self.current.lane_permission:
        log.debug("HAS LANE PERM: %s" % self.current.lane_permission)
        perm = self.current.lane_permission
        if not self.current.has_permission(perm):
            raise HTTPError(403, "You don't have required lane permission: %s" % perm)
    if self.current.lane_relations:
        context = self.get_pool_context()
        log.debug("HAS LANE RELS: %s" % self.current.lane_relations)
        try:
            cond_result = eval(self.current.lane_relations, context)
        except:
            log.exception("CONDITION EVAL ERROR : %s || %s" % (
                self.current.lane_relations, context))
            raise
        if not cond_result:
            log.debug("LANE RELATION ERR: %s %s" % (
                self.current.lane_relations, context))
            raise HTTPError(403, "You aren't qualified for this lane: %s" %
                            self.current.lane_relations)
One or more permissions can be associated with a lane of a workflow. In a similar way, a lane can be restricted with relation to other lanes of the workflow. This method is called on lane changes and checks that the user has the required permissions and relations. Raises: HTTPForbidden: if the current user hasn't got the required permissions and proper relations
codesearchnet
def restore(self, state):
    selector = DataStreamSelector.FromString(state.get(u'selector'))
    if selector != self.selector:
        raise ArgumentError('Attempted to restore a BufferedStreamWalker with a different selector',
                            selector=self.selector, serialized_data=state)
    self.seek(state.get(u'offset'), target='offset')
Restore a previous state of this stream walker. Raises: ArgumentError: If the state refers to a different selector or the offset is invalid.
codesearchnet
def convert_sigmoid(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting sigmoid ...')

    if names == 'short':
        tf_name = 'SIGM' + random_string(4)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    sigmoid = keras.layers.Activation('sigmoid', name=tf_name)
    layers[scope_name] = sigmoid(layers[inputs[0]])
Convert sigmoid layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def scatter_div(self, sparse_delta, use_locking=False, name=None):
    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)
    return gen_state_ops.scatter_div(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking,
        name=name)
Divide this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to divide this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered division has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
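A small sketch through the public `tf.Variable.scatter_div` entry point, which is the usual way to reach this operation; the values are illustrative:
import tensorflow as tf

v = tf.Variable([10.0, 20.0, 30.0])
delta = tf.IndexedSlices(values=tf.constant([2.0, 5.0]),
                         indices=tf.constant([0, 2]))
v.scatter_div(delta)
print(v.numpy())   # [ 5. 20.  6.]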
def balance(self, as_of=None, raw=False, leg_query=None, **kwargs):
    balances = [
        account.simple_balance(as_of=as_of, raw=raw, leg_query=leg_query, **kwargs)
        for account in self.get_descendants(include_self=True)
    ]
    return sum(balances, Balance())
Get the balance for this account, including child accounts Args: as_of (Date): Only include transactions on or before this date raw (bool): If true the returned balance should not have its sign adjusted for display purposes. kwargs (dict): Will be used to filter the transaction legs Returns: Balance See Also: :meth:`simple_balance()`
juraj-google-style
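A hedged usage sketch against an account tree of this kind; the `assets` account, its children, and the dates are assumptions for illustration:
from datetime import date

# Assumes `assets` is an Account instance with child accounts underneath it.
total = assets.balance(as_of=date(2024, 1, 31))
raw_total = assets.balance(raw=True)   # sign not adjusted for display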
def compare_count(self):
    if self.query.options['count'] is not None:
        count_opt = int(self.query.options['count'])
        self._cache_at_least(count_opt + 1)
        return cmp(len(self._result_cache), count_opt)
    if self.query.options['minimum'] is not None:
        min_opt = int(self.query.options['minimum'])
        if not self._cache_at_least(min_opt):
            return -1
    if self.query.options['maximum'] is not None:
        max_opt = int(self.query.options['maximum'])
        if self._cache_at_least(max_opt + 1):
            return 1
    if self.query.options['between'] is not None:
        between = self.query.options['between']
        min_opt, max_opt = between[0], between[-1]
        if not self._cache_at_least(min_opt):
            return -1
        if self._cache_at_least(max_opt + 1):
            return 1
        return 0
    return 0
Returns how the result count compares to the query options. The return value is negative if too few results were found, zero if enough were found, and positive if too many were found. Returns: int: -1, 0, or 1.
codesearchnet
def get_devices(ads, **kwargs):

    def _get_device_filter(ad):
        for k, v in kwargs.items():
            if not hasattr(ad, k):
                return False
            elif getattr(ad, k) != v:
                return False
        return True

    filtered = filter_devices(ads, _get_device_filter)
    if not filtered:
        raise Error(
            'Could not find a target device that matches condition: %s.' % kwargs)
    else:
        return filtered
Finds a list of AndroidDevice instance from a list that has specific attributes of certain values. Example: get_devices(android_devices, label='foo', phone_number='1234567890') get_devices(android_devices, model='angler') Args: ads: A list of AndroidDevice instances. kwargs: keyword arguments used to filter AndroidDevice instances. Returns: A list of target AndroidDevice instances. Raises: Error: No devices are matched.
github-repos
def stack1d(*points):
    result = np.empty((2, len(points)), order='F')
    for index, point in enumerate(points):
        result[:, index] = point
    return result
Fill out the columns of matrix with a series of points. This is because ``np.hstack()`` will just make another 1D vector out of them and ``np.vstack()`` will put them in the rows. Args: points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e. arrays with shape ``(2,)``). Returns: numpy.ndarray: The array with each point in ``points`` as its columns.
codesearchnet
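A quick check of the column-stacking behaviour with plain NumPy inputs:
import numpy as np

p0 = np.array([0.0, 1.0])
p1 = np.array([2.0, 3.0])
print(stack1d(p0, p1))
# [[0. 2.]
#  [1. 3.]]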
def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):
    bvsum = 0
    for specie1, occu1 in site.species.items():
        el1 = Element(specie1.symbol)
        for nn, dist in nn_list:
            for specie2, occu2 in nn.species.items():
                el2 = Element(specie2.symbol)
                if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:
                    r1 = BV_PARAMS[el1]['r']
                    r2 = BV_PARAMS[el2]['r']
                    c1 = BV_PARAMS[el1]['c']
                    c2 = BV_PARAMS[el2]['c']
                    R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / (c1 * r1 + c2 * r2)
                    vij = exp((R - dist * scale_factor) / 0.31)
                    bvsum += occu1 * occu2 * vij * (1 if el1.X < el2.X else -1)
    return bvsum
Calculates the BV sum of a site for unordered structures. Args: site: The site nn_list: List of nearest neighbors in the format [(nn_site, dist), ...]. scale_factor: A scale factor to be applied. This is useful for scaling distance, esp in the case of calculation-relaxed structures which may tend to under (GGA) or over bind (LDA).
codesearchnet
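A hedged pymatgen-style sketch of assembling the `nn_list` argument; the structure, the 3.0 Å cutoff, and the neighbor attribute name are assumptions for illustration:
# Illustrative only; assumes `structure` is a pymatgen Structure for a disordered compound.
site = structure[0]
neighbors = structure.get_neighbors(site, r=3.0)
nn_list = [(nn, nn.nn_distance) for nn in neighbors]
bv_sum = calculate_bv_sum_unordered(site, nn_list, scale_factor=1.0)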