Columns: code — string (lengths 20 to 4.93k); docstring — string (lengths 33 to 1.27k); source — string (3 classes).
def reverse_transform(self, col): output = pd.DataFrame() new_name = '?' + self.col_name col.loc[col[new_name] == 0, self.col_name] = np.nan output[self.col_name] = col[self.col_name] return output
Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
juraj-google-style
def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False): assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if self.trigram_input: inputs_embeds = tf.concat([tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))), inputs_embeds, tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0)))], axis=2) if self.trigram_input or self.embedding_size != self.hidden_size: inputs_embeds = self.embedding_transformation(inputs_embeds) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings
Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor.
github-repos
def __init__(self, window, index=-1, flags=frozenset()): self._ptr = check_ptr_err(lib.SDL_CreateRenderer(window._ptr, index, enumtools.get_mask(flags)))
Create a 2D rendering context for a window. Args: window (Window): The window where rendering is displayed. index (int): The index of the rendering driver to initialize, or -1 to initialize the first one supporting the requested flags. flags (Set[RendererFlags]): The requested renderer flags. Raises: SDLError: If there was an error creating the renderer.
juraj-google-style
def get_size_ratio(path_a: str, path_b: str) -> float: size_a = get_dir_size(path_a) size_b = get_dir_size(path_b) return size_a / size_b
Return the size ratio of the given paths. Args: path_a: Path of a directory or a file to be the numerator of the ratio. path_b: Path of a directory or a file to be the denominator of the ratio. Returns: Ratio of size of path_a / size of path_b.
github-repos
def GetArtifactsForCollection(os_name, artifact_list): artifact_arranger = ArtifactArranger(os_name, artifact_list) artifact_names = artifact_arranger.GetArtifactsInProperOrder() return artifact_names
Wrapper for the ArtifactArranger. Extend the artifact list with dependencies and sort the artifacts to resolve the dependencies. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names. Returns: A list of artifacts such that if they are collected in the given order their dependencies are resolved.
codesearchnet
def download_mmcif_header(pdb_id, outdir='', force_rerun=False): pdb_id = pdb_id.lower() file_type = 'cif' folder = 'header' outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): download_link = 'http: urlretrieve(download_link, outfile) log.debug('{}: saved header file'.format(outfile)) else: log.debug('{}: header file already saved'.format(outfile)) return outfile
Download a mmCIF header file from the RCSB PDB by ID. Args: pdb_id: PDB ID outdir: Optional output directory, default is current working directory force_rerun: If the file should be downloaded again even if it exists Returns: str: Path to outfile
codesearchnet
def from_hubo(cls, H, offset=None): poly = cls(H, Vartype.BINARY) if offset is not None: poly[()] = poly.get((), 0) + offset return poly
Construct a binary polynomial from a higher-order unconstrained binary optimization (HUBO) problem. Args: H (dict): Coefficients of a higher-order unconstrained binary optimization (HUBO) model. Returns: :obj:`.BinaryPolynomial` Examples: >>> poly = dimod.BinaryPolynomial.from_hubo({('a', 'b', 'c'): -1})
juraj-google-style
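A hedged usage sketch of the offset handling, based only on the from_hubo signature shown above (assumes dimod is installed): the offset is accumulated on the empty-tuple term.

import dimod

H = {('a', 'b', 'c'): -1, ('a', 'b'): 2}
poly = dimod.BinaryPolynomial.from_hubo(H, offset=0.5)
print(poly[()])  # 0.5 -- per the code above, the offset lands on the empty-tuple term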
def _maybe_broadcast_to_outputs(self, outputs, objects): if not self._should_broadcast(objects): return objects should_copy_objects = len(nest.flatten(outputs)) > 1 def _broadcast_fn(): if should_copy_objects: return nest.map_structure(self._copy_object, objects) return objects return nest.map_structure(lambda _: _broadcast_fn(), outputs)
Determines if losses / metrics should be applied to all outputs. NOTE: This method should only be called for Metrics / Losses, not for y_true / sample_weight. Args: outputs: Model predictions. objects: Arbitrary nested structure (e.g. of losses or metrics) Returns: Arbitrary nested structure of objects, maybe copied to each output. Applies a Loss / Metric to all outputs.
github-repos
def _ip_unnumbered_name(self, **kwargs): method_name = 'interface_%s_ip_ip_config_unnumbered_ip_donor_'\ 'interface_name' % kwargs['int_type'] ip_unnumbered_name = getattr(self._interface, method_name) config = ip_unnumbered_name(**kwargs) if kwargs['delete']: tag = 'ip-donor-interface-name' config.find('. return config
Return the `ip unnumbered` donor name XML. You should not use this method. You probably want `Interface.ip_unnumbered`. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). delete (bool): Remove the configuration if ``True``. ip_donor_interface_name (str): The donor interface name (1, 2, etc) Returns: XML to be passed to the switch. Raises: None
juraj-google-style
def get_model_details(self, model_name): full_name = model_name if not model_name.startswith('projects/'): full_name = ('projects/%s/models/%s' % (self._project_id, model_name)) return self._api.projects().models().get(name=full_name).execute()
Get details of the specified model from CloudML Service. Args: model_name: the name of the model. It can be a model full name ("projects/[project_id]/models/[model_name]") or just [model_name]. Returns: a dictionary of the model details.
juraj-google-style
def change_password(username, new_password): assert username in passwd_reader.load_users(),\ "Username '%s' not found!" % username sh.ftpasswd( "--change-password", passwd=True, name=username, stdin=True, file=settings.LOGIN_FILE, _in=new_password ) reload_configuration()
Change password for given `username`. Args: username (str): User's name. new_password (str): User's new password.
juraj-google-style
def inverse(self): if (not self.definition): raise QiskitError(('inverse() not implemented for %s.' % self.name)) inverse_gate = self.copy(name=(self.name + '_dg')) inverse_gate._definition = [] for (inst, qargs, cargs) in reversed(self._definition): inverse_gate._definition.append((inst.inverse(), qargs, cargs)) return inverse_gate
Invert this instruction. If the instruction is composite (i.e. has a definition), then its definition will be recursively inverted. Special instructions inheriting from Instruction can implement their own inverse (e.g. T and Tdg, Barrier, etc.) Returns: Instruction: a fresh instruction for the inverse Raises: QiskitError: if the instruction is not composite and an inverse has not been implemented for it.
codesearchnet
def patch_request(self, uri, body, custom_headers=None, timeout=-1): logger.debug('Patch resource (uri = %s, data = %s)' % (uri, body)) if not custom_headers: custom_headers = {} if self._connection._apiVersion >= 300 and 'Content-Type' not in custom_headers: custom_headers['Content-Type'] = 'application/json-patch+json' task, entity = self._connection.patch(uri, body, custom_headers=custom_headers) if not task: return entity return self._task_monitor.wait_for_task(task, timeout)
Uses the PATCH method to update a resource. Only one operation can be performed in each PATCH call. Args: body (list): Patch request body timeout (int): Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. custom_headers (dict): Allows adding custom HTTP headers. Returns: Updated resource.
juraj-google-style
def __contains__(self, key): path = self.keypath(key) return fs.exists(path)
Check cache contents. Arguments: key: Key. Returns: bool: True if key in cache, else false.
juraj-google-style
class TFCvtEncoder(keras.layers.Layer): config_class = CvtConfig def __init__(self, config: CvtConfig, **kwargs): super().__init__(**kwargs) self.config = config self.stages = [TFCvtStage(config, stage_idx, name=f'stages.{stage_idx}') for stage_idx in range(len(config.depth))] def call(self, pixel_values: TFModelInputType, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None hidden_state = pixel_values hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1)) cls_token = None for _, stage_module in enumerate(self.stages): hidden_state, cls_token = stage_module(hidden_state, training=training) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2)) if output_hidden_states: all_hidden_states = tuple([tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states]) if not return_dict: return tuple((v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)) return TFBaseModelOutputWithCLSToken(last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'stages', None) is not None: for layer in self.stages: with tf.name_scope(layer.name): layer.build(None)
Convolutional Vision Transformer encoder. CVT has 3 stages of encoder blocks with their respective number of layers (depth) being 1, 2 and 10. Args: config ([`CvtConfig`]): Model configuration class.
github-repos
def flatten(index, name='segmented_flatten'): batch_size = torch.prod(torch.tensor(list(index.batch_shape()))) offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments offset = offset.view(index.batch_shape()) for _ in range(index.batch_dims, len(index.indices.size())): offset = offset.unsqueeze(-1) indices = offset + index.indices return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0)
Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by *num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the batch. Args: index (`IndexMap`): IndexMap to flatten. name (`str`, *optional*, defaults to 'segmented_flatten'): Name for the operation. Currently not used Returns: (`IndexMap`): The flattened IndexMap.
github-repos
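A minimal NumPy sketch of the flattening arithmetic described above, using made-up values (the real function operates on torch tensors wrapped in an IndexMap): batch element k has its segment indices shifted by num_segments * k before the map is reshaped to 1-D.

import numpy as np

# Hypothetical batched index map: batch_size=2, seq_length=3, num_segments=4.
indices = np.array([[0, 1, 1],
                    [2, 0, 3]])
num_segments = 4
batch_size = indices.shape[0]

# Offset row k by num_segments * k so segments from different batch elements stay distinct.
offset = np.arange(batch_size)[:, None] * num_segments
flat_indices = (indices + offset).reshape(-1)

print(flat_indices)               # [0 1 1 6 4 7]
print(num_segments * batch_size)  # 8 segments in the flattened map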
def load_schema(schema_name, resolved=False): schema_data = '' with open(get_schema_path(schema_name, resolved)) as schema_fd: schema_data = json.loads(schema_fd.read()) return schema_data
Load the given schema from wherever it's installed. Args: schema_name(str): Name of the schema to load, for example 'authors'. resolved(bool): If True will return the resolved schema, that is with all the $refs replaced by their targets. Returns: dict: the schema with the given name.
juraj-google-style
async def inspect(self, *, node_id: str) -> Mapping[(str, Any)]: response = (await self.docker._query_json('nodes/{node_id}'.format(node_id=node_id), method='GET')) return response
Inspect a node Args: node_id: The ID or name of the node
codesearchnet
def run(self, text): for pp in self.pre_processors: text = pp.run(text) return text
Run each substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied.
codesearchnet
def test_rpc_stage_dependencies(self, mock_handle_resp, mock_decode_resp_str, mock_send_request, mock_gen_request, mock_precheck): self.client.initialize() expected_response_str = '{"id": 0, "result": 123, "error": null, "callback": null}' expected_response_dict = {'id': 0, 'result': 123, 'error': None, 'callback': None} expected_request = '{"id": 10, "method": "some_rpc", "params": [1, 2],"kwargs": {"test_key": 3}' expected_result = 123 mock_gen_request.return_value = expected_request mock_send_request.return_value = expected_response_str mock_decode_resp_str.return_value = expected_response_dict mock_handle_resp.return_value = expected_result rpc_result = self.client.some_rpc(1, 2, test_key=3) mock_precheck.assert_called() mock_gen_request.assert_called_with(0, 'some_rpc', 1, 2, test_key=3) mock_send_request.assert_called_with(expected_request) mock_decode_resp_str.assert_called_with(0, expected_response_str) mock_handle_resp.assert_called_with('some_rpc', expected_response_dict) self.assertEqual(rpc_result, expected_result)
Test the internal dependencies when sending an RPC. When sending an RPC, it calls multiple functions in specific order, and each function uses the output of the previously called function. This test case checks above dependencies. Args: mock_handle_resp: the mock function of FakeClient._handle_rpc_response. mock_decode_resp_str: the mock function of FakeClient._decode_response_string_and_validate_format. mock_send_request: the mock function of FakeClient.send_rpc_request. mock_gen_request: the mock function of FakeClient._gen_rpc_request. mock_precheck: the mock function of FakeClient.check_server_proc_running.
github-repos
def get_cluster_interfaces(cluster, extra_cond=lambda nic: True): nics = get_nics(cluster) nics = [(nic['device'], nic['name']) for nic in nics if nic['mountable'] and nic['interface'] == 'Ethernet' and not nic['management'] and extra_cond(nic)] nics = sorted(nics) return nics
Get the network interface names corresponding to a criteria. Note that the cluster is passed (not the individual node names), thus it is assumed that all nodes in a cluster have the same interface names and the same configuration. In addition to ``extra_cond``, only the mountable and Ethernet interfaces are returned. Args: cluster(str): the cluster to consider extra_cond(lambda): boolean lambda that takes the nic(dict) as parameter
juraj-google-style
def __instantiate_page_object(page_obj_class, webdriver, **kwargs): try: page = page_obj_class(webdriver, **kwargs) return page except InvalidPageError: return True except TypeError: return False except Exception as e: raise e
Attempts to instantiate a page object. Args: page_obj_class (PageObject) - PageObject to instantiate. webdriver (WebDriver) - Selenium webdriver to associate with the PageObject Returns: PageObject - If page object instantiation succeeded. True - If page object instantiation failed, but validation was called. None - If validation did not occur.
juraj-google-style
def _is_molecule_linear(self, mol): if mol.NumAtoms() < 3: return True a1 = mol.GetAtom(1) a2 = mol.GetAtom(2) for i in range(3, mol.NumAtoms()+1): angle = float(mol.GetAtom(i).GetAngle(a2, a1)) if angle < 0.0: angle = -angle if angle > 90.0: angle = 180.0 - angle if angle > self._angle_tolerance: return False return True
Is the molecule a linear one Args: mol: The molecule. OpenBabel OBMol object. Returns: Boolean value.
juraj-google-style
def get(self, filter=False): result = {} for (k, v) in self.elements().items(): intermediate = v.get(filter=filter) if intermediate: result[k] = intermediate return result
Returns a dictionary with the values of the model. Note that the values of the leafs are YANG classes. Args: filter (bool): If set to ``True``, show only values that have been set. Returns: dict: A dictionary with the values of the model. Example: >>> pretty_print(config.get(filter=True)) >>> { >>> "interfaces": { >>> "interface": { >>> "et1": { >>> "config": { >>> "description": "My description", >>> "mtu": 1500 >>> }, >>> "name": "et1" >>> }, >>> "et2": { >>> "config": { >>> "description": "Another description", >>> "mtu": 9000 >>> }, >>> "name": "et2" >>> } >>> } >>> } >>> }
codesearchnet
async def report_winner(self, winner: Participant, scores_csv: str): await self._report(scores_csv, winner._id)
report scores and give a winner |methcoro| Args: winner: :class:Participant instance scores_csv: Comma separated set/game scores with player 1 score first (e.g. "1-3,3-0,3-2") Raises: ValueError: scores_csv has a wrong format APIException
juraj-google-style
def deserialize(segment): link_target = segment.link_data.link_target return ChatMessageSegment( segment.text, segment_type=segment.type, is_bold=segment.formatting.bold, is_italic=segment.formatting.italic, is_strikethrough=segment.formatting.strikethrough, is_underline=segment.formatting.underline, link_target=None if link_target == '' else link_target )
Construct :class:`ChatMessageSegment` from ``Segment`` message. Args: segment: ``Segment`` message to parse. Returns: :class:`ChatMessageSegment` object.
juraj-google-style
def _begin(self, retry_id=None): if self.in_progress: msg = _CANT_BEGIN.format(self._id) raise ValueError(msg) transaction_response = self._client._firestore_api.begin_transaction( self._client._database_string, options_=self._options_protobuf(retry_id), metadata=self._client._rpc_metadata, ) self._id = transaction_response.transaction
Begin the transaction. Args: retry_id (Optional[bytes]): Transaction ID of a transaction to be retried. Raises: ValueError: If the current transaction has already begun.
juraj-google-style
def _AvgPoolAlongCols(self, input_matrix, col_seq, overlapping): input_matrix = input_matrix.transpose() output_matrix = self._AvgPoolAlongRows(input_matrix, col_seq, overlapping) return output_matrix.transpose()
Perform average pool along column of a 2-D matrix based on col_seq. Args: input_matrix: A 2-D matrix. col_seq: Cumulative pooling sequence along column. overlapping: Whether or not use overlapping when pooling. Returns: A 2-D matrix, with * num_rows = input_matrix.num_rows * num_cols = len(col_seq)-1.
github-repos
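A small self-contained NumPy sketch of the transpose trick used above, with a hypothetical row-pooling helper standing in for _AvgPoolAlongRows: pooling along columns is pooling along rows of the transposed matrix, transposed back.

import numpy as np

def avg_pool_along_rows(matrix, row_seq):
    # row_seq is a cumulative pooling sequence, e.g. [0, 2, 4] pools rows 0-1 and rows 2-3.
    return np.stack([matrix[row_seq[i]:row_seq[i + 1]].mean(axis=0)
                     for i in range(len(row_seq) - 1)])

def avg_pool_along_cols(matrix, col_seq):
    # Same trick as the method above: transpose, pool rows, transpose back.
    return avg_pool_along_rows(matrix.T, col_seq).T

m = np.arange(12, dtype=float).reshape(3, 4)
print(avg_pool_along_cols(m, [0, 2, 4]))  # shape (3, 2): means of each pair of columns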
def _SetExtractionParsersAndPlugins(self, configuration, session): names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames( parser_filter_expression=configuration.parser_filter_expression) session.enabled_parser_names = list(names_generator) session.parser_filter_expression = configuration.parser_filter_expression
Sets the parsers and plugins before extraction. Args: configuration (ProcessingConfiguration): processing configuration. session (Session): session.
juraj-google-style
def rebuild(cls, session, tree_id=None): trees = session.query(cls).filter_by(parent_id=None) if tree_id: trees = trees.filter_by(tree_id=tree_id) for tree in trees: cls.rebuild_tree(session, tree.tree_id)
This function rebuilds the tree. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session Kwargs: tree_id (int or str): id of tree, default None Example: * :mod:`sqlalchemy_mptt.tests.TestTree.test_rebuild`
codesearchnet
def get_example_from_prop_spec(self, prop_spec, from_allof=False): easy_keys = ['example', 'x-example', 'default'] for key in easy_keys: if key in prop_spec.keys() and self.use_example: return prop_spec[key] if 'enum' in prop_spec.keys(): return prop_spec['enum'][0] if '$ref' in prop_spec.keys(): return self._example_from_definition(prop_spec) if 'allOf' in prop_spec.keys(): return self._example_from_allof(prop_spec) if 'type' not in prop_spec: return self._example_from_complex_def(prop_spec) if prop_spec['type'] == 'object': example, additional_properties = self._get_example_from_properties(prop_spec) if additional_properties or from_allof: return example return [example] if prop_spec['type'] == 'array' or (isinstance(prop_spec['type'], list) and prop_spec['type'][0] == 'array'): return self._example_from_array_spec(prop_spec) if prop_spec['type'] == 'file': return (StringIO('my file contents'), 'hello world.txt') if 'format' in prop_spec.keys() and prop_spec['format'] == 'date-time': return self._get_example_from_basic_type('datetime')[0] if isinstance(prop_spec['type'], list): return self._get_example_from_basic_type(prop_spec['type'][0])[0] logging.info("falling back to basic type, no other match found") return self._get_example_from_basic_type(prop_spec['type'])[0]
Return an example value from a property specification. Args: prop_spec: the specification of the property. from_allof: whether these properties are part of an allOf section Returns: An example value
juraj-google-style
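A stripped-down sketch of the fallback order described in the docstring (explicit example/default keys first, then the first enum member, then a canned value per basic type); the spec dicts and canned defaults here are hypothetical, not the library's exact behavior for every case.

BASIC_DEFAULTS = {'string': 'string', 'integer': 42, 'number': 5.5, 'boolean': True}

def example_from_prop_spec(prop_spec):
    # 1. Explicit example/default values win.
    for key in ('example', 'x-example', 'default'):
        if key in prop_spec:
            return prop_spec[key]
    # 2. Otherwise the first enum member.
    if 'enum' in prop_spec:
        return prop_spec['enum'][0]
    # 3. Otherwise a canned value for the basic type.
    return BASIC_DEFAULTS.get(prop_spec.get('type'), None)

print(example_from_prop_spec({'type': 'integer', 'default': 7}))       # 7
print(example_from_prop_spec({'type': 'string', 'enum': ['a', 'b']}))  # 'a'
print(example_from_prop_spec({'type': 'number'}))                      # 5.5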
def __recognize_scalar(self, node: yaml.Node, expected_type: Type) -> RecResult: logger.debug('Recognizing as a scalar') if (isinstance(node, yaml.ScalarNode) and (node.tag == scalar_type_to_tag[expected_type])): return ([expected_type], '') message = 'Failed to recognize a {}\n{}\n'.format(type_to_desc(expected_type), node.start_mark) return ([], message)
Recognize a node that we expect to be a scalar. Args: node: The node to recognize. expected_type: The type it is expected to be. Returns: A list of recognized types and an error message
codesearchnet
def to_qasm(self, header: Optional[str]=None, precision: int=10, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT) -> str: return str(self._to_qasm_output(header, precision, qubit_order))
Returns QASM equivalent to the circuit. Args: header: A multi-line string that is placed in a comment at the top of the QASM. Defaults to a cirq version specifier. precision: Number of digits to use when representing numbers. qubit_order: Determines how qubits are ordered in the QASM register.
codesearchnet
def VisitUnionType(self, union): intersection = self.hierarchy.ExpandSuperClasses(str(union.type_list[0])) for t in union.type_list[1:]: intersection.intersection_update(self.hierarchy.ExpandSuperClasses(str(t))) new_type_list = tuple((pytd.NamedType(cls) for cls in intersection if not self.hierarchy.HasSubClassInSet(cls, intersection))) if not new_type_list: return union return pytd_utils.JoinTypes(new_type_list)
Given a union type, try to find a simplification by using superclasses. This is a lossy optimization that tries to map a list of types to a common base type. For example, int and bool are both base classes of int, so it would convert "Union[int, bool]" to "int". Arguments: union: A union type. Returns: A simplified type, if available.
github-repos
def __init__(self, vendor_identification=None, attribute_name=None): super(AttributeReference, self).__init__( tag=enums.Tags.ATTRIBUTE_REFERENCE ) self._vendor_identification = None self._attribute_name = None self.vendor_identification = vendor_identification self.attribute_name = attribute_name
Construct an AttributeReference structure. Args: vendor_identification (string): A string identifying the vendor associated with the attribute. Optional, defaults to None. Required for read/write. attribute_name (string): A string containing the attribute name. Optional, defaults to None. Required for read/write.
juraj-google-style
def launch(self, workflow): try: r = self.gbdx_connection.post(self.workflows_url, json=workflow) try: r.raise_for_status() except: print(('GBDX API Status Code: %s' % r.status_code)) print(('GBDX API Response: %s' % r.text)) r.raise_for_status() workflow_id = r.json()['id'] return workflow_id except TypeError: self.logger.debug('Workflow not launched!')
Launches GBDX workflow. Args: workflow (dict): Dictionary specifying workflow tasks. Returns: Workflow id (str).
codesearchnet
def DisjoinCalendars(self, cutoff): def TruncatePeriod(service_period, start, end): 'Truncate the service period to into the range [start, end].\n\n Args:\n service_period: The service period to truncate.\n start: The start date as a string in YYYYMMDD format.\n end: The end date as a string in YYYYMMDD format.\n ' service_period.start_date = max(service_period.start_date, start) service_period.end_date = min(service_period.end_date, end) dates_to_delete = [] for k in service_period.date_exceptions: if ((k < start) or (k > end)): dates_to_delete.append(k) for k in dates_to_delete: del service_period.date_exceptions[k] year = int(cutoff[:4]) month = int(cutoff[4:6]) day = int(cutoff[6:8]) cutoff_date = datetime.date(year, month, day) one_day_delta = datetime.timedelta(days=1) before = (cutoff_date - one_day_delta).strftime('%Y%m%d') for a in self.feed_merger.a_schedule.GetServicePeriodList(): TruncatePeriod(a, 0, before) for b in self.feed_merger.b_schedule.GetServicePeriodList(): TruncatePeriod(b, cutoff, ('9' * 8))
Forces the old and new calendars to be disjoint about a cutoff date. This truncates the service periods of the old schedule so that service stops one day before the given cutoff date and truncates the new schedule so that service only begins on the cutoff date. Args: cutoff: The cutoff date as a string in YYYYMMDD format. The timezone is the same as used in the calendar.txt file.
codesearchnet
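A quick sketch of the cutoff arithmetic used above: the old schedule is truncated to end the day before the cutoff, the new one to start on it (the date is made up).

import datetime

cutoff = '20240301'  # hypothetical YYYYMMDD cutoff
cutoff_date = datetime.date(int(cutoff[:4]), int(cutoff[4:6]), int(cutoff[6:8]))
before = (cutoff_date - datetime.timedelta(days=1)).strftime('%Y%m%d')

old_range = ('0', before)      # old feed: service up to the day before the cutoff
new_range = (cutoff, '9' * 8)  # new feed: service from the cutoff onwards
print(old_range, new_range)    # ('0', '20240229') ('20240301', '99999999')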
def present_weather_codes(self, value=None): if (value is not None): try: value = int(value) except ValueError: raise ValueError('value {} need to be of type int for field `present_weather_codes`'.format(value)) self._present_weather_codes = value
Corresponds to IDD Field `present_weather_codes` Args: value (int): value for IDD Field `present_weather_codes` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def format_terminal_row(headers, example_row): def format_column(col): if isinstance(col, str): return '{{:{w}.{w}}}' return '{{:<{w}}}' widths = [max(len(h), len(str(d))) for h, d in zip(headers, example_row)] original_last_width = widths[-1] if sys.stdout.isatty(): widths[-1] = max( len(headers[-1]), tty.width() - sum(w + 2 for w in widths[0:-1]) - 3) cols = [format_column(c).format(w=w) for c, w in zip(example_row, widths)] format_string = ' '.join(cols) if original_last_width > widths[-1]: format_string += '...' return format_string
Uses headers and a row of example data to generate a format string for printing a single row of data. Args: headers (tuple of strings): The headers for each column of data example_row (tuple): A representative tuple of strings or ints Returns string: A format string with a size for each column
juraj-google-style
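A short sketch of the format string the widths produce, leaving out the TTY-width handling; the headers and example row are invented.

headers = ('name', 'count', 'description')
example_row = ('widget', 12, 'a short text')

# Width of each column = max(len(header), len(str(example value))).
widths = [max(len(h), len(str(d))) for h, d in zip(headers, example_row)]

def column_format(value, width):
    # Strings are truncated to the column width; other values are simply left-aligned.
    return '{{:{w}.{w}}}'.format(w=width) if isinstance(value, str) else '{{:<{w}}}'.format(w=width)

fmt = ' '.join(column_format(v, w) for v, w in zip(example_row, widths))
print(fmt)                                      # {:6.6} {:<5} {:12.12}
print(fmt.format('widget', 12, 'a short text'))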
def _begin_connection_action(self, action): connection_id = action.data['connection_id'] internal_id = action.data['internal_id'] callback = action.data['callback'] if self._get_connection_state(connection_id) != self.Disconnected: callback(connection_id, self.id, False, 'Connection ID is already in use for another connection') return if self._get_connection_state(internal_id) != self.Disconnected: callback(connection_id, self.id, False, 'Internal ID is already in use for another connection') return conn_data = { 'state': self.Connecting, 'microstate': None, 'connection_id': connection_id, 'internal_id': internal_id, 'action': action, 'context': action.data['context'] } self._connections[connection_id] = conn_data self._int_connections[internal_id] = conn_data
Begin a connection attempt Args: action (ConnectionAction): the action object describing what we are connecting to
juraj-google-style
def make(target='all', dir='.', **kwargs): if (not fs.isfile(fs.path(dir, 'Makefile'))): raise NoMakefileError("No makefile in '{}'".format(fs.abspath(dir))) fs.cd(dir) if ('timeout' not in kwargs): kwargs['timeout'] = 300 (ret, out, err) = system.run(['make', target], **kwargs) fs.cdpop() if (ret > 0): if re.search(_BAD_TARGET_RE, err): raise NoTargetError("No rule for target '{}'".format(target)) else: raise MakeError("Target '{}' failed".format(target)) raise MakeError('Failed') return (ret, out, err)
Run make. Arguments: target (str, optional): Name of the target to build. Defaults to "all". dir (str, optional): Path to directory containing Makefile. **kwargs (optional): Any additional arguments to be passed to system.run(). Returns: (int, str, str): The first element is the return code of the make command. The second and third elements are the stdout and stderr of the process. Raises: NoMakefileError: In case a Makefile is not found in the target directory. NoTargetError: In case the Makefile does not support the requested target. MakeError: In case the target rule fails.
codesearchnet
def __init__(self, callback, *args, interval=5): self.interval = interval self.cb_args = args self.callback = callback self._wake_up_time = time.time() + 1 self._kill_event = threading.Event() self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,)) self._thread.daemon = True self._thread.start()
Initialize the flowcontrol object We start the timer thread here Args: - dfk (DataFlowKernel) : DFK object to track parsl progress KWargs: - threshold (int) : Tasks after which the callback is triggered - interval (int) : seconds after which timer expires
juraj-google-style
def least_squares_effective_mass(cartesian_k_points, eigenvalues): if (not points_are_in_a_straight_line(cartesian_k_points)): raise ValueError('k-points are not collinear') dk = (cartesian_k_points - cartesian_k_points[0]) mod_dk = np.linalg.norm(dk, axis=1) delta_e = (eigenvalues - eigenvalues[0]) effective_mass = (1.0 / ((np.polyfit(mod_dk, eigenvalues, 2)[0] * ev_to_hartree) * 2.0)) return effective_mass
Calculate the effective mass using a least squares quadratic fit. Args: cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit. Returns: (float): The fitted effective mass Notes: If the k-points do not sit on a straight line a ValueError will be raised.
codesearchnet
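A numeric sketch of the underlying fit: for a parabolic band E(k) = k^2 / (2 m*) in atomic units, the quadratic coefficient c2 of a polyfit in |k| satisfies m* = 1 / (2 c2). The data below are synthetic and skip the eV-to-Hartree conversion the real function applies.

import numpy as np

# Synthetic band: E(k) = k^2 / (2 * m_true) in Hartree, k in 1/bohr (atomic units).
m_true = 0.5
k = np.linspace(0.0, 0.2, 11)
energies = k ** 2 / (2.0 * m_true)

c2 = np.polyfit(k, energies, 2)[0]  # quadratic coefficient of the fit
m_eff = 1.0 / (2.0 * c2)
print(m_eff)                        # ~0.5, recovering m_true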
def _broadcast_dynamic_shape_one_layer(a, b): a_0 = a[0] b_0 = b[0] def broadcast_from_a(): a_layer = array_ops.zeros(b_0, dtype=b_0.dtype) b_layer = math_ops.range(b_0) target = b return [a_layer, b_layer, target] a_static = tensor_util.constant_value(a) if a_static is not None and a_static[0] == 1: [a_gi, b_gi, target] = broadcast_from_a() a_layer = _LayerBroadcaster.from_gather_index(a_gi) b_layer = _LayerBroadcaster.from_gather_index(b_gi) return [a_layer, b_layer, target] def broadcast_from_b(): a_layer = math_ops.range(a_0) b_layer = array_ops.zeros(a_0, dtype=a_0.dtype) target = a return [a_layer, b_layer, target] b_static = tensor_util.constant_value(b) if b_static is not None and b_static[0] == 1: [a_gi, b_gi, target] = broadcast_from_b() a_layer = _LayerBroadcaster.from_gather_index(a_gi) b_layer = _LayerBroadcaster.from_gather_index(b_gi) return [a_layer, b_layer, target] def broadcast_noop(): a_layer = math_ops.range(a_0) b_layer = math_ops.range(b_0) target = b return [a_layer, b_layer, target] can_broadcast_from_a = math_ops.equal(a_0, 1) can_broadcast_from_b = math_ops.equal(b_0, 1) def broadcast_not_from_a(): return cond.cond(can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop) nrows_equal = math_ops.equal(a_0, b_0) can_broadcast = math_ops.logical_or(can_broadcast_from_a, math_ops.logical_or(can_broadcast_from_b, nrows_equal)) check_can_broadcast = check_ops.assert_equal(can_broadcast, True, message='Cannot broadcast') results = cond.cond(can_broadcast_from_a, true_fn=broadcast_from_a, false_fn=broadcast_not_from_a) results = [control_flow_ops.with_dependencies([check_can_broadcast], x) for x in results] [a_gi, b_gi, target] = results a_layer = _LayerBroadcaster.from_gather_index(a_gi) b_layer = _LayerBroadcaster.from_gather_index(b_gi) return [a_layer, b_layer, target]
Broadcast two vectors, given their shapes. Args: a: the number of rows in a. b: the number of rows in b. Returns: (layer_a, layer_b, target_shape) layer_a is a _LayerBroadcaster from a to the target_shape. layer_b is a _LayerBroadcaster from b to the target_shape. target_shape is the target_shape Raises: InvalidArgumentError if the shapes are not consistent.
github-repos
def exchange(self, pubkey): try: return self.priv.exchange(c_ec.ECDH(), pubkey.publ) except ValueError as e: raise s_exc.BadEccExchange(mesg=str(e))
Perform a ECDH key exchange with a public key. Args: pubkey (PubKey): A PubKey to perform the ECDH with. Returns: bytes: The ECDH bytes. This is deterministic for a given pubkey and private key.
codesearchnet
def __init__(self, learning_rate, global_step, initial_gradient_squared_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='AdagradDA'): if initial_gradient_squared_accumulator_value <= 0.0: raise ValueError('initial_gradient_squared_accumulator_value must be positive: %s' % initial_gradient_squared_accumulator_value) super(AdagradDAOptimizer, self).__init__(use_locking, name) self._learning_rate = learning_rate self._initial_gradient_squared_accumulator_value = initial_gradient_squared_accumulator_value self._learning_rate_tensor = None self._l1_regularization_strength = l1_regularization_strength self._l2_regularization_strength = l2_regularization_strength self._global_step = global_step self._global_step_on_worker = None
Construct a new AdagradDA optimizer. Args: learning_rate: A `Tensor` or a floating point value. The learning rate. global_step: A `Tensor` containing the current training step number. initial_gradient_squared_accumulator_value: A floating point value. Starting value for the accumulators, must be positive. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. use_locking: If `True` use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to "AdagradDA". Raises: ValueError: If the `initial_gradient_squared_accumulator_value` is invalid.
github-repos
def send_cmd(cmd, args, ret): from dvc.daemon import daemon if not Analytics._is_enabled(cmd): return analytics = Analytics() analytics.collect_cmd(args, ret) daemon(["analytics", analytics.dump()])
Collect and send analytics for CLI command. Args: args (list): parsed args for the CLI command. ret (int): return value of the CLI command.
juraj-google-style
def get_signature(self, base_commit=None): if (base_commit is None): base_commit = 'HEAD' self.run('add', '-A', self.path) sha = self.run('rev-parse', '--verify', base_commit).strip() diff = self.run('diff', sha).strip() if (len(diff) == 0): try: return self.get_signature((base_commit + '~1')) except CommandError: pass h = hashlib.sha1() h.update(sha) h.update(diff) return h.hexdigest()
Get the signature of the current state of the repository. TODO right now `get_signature` is an effectful process in that it adds all untracked files to staging. This is the only way to get accurate diffs on new files. This is ok because we only use it on a disposable copy of the repo. Args: base_commit - the base commit ('HEAD', sha, etc.) Returns: str
codesearchnet
def _execute_primitives(self, commands): for p in commands: if self._scanchain and self._scanchain._debug: print(" Executing", p) p.execute(self)
Run a list of executable primitives on this controller, and distribute the returned data to the associated TDOPromises. Args: commands: A list of Executable Primitives to be run in order.
juraj-google-style
def _multiplex(self, target_gate, list_of_angles): list_len = len(list_of_angles) local_num_qubits = (int(math.log2(list_len)) + 1) q = QuantumRegister(local_num_qubits) circuit = QuantumCircuit(q, name=('multiplex' + local_num_qubits.__str__())) lsb = q[0] msb = q[(local_num_qubits - 1)] if (local_num_qubits == 1): circuit.append(target_gate(list_of_angles[0]), [q[0]]) return circuit angle_weight = scipy.kron([[0.5, 0.5], [0.5, (- 0.5)]], np.identity((2 ** (local_num_qubits - 2)))) list_of_angles = angle_weight.dot(np.array(list_of_angles)).tolist() multiplex_1 = self._multiplex(target_gate, list_of_angles[0:(list_len // 2)]) circuit.append(multiplex_1.to_instruction(), q[0:(- 1)]) circuit.append(CnotGate(), [msb, lsb]) multiplex_2 = self._multiplex(target_gate, list_of_angles[(list_len // 2):]) if (list_len > 1): circuit.append(multiplex_2.to_instruction().mirror(), q[0:(- 1)]) else: circuit.append(multiplex_2.to_instruction(), q[0:(- 1)]) circuit.append(CnotGate(), [msb, lsb]) return circuit
Return a recursive implementation of a multiplexor circuit, where each instruction itself has a decomposition based on smaller multiplexors. The LSB is the multiplexor "data" and the other bits are multiplexor "select". Args: target_gate (Gate): Ry or Rz gate to apply to target qubit, multiplexed over all other "select" qubits list_of_angles (list[float]): list of rotation angles to apply Ry and Rz Returns: DAGCircuit: the circuit implementing the multiplexor's action
codesearchnet
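The angle recombination above is a Kronecker-structured transform; a small NumPy sketch of the weight matrix for four hypothetical angles (np.kron stands in for the scipy.kron call used in the code):

import numpy as np

list_of_angles = [0.1, 0.2, 0.3, 0.4]  # hypothetical rotation angles (two "select" qubits)
local_num_qubits = int(np.log2(len(list_of_angles))) + 1

angle_weight = np.kron([[0.5, 0.5], [0.5, -0.5]],
                       np.identity(2 ** (local_num_qubits - 2)))
new_angles = angle_weight.dot(np.array(list_of_angles))
print(new_angles)  # first half feeds the lower multiplexor, second half the upper one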
def dtype_checker_df(df, dtype, return_=None): dtype_range = dtype_ranges[dtype] df_out_of_range = (((df < dtype_range[0]) | (df > dtype_range[1])) | (~ np.isfinite(df))) if df_out_of_range.any().any(): if (return_ == 'colsums'): df_out_of_range = df_out_of_range.apply(sum, axis=0) elif (return_ == 'rowsums'): df_out_of_range = df_out_of_range.apply(sum, axis=1) elif (return_ == 'all'): df_out_of_range = df_out_of_range else: df_out_of_range = 1 else: df_out_of_range = 0 return df_out_of_range
Check if there are NaN values or values outside of a given datatype range. Arguments: df {dataframe} -- A dataframe. dtype {str} -- The datatype to check for. Keyword Arguments: return_ {str} -- Returns a boolean dataframe with the values not in the range of the dtype ('all'), the row ('rowsums') or column ('colsums') sums of that dataframe or an exit code 1 (None, default) if any of the values is not in the range. Returns: [int or DataFrame or Series] -- If no value is out of the range exit code 0 is returned, else depends on return_.
codesearchnet
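A hedged pandas sketch of the same range check for a single dtype, assuming a hypothetical dtype_ranges mapping like the one the function relies on:

import numpy as np
import pandas as pd

# Hypothetical stand-in for the module's dtype_ranges mapping.
dtype_ranges = {'int8': (-128, 127)}

df = pd.DataFrame({'a': [1.0, 200.0], 'b': [np.nan, 3.0]})
low, high = dtype_ranges['int8']

out_of_range = (df < low) | (df > high) | ~np.isfinite(df)
print(out_of_range)              # True where a value cannot be stored as int8
print(out_of_range.sum(axis=0))  # per-column counts, i.e. the 'colsums' return mode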
def cancel_signature_request(self, signature_request_id): request = self._get_request() request.post(url=self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id, get_json=False)
Cancels a SignatureRequest Cancels a SignatureRequest. After canceling, no one will be able to sign or access the SignatureRequest or its documents. Only the requester can cancel and only before everyone has signed. Args: signature_request_id (str): The id of the signature request to cancel Returns: None
juraj-google-style
def _render_timestep(self, t: int, s: Fluents, a: Fluents, f: Fluents, r: np.float32) -> None: print('============================') print('TIME = {}'.format(t)) print('============================') fluent_variables = self._compiler.rddl.action_fluent_variables self._render_fluent_timestep('action', a, fluent_variables) fluent_variables = self._compiler.rddl.interm_fluent_variables self._render_fluent_timestep('interms', f, fluent_variables) fluent_variables = self._compiler.rddl.state_fluent_variables self._render_fluent_timestep('states', s, fluent_variables) self._render_reward(r)
Prints fluents and rewards for the given timestep `t`. Args: t (int): timestep s (Sequence[Tuple[str], np.array]: State fluents. a (Sequence[Tuple[str], np.array]: Action fluents. f (Sequence[Tuple[str], np.array]: Interm state fluents. r (np.float32): Reward.
codesearchnet
def score_prediction_adapter(keyed_prediction: tuple[KeyT, PredictionResult]) -> tuple[KeyT, AnomalyPrediction]: key, prediction = keyed_prediction score = prediction.inference assert isinstance(score, SupportsFloat) return (key, AnomalyPrediction(score=float(score)))
Extracts a float score from `PredictionResult.inference` and wraps it. Takes a keyed `PredictionResult` from common ModelHandler output, assumes its `inference` attribute is a float-convertible score, and returns the key paired with an `AnomalyPrediction` containing that float score. Args: keyed_prediction: tuple of `(key, PredictionResult)`. `PredictionResult` must have an `inference` attribute supporting float conversion. Returns: tuple of `(key, AnomalyPrediction)` with the extracted score. Raises: AssertionError: If `PredictionResult.inference` doesn't support float().
github-repos
def optional(self, value = None): if value is None: return self._optional else: self._optional = value and True or False
Optional Getter/Setter method for optional flag Args: value (bool): If set, the method is a setter Returns: bool | None
juraj-google-style
def unload(self, keepables=None): to_del = [ds_id for ds_id, projectable in self.datasets.items() if ds_id not in self.wishlist and (not keepables or ds_id not in keepables)] for ds_id in to_del: LOG.debug("Unloading dataset: %r", ds_id) del self.datasets[ds_id]
Unload all unneeded datasets. Datasets are considered unneeded if they weren't directly requested or added to the Scene by the user or they are no longer needed to generate composites that have yet to be generated. Args: keepables (iterable): DatasetIDs to keep whether they are needed or not.
juraj-google-style
def __getitem__(self, index): rank = self.rank if isinstance(index, slice): if index.step is not None and index.step != 1: raise IndexError('Cannot stride through a shape') start = index.start stop = index.stop if start is None: start = 0 start = _fix_start_index(start, rank, self.num_row_partitions) stop = _fix_stop_index(stop, rank) return self._slice_shape(start, stop) elif isinstance(index, int): if index < 0: if rank is None: raise ValueError('Rank must be known to use __getitem__ with a negative index.') return self._dimension(rank + index) return self._dimension(index) else: raise TypeError('Argument is not an int or a slice')
Returns a dimension or a slice of the shape. Ragged shapes can have ragged dimensions that depend upon other dimensions. Therefore, if you ask for a dimension that is ragged, this function returns a ValueError. For similar reasons, if a slice is selected that includes a ragged dimension without including the zero dimension, then this fails. Any slice that does not start at zero will return a shape with num_row_partitions == 0. Args: index: the index: can be an int or a slice. Raises: IndexError: if the index is not in range. ValueError: if the rank is unknown, or a ragged rank is requested incorrectly.
github-repos
def get_access_token_from_cli(): if (('ACC_CLOUD' in os.environ) and ('MSI_ENDPOINT' in os.environ)): endpoint = os.environ['MSI_ENDPOINT'] headers = {'Metadata': 'true'} body = {'resource': 'https: ret = requests.post(endpoint, headers=headers, data=body) return ret.json()['access_token'] else: home = os.path.expanduser('~') sub_username = '' azure_profile_path = ((((home + os.sep) + '.azure') + os.sep) + 'azureProfile.json') if (os.path.isfile(azure_profile_path) is False): print(('Error from get_access_token_from_cli(): Cannot find ' + azure_profile_path)) return None with codecs.open(azure_profile_path, 'r', 'utf-8-sig') as azure_profile_fd: subs = json.load(azure_profile_fd) for sub in subs['subscriptions']: if (sub['isDefault'] == True): sub_username = sub['user']['name'] if (sub_username == ''): print(('Error from get_access_token_from_cli(): Default subscription not found in ' + azure_profile_path)) return None access_keys_path = ((((home + os.sep) + '.azure') + os.sep) + 'accessTokens.json') if (os.path.isfile(access_keys_path) is False): print(('Error from get_access_token_from_cli(): Cannot find ' + access_keys_path)) return None with open(access_keys_path, 'r') as access_keys_fd: keys = json.load(access_keys_fd) for key in keys: if (key['userId'] == sub_username): if ('accessToken' not in keys[0]): print(('Error from get_access_token_from_cli(): accessToken not found in ' + access_keys_path)) return None if ('tokenType' not in keys[0]): print(('Error from get_access_token_from_cli(): tokenType not found in ' + access_keys_path)) return None if ('expiresOn' not in keys[0]): print(('Error from get_access_token_from_cli(): expiresOn not found in ' + access_keys_path)) return None expiry_date_str = key['expiresOn'] if ('T' in expiry_date_str): exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%dT%H:%M:%S.%fZ') else: exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%d %H:%M:%S.%f') if (exp_date < dt.now()): continue else: return key['accessToken'] print("Error from get_access_token_from_cli(): token expired. Run 'az login'") return None
Get an Azure authentication token from CLI's cache. Will only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login' recently), or if you are running in Azure Cloud Shell (aka cloud console) Returns: An Azure authentication token string.
codesearchnet
def add_argument(self, parser, bootstrap=False): if self.cli_expose: for child in self.children.values(): child.add_argument(parser, bootstrap)
Add dict-style item as an argument to the given parser. The dict item will take all the nested items in the dictionary and namespace them with the dict name, adding each child item as their own CLI argument. Examples: A non-nested dict item with the name 'db' and children named 'port' and 'host' will result in the following being valid CLI args: ['--db-host', 'localhost', '--db-port', '1234'] Args: parser (argparse.ArgumentParser): The parser to add this item to. bootstrap (bool): Flag to indicate whether you only want to mark this item as required or not.
codesearchnet
def _pypi_push(dist): for filename in os.listdir(dist): full_path = os.path.join(dist, filename) if os.path.isfile(full_path): _shell('twine register ' + shlex.quote(full_path), check=False) _shell('twine upload ' + shlex.quote(dist + '/*'))
Push created package to PyPI. Requires the following defined environment variables: - TWINE_USERNAME: The PyPI username to upload this package under - TWINE_PASSWORD: The password to the user's account Args: dist (str): The distribution to push. Must be a valid directory; shell globs are NOT allowed.
juraj-google-style
def list_registered_stateful_ops_without_inputs(): return set([name for (name, op) in op_def_registry.get_registered_ops().items() if (op.is_stateful and (not op.input_arg))])
Returns set of registered stateful ops that do not expect inputs. This list is used to identify the ops to be included in the state-graph and that are subsequently fed into the apply-graphs. Returns: A set of strings.
codesearchnet
def test_src_dir_path(relative_path): return _googletest.test_src_dir_path(relative_path)
Creates an absolute test srcdir path given a relative path. Args: relative_path: a path relative to tensorflow root. e.g. "core/platform". Returns: An absolute path to the linked in runfiles.
github-repos
def _get_parameter_info(param_name, documented_params, source_args_dict, param_type, optional): description = None shape = None shape_string = '' is_documented = True additional_info = None if param_name in documented_params: if param_type == '' and documented_params[param_name].get('type', None) is not None: param_type = documented_params[param_name]['type'] optional = documented_params[param_name]['optional'] shape = documented_params[param_name]['shape'] shape_string = shape if shape else '' additional_info = documented_params[param_name]['additional_info'] or '' description = f'{documented_params[param_name]["description"]}\n' elif param_name in source_args_dict: shape = source_args_dict[param_name]['shape'] shape_string = ' ' + shape if shape else '' description = source_args_dict[param_name]['description'] additional_info = None else: is_documented = False optional_string = ', *optional*' if optional else '' return (param_type, optional_string, shape_string, additional_info, description, is_documented)
Get parameter documentation details from the appropriate source. Tensor shape, optional status and description are taken from the custom docstring in priority if available. Type is taken from the function signature first, then from the custom docstring if missing from the signature Args: param_name (`str`): Name of the parameter documented_params (`dict`): Dictionary of documented parameters (manually specified in the docstring) source_args_dict (`dict`): Default source args dictionary to use if not in documented_params param_type (`str`): Current parameter type (may be updated) optional (`bool`): Whether the parameter is optional (may be updated)
github-repos
def _padding_value_to_tensor(value, output_type): value = ops.convert_to_tensor(value, name='padding_value') if not value.shape.is_compatible_with(tensor_shape.TensorShape([])): raise ValueError(f'Invalid `padding_values`. `padding_values` values should be scalars, but got {value.shape}.') if value.dtype != output_type: raise TypeError(f'Invalid `padding_values`. `padding_values` values type {value.dtype} does not match type {output_type} of the corresponding input component.') return value
Converts the padding value to a tensor. Args: value: The padding value. output_type: Its expected dtype. Returns: A scalar `Tensor`. Raises: ValueError: if the padding value is not a scalar. TypeError: if the padding value's type does not match `output_type`.
github-repos
def assert_text(self, *args, **kwargs): query = TextQuery(*args, **kwargs) @self.synchronize(wait=query.wait) def assert_text(): count = query.resolve_for(self) if not (matches_count(count, query.options) and (count > 0 or expects_none(query.options))): raise ExpectationNotMet(query.failure_message) return True return assert_text()
Asserts that the page or current node has the given text content, ignoring any HTML tags. Args: *args: Variable length argument list for :class:`TextQuery`. **kwargs: Arbitrary keyword arguments for :class:`TextQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
juraj-google-style
def __init__(self, inputs=[], outputs=[], attributes=[], scripts=[]): super(Transaction, self).__init__() self.inputs = inputs self.outputs = outputs self.Attributes = attributes self.scripts = scripts self.InventoryType = 0x01 self.__references = None
Create an instance. Args: inputs (list): of neo.Core.CoinReference.CoinReference. outputs (list): of neo.Core.TX.Transaction.TransactionOutput items. attributes (list): of neo.Core.TX.TransactionAttribute. scripts:
juraj-google-style
def equal_distribution_folds(y, folds=2): n, classes = y.shape dist = y.sum(axis=0).astype('float') dist /= dist.sum() index_list = [] fold_dist = np.zeros((folds, classes), dtype='float') for _ in range(folds): index_list.append([]) for i in range(n): if i < folds: target_fold = i else: normed_folds = fold_dist.T / fold_dist.sum(axis=1) how_off = normed_folds.T - dist target_fold = np.argmin( np.dot((y[i] - .5).reshape(1, -1), how_off.T)) fold_dist[target_fold] += y[i] index_list[target_fold].append(i) logger.debug("Fold distributions:") logger.debug(fold_dist) return index_list
Creates `folds` number of indices that has roughly balanced multi-label distribution. Args: y: The multi-label outputs. folds: The number of folds to create. Returns: `folds` number of indices that have roughly equal multi-label distributions.
juraj-google-style
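A toy run of the greedy assignment idea described above — each sample is placed in the fold whose label distribution it pushes closest to the overall one. This is a simplified self-contained sketch, not the library function itself.

import numpy as np

y = np.array([[1, 0], [0, 1], [1, 0], [0, 1], [1, 1], [1, 0]], dtype=float)
folds = 2
dist = y.sum(axis=0) / y.sum()  # overall label distribution

index_list = [[] for _ in range(folds)]
fold_dist = np.zeros((folds, y.shape[1]))
for i, row in enumerate(y):
    if i < folds:  # seed each fold with one sample
        target = i
    else:          # otherwise pick the fold this row balances best
        normed = fold_dist / fold_dist.sum(axis=1, keepdims=True)
        target = int(np.argmin(np.dot((row - 0.5).reshape(1, -1), (normed - dist).T)))
    fold_dist[target] += row
    index_list[target].append(i)

print(index_list, fold_dist)  # indices per fold and the resulting label counts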
def __init__(self,consumer_key,consumer_secret,access_token=None): self.consumer = oauth.Consumer(consumer_key, consumer_secret) if access_token: self.setAccessToken(access_token)
Initializes the splitwise class. Sets consumer and access token Args: consumer_key (str) : Consumer Key provided by Splitwise consumer_secret (str): Consumer Secret provided by Splitwise access_token (:obj: `dict`) Access Token is a combination of oauth_token and oauth_token_secret Returns: A Splitwise Object
juraj-google-style
def UpdateNumberOfEventTags( self, number_of_consumed_event_tags, number_of_produced_event_tags): consumed_event_tags_delta = 0 if number_of_consumed_event_tags is not None: if number_of_consumed_event_tags < self.number_of_consumed_event_tags: raise ValueError( 'Number of consumed event tags smaller than previous update.') consumed_event_tags_delta = ( number_of_consumed_event_tags - self.number_of_consumed_event_tags) self.number_of_consumed_event_tags = number_of_consumed_event_tags self.number_of_consumed_event_tags_delta = consumed_event_tags_delta produced_event_tags_delta = 0 if number_of_produced_event_tags is not None: if number_of_produced_event_tags < self.number_of_produced_event_tags: raise ValueError( 'Number of produced event tags smaller than previous update.') produced_event_tags_delta = ( number_of_produced_event_tags - self.number_of_produced_event_tags) self.number_of_produced_event_tags = number_of_produced_event_tags self.number_of_produced_event_tags_delta = produced_event_tags_delta return consumed_event_tags_delta > 0 or produced_event_tags_delta > 0
Updates the number of event tags. Args: number_of_consumed_event_tags (int): total number of event tags consumed by the process. number_of_produced_event_tags (int): total number of event tags produced by the process. Returns: bool: True if either number of event tags has increased. Raises: ValueError: if the consumed or produced number of event tags is smaller than the value of the previous update.
juraj-google-style
def get_plot_frame(map_obj, key_map, cached=False): if map_obj.kdims and len(map_obj.kdims) == 1 and map_obj.kdims[0] == 'Frame': return map_obj.last key = tuple(key_map[kd.name] for kd in map_obj.kdims if kd.name in key_map) if key in map_obj.data and cached: return map_obj.data[key] else: try: return map_obj[key] except KeyError: return None except StopIteration as e: raise e except Exception: print(traceback.format_exc()) return None
Returns the current frame in a mapping given a key mapping. Args: map_obj: Nested Dimensioned object key_map: Dictionary mapping between dimensions and key value cached: Whether to allow looking up key in cache Returns: The item in the mapping corresponding to the supplied key.
juraj-google-style
def get(self, node_id): return self.prepare_model(self.client.api.inspect_node(node_id))
Get a node. Args: node_id (string): ID of the node to be inspected. Returns: A :py:class:`Node` object. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def create_template(self, s, provider_name=None): if provider_name is None: provider_name = self.supported_providers[0] return template_exception_handler( lambda: self.get_provider(provider_name).create_template(s), self.error_context )
Creates a template from the given string based on the specified provider or the provider with highest precedence. Args: s: The string to convert to a template. provider_name: The name of the provider to use to create the template.
juraj-google-style
def create_file(self, filename): self.response.write(('Creating file %s\n' % filename)) write_retry_params = gcs.RetryParams(backoff_factor=1.1) gcs_file = gcs.open(filename, 'w', content_type='text/plain', options={'x-goog-meta-foo': 'foo', 'x-goog-meta-bar': 'bar'}, retry_params=write_retry_params) gcs_file.write('abcde\n') gcs_file.write(((('f' * 1024) * 4) + '\n')) gcs_file.close() self.tmp_filenames_to_clean_up.append(filename)
Create a file. The retry_params specified in the open call will override the default retry params for this particular file handle. Args: filename: filename.
codesearchnet
def diff_lineMode(self, text1, text2, deadline): (text1, text2, linearray) = self.diff_linesToChars(text1, text2) diffs = self.diff_main(text1, text2, False, deadline) self.diff_charsToLines(diffs, linearray) self.diff_cleanupSemantic(diffs) diffs.append((self.DIFF_EQUAL, '')) pointer = 0 count_delete = 0 count_insert = 0 text_delete = '' text_insert = '' while (pointer < len(diffs)): if (diffs[pointer][0] == self.DIFF_INSERT): count_insert += 1 text_insert += diffs[pointer][1] elif (diffs[pointer][0] == self.DIFF_DELETE): count_delete += 1 text_delete += diffs[pointer][1] elif (diffs[pointer][0] == self.DIFF_EQUAL): if ((count_delete >= 1) and (count_insert >= 1)): subDiff = self.diff_main(text_delete, text_insert, False, deadline) diffs[((pointer - count_delete) - count_insert):pointer] = subDiff pointer = (((pointer - count_delete) - count_insert) + len(subDiff)) count_insert = 0 count_delete = 0 text_delete = '' text_insert = '' pointer += 1 diffs.pop() return diffs
Do a quick line-level diff on both strings, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time when the diff should be complete by. Returns: Array of changes.
codesearchnet
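A hedged usage sketch through the public entry point of the diff-match-patch package (the line-mode pass above is invoked internally by diff_main when checklines is enabled):

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
text1 = "line one\nline two\nline three\n"
text2 = "line one\nline 2\nline three\n"

# diff_main with checklines=True does a quick line-level pass first (the
# diff_lineMode step above), then rediffs the changed regions character by character.
diffs = dmp.diff_main(text1, text2, True)
dmp.diff_cleanupSemantic(diffs)
print(diffs)  # list of (op, text) tuples: -1 delete, 0 equal, 1 insert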
def _transform_filter_to_sql(filter_block, node, context): expression = filter_block.predicate return _expression_to_sql(expression, node, context)
Transform a Filter block to its corresponding SQLAlchemy expression. Args: filter_block: Filter, the Filter block to transform. node: SqlNode, the node Filter block applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression equivalent to the Filter.predicate expression.
juraj-google-style
def __init__(self, scope, parent):
    CodeStatement.__init__(self, scope, parent)
    self.variables = []
Constructor for declaration statements. Args: scope (CodeEntity): The program scope where this object belongs. parent (CodeEntity): This object's parent in the program tree.
juraj-google-style
def _AddForwardedIps(self, forwarded_ips, interface):
    for address in forwarded_ips:
        self.ip_forwarding_utils.AddForwardedIp(address, interface)
Configure the forwarded IP address on the network interface. Args: forwarded_ips: list, the forwarded IP address strings to configure. interface: string, the output device to use.
codesearchnet
def convert_error(exc_src, exc_dest):
    def wrap(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exc_dest:
                raise
            except exc_src as err:
                reraise(exc_dest, err, sys.exc_info()[2])
        return wrapper
    return wrap
A decorator for reraising exceptions with a different type. Mostly useful for IOError. Args: exc_src (type): The source exception type exc_dest (type): The target exception type.
codesearchnet
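A minimal usage sketch of the decorator above; the decorated function and its arguments are hypothetical.

@convert_error(KeyError, ValueError)
def lookup(mapping, key):
    # A missing key now surfaces as ValueError instead of KeyError.
    return mapping[key]

lookup({'a': 1}, 'b')  # raises ValueError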
def _on_write_request(self, request): if request['connection_handle'] != self._connection_handle: return False attribute_handle = request['attribute_handle'] config_handles = [ ReceiveHeaderChar.config_handle, ReceivePayloadChar.config_handle, StreamingChar.config_handle, TracingChar.config_handle ] if attribute_handle in config_handles: notification_enabled, _ = struct.unpack('<BB', request['value']) if attribute_handle in [ReceiveHeaderChar.config_handle, ReceivePayloadChar.config_handle] and notification_enabled: if attribute_handle == ReceiveHeaderChar.config_handle: self.header_notif = True elif attribute_handle == ReceivePayloadChar.config_handle: self.payload_notif = True if self.header_notif and self.payload_notif: self.device.open_rpc_interface() self._audit("RPCInterfaceOpened") elif attribute_handle == StreamingChar.config_handle: if notification_enabled and not self.streaming: self.streaming = True reports = self.device.open_streaming_interface() if reports is not None: self._queue_reports(*reports) self._audit('StreamingInterfaceOpened') elif not notification_enabled and self.streaming: self.streaming = False self.device.close_streaming_interface() self._audit('StreamingInterfaceClosed') elif attribute_handle == TracingChar.config_handle: if notification_enabled and not self.tracing: self.tracing = True traces = self.device.open_tracing_interface() if traces is not None: self._queue_traces(*traces) self._audit('TracingInterfaceOpened') elif not notification_enabled and self.tracing: self.tracing = False self.device.close_tracing_interface() self._audit('TracingInterfaceClosed') return True elif attribute_handle in [SendHeaderChar.value_handle, SendPayloadChar.value_handle]: if attribute_handle == SendPayloadChar.value_handle: self.rpc_payload = bytearray(request['value']) if len(self.rpc_payload) < 20: self.rpc_payload += bytearray(20 - len(self.rpc_payload)) elif attribute_handle == SendHeaderChar.value_handle: self._defer(self._call_rpc, [bytearray(request['value'])]) return True else: return False
Callback function called when a write request has been received. It is executed in the baBLE working thread: should not be blocking. Args: request (dict): Information about the request - connection_handle (int): The connection handle that sent the request - attribute_handle (int): The attribute handle to write - value (bytes): The value to write
juraj-google-style
def get_servo_status(self):
    data = []
    data.append(9)
    data.append(self.servoid)
    data.append(RAM_READ_REQ)
    data.append(STATUS_ERROR_RAM)
    data.append(BYTE1)
    send_data(data)
    rxdata = []
    try:
        rxdata = SERPORT.read(12)
        return ord(rxdata[9]) & 255
    except:
        raise HerkulexError('could not communicate with motors')
Get the error status of the servo. This function gets the error status (if any) of the servo. Args: none Returns: int: an integer corresponding to the servo status; refer to the datasheet for the error codes.
codesearchnet
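A usage sketch; `servo` is a hypothetical Herkulex servo object exposing the method above.

status = servo.get_servo_status()
if status != 0:
    print('servo reported error code', status)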
def LessThan(self, value):
    self._awql = self._CreateSingleValueCondition(value, '<')
    return self._query_builder
Sets the type of the WHERE clause as "less than". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
juraj-google-style
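An illustrative chain, assuming the builder comes from the googleads AdWords report query builder; the field and report names are assumptions, not taken from the original source.

from googleads import adwords

report_query = (adwords.ReportQueryBuilder()
                .Select('CampaignId', 'Impressions')
                .From('CAMPAIGN_PERFORMANCE_REPORT')
                .Where('Impressions').LessThan(1000)  # the method shown above
                .During('LAST_7_DAYS')
                .Build())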
def get_event_consumer(config, success_channel, error_channel, metrics, **kwargs):
    builder = event_consumer.GPSEventConsumerBuilder(
        config, success_channel, error_channel, metrics, **kwargs)
    return builder.build_event_consumer()
Get a GPSEventConsumer client. A factory function that validates configuration, creates schema validator and parser clients, creates an auth and a pubsub client, and returns an event consumer (:interface:`gordon.interfaces.IRunnable` and :interface:`gordon.interfaces.IMessageHandler`) provider. Args: config (dict): Google Cloud Pub/Sub-related configuration. success_channel (asyncio.Queue): Queue to place a successfully consumed message to be further handled by the ``gordon`` core system. error_channel (asyncio.Queue): Queue to place a message met with errors to be further handled by the ``gordon`` core system. metrics (obj): :interface:`IMetricRelay` implementation. kwargs (dict): Additional keyword arguments to pass to the event consumer. Returns: A :class:`GPSEventConsumer` instance.
codesearchnet
def _ConvertListToObject(cls, json_list):
    list_value = []
    for json_list_element in json_list:
        if isinstance(json_list_element, dict):
            list_value.append(cls._ConvertDictToObject(json_list_element))
        elif isinstance(json_list_element, list):
            list_value.append(cls._ConvertListToObject(json_list_element))
        else:
            list_value.append(json_list_element)
    return list_value
Converts a JSON list into an object. Args: json_list (list[object]): JSON serialized objects. Returns: list[object]: a deserialized list.
codesearchnet
def get_choices_for(self, field):
    choices = self._fields[field].choices
    if isinstance(choices, six.string_types):
        return [(d['value'], d['name']) for d in self._choices_manager.get_all(choices)]
    else:
        return choices
Get the choices for the given field. Args: field (str): Name of field. Returns: List of tuples: [(value, name), ...]
codesearchnet
def dump_size_bytes(self): return self._dump_size_bytes
Size of the dump file. Unit: byte. Returns: If the dump file exists, size of the dump file, in bytes. If the dump file does not exist, None.
github-repos
def load_maps(maps_dir):
    maps_dir = os.path.abspath(maps_dir)
    maps = {}
    for root, dirnames, filenames in os.walk(maps_dir):
        for filename in filenames:
            if filename.endswith(".xml"):
                xml_file = os.path.join(root, filename)
                map = MapSource.from_xml(xml_file, maps_dir)
                if map.id in maps:
                    raise MapSourceException("duplicate map id: {} in file {}".format(map.id, xml_file))
                else:
                    maps[map.id] = map
    return maps
Load all xml map sources from a given directory. Args: maps_dir: path to directory to search for maps Returns: dict of MapSource:
juraj-google-style
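A usage sketch for the function above; the directory path is hypothetical.

maps = load_maps('/usr/share/mapsources')  # hypothetical directory of *.xml map definitions
for map_id, source in sorted(maps.items()):
    print(map_id, source)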
def _get_authenticated_session(self):
    session = requests.Session()
    session.auth = self.auth
    return session
Return an authenticated requests session. Returns: requests.Session: Authenticated session for use.
codesearchnet
def FindMessageTypeByName(self, full_name):
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._descriptors:
        self.FindFileContainingSymbol(full_name)
    return self._descriptors[full_name]
Loads the named descriptor from the pool. Args: full_name: The full name of the descriptor to load. Returns: The descriptor for the named type.
juraj-google-style
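A sketch of a typical descriptor-pool lookup with the protobuf runtime, assuming the default pool; importing timestamp_pb2 first ensures the well-known type is registered.

from google.protobuf import descriptor_pool
from google.protobuf import timestamp_pb2  # registers google.protobuf.Timestamp in the default pool

pool = descriptor_pool.Default()
descriptor = pool.FindMessageTypeByName('google.protobuf.Timestamp')
print(descriptor.full_name, [f.name for f in descriptor.fields])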
def create_sns_topic(self, region): sns = self.session.client('sns', region_name=region) self.log.info('Creating SNS topic for {}/{}'.format(self.account, region)) res = sns.create_topic(Name=self.topic_name) arn = res['TopicArn'] tmpl = get_template('cloudtrail_sns_policy.json') policy = tmpl.render(region=region, account_id=self.account.account_number, topic_name=self.topic_name) sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue=policy) auditlog( event='cloudtrail.create_sns_topic', actor=self.ns, data={ 'account': self.account.account_name, 'region': region } ) return arn
Creates an SNS topic if needed. Returns the ARN of the created SNS topic. Args: region (str): Region name Returns: `str`
juraj-google-style
def _start_app_and_connect(self): self._check_app_installed() self.disable_hidden_api_blacklist() persists_shell_cmd = self._get_persist_command() self.log.info('Launching snippet apk %s with protocol %d.%d', self.package, _PROTOCOL_MAJOR_VERSION, _PROTOCOL_MINOR_VERSION) cmd = _LAUNCH_CMD.format(shell_cmd=persists_shell_cmd, user=self._get_user_command_string(), snippet_package=self.package) start_time = time.perf_counter() self._proc = self._do_start_app(cmd) line = self._read_protocol_line() match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line) if not match or match.group(1) != '1': raise ProtocolVersionError(self._ad, line) line = self._read_protocol_line() match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line) if not match: raise ProtocolVersionError(self._ad, line) self.device_port = int(match.group(1)) self.host_port = utils.get_available_host_port() self._adb.forward(['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port]) self.connect() self.log.debug('Snippet %s started after %.1fs on host port %s', self.package, time.perf_counter() - start_time, self.host_port)
Starts snippet apk on the device and connects to it. After prechecks, this launches the snippet apk with an adb cmd in a standing subprocess, checks the cmd response from the apk for protocol version, then sets up the socket connection over adb port-forwarding. Raises: ProtocolVersionError, if protocol info or port info cannot be retrieved from the snippet apk.
github-repos
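This method runs as part of loading a snippet; from user code it is normally exercised indirectly, roughly as below, with `ad` a Mobly AndroidDevice controller. The package and RPC names are hypothetical.

# Inside a Mobly test, with `ad` an AndroidDevice controller object:
ad.load_snippet('snippet', 'com.example.snippet')  # starts the apk and connects
result = ad.snippet.makeToast('hello')  # RPCs become methods on the attached client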
def _as_document(self, identifier):
    return {
        'identifier': u('{}').format(identifier['identifier']),
        'type': u('{}').format(identifier['type']),
        'name': u('{}').format(identifier['name'])
    }
Converts given identifier to the document indexed by FTS backend. Args: identifier (dict): identifier to convert. Dict contains at least 'identifier', 'type' and 'name' keys. Returns: dict whose structure matches BaseIdentifierIndex._schema.
juraj-google-style
def test(verbosity=1):
    import unittest
    from .tests import test_suite

    unittest.TextTestRunner(verbosity=verbosity).run(test_suite)
Executes all the tests for pyplink. Args: verbosity (int): The verbosity level for :py:mod:`unittest`. Just set ``verbosity`` to an integer higher than ``1`` to have more information about the tests.
juraj-google-style
def link(target, link_to):
    assert isinstance(target, str)
    assert os.path.exists(target)
    assert isinstance(link_to, str)
    abs_path = os.path.dirname(os.path.abspath(link_to))
    if not os.path.isdir(abs_path):
        os.makedirs(abs_path)
    chmod(target)
    os.symlink(target, link_to)
Create a link to a target file or a folder. For simplicity's sake, both target and link_to must be absolute paths and must include the filename of the file or folder. Also do not include any trailing slash. e.g. link('/path/to/file', '/path/to/link') But not: link('/path/to/file', 'path/to/') or link('/path/to/folder/', '/path/to/link') Args: target (str): file or folder the link will point to link_to (str): Link to create
codesearchnet
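A short usage sketch for the function above; both paths are hypothetical absolute paths, as the docstring requires.

# Both arguments are absolute paths that include the final file name,
# with no trailing slash.
link('/opt/data/config.yaml', '/home/user/.config/app/config.yaml')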
def list(self, pattern='*'):
    if self._descriptors is None:
        self._descriptors = self._client.list_resource_descriptors(
            filter_string=self._filter_string)
    return [resource for resource in self._descriptors
            if fnmatch.fnmatch(resource.type, pattern)]
Returns a list of resource descriptors that match the filters. Args: pattern: An optional pattern to further filter the descriptors. This can include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``. Returns: A list of ResourceDescriptor objects that match the filters.
juraj-google-style
def __init__(self, workflow, generator, work): super(Barrier, self).__init__() self.workflow = workflow self.generator = generator if isinstance(work, (list, tuple)): self[:] = list(work) self.was_list = True self.wait_any = False elif isinstance(work, WaitAny): self[:] = list(work.items) self.was_list = True self.wait_any = True else: self[:] = [work] self.was_list = False self.wait_any = False for item in self: assert isinstance(item, WorkItem) item.parent = workflow
Initializer. Args: workflow: WorkflowItem instance this is for. generator: Current state of the WorkflowItem's generator. work: Next set of work to do. May be a single WorkItem object or a list or tuple that contains a set of WorkItems to run in parallel.
juraj-google-style
def _add_sub_parsers(self, top_level_parser, methods_to_parse, class_name):
    description = 'Accessible methods of {}'.format(class_name)
    sub_parsers = top_level_parser.add_subparsers(description=description, dest='method')
    parser_to_method = {}
    for (method_name, parser) in methods_to_parse.items():
        parser_name = parser.get_name() or method_name
        if parser_name.startswith('_'):
            if not self._parse_private:
                continue
            parser_name = parser_name.strip('_')
        parser_name = parser_name.replace('_', '-')
        parser_to_method[parser_name] = method_name
        sub_parsers.add_parser(parser_name, parents=[parser],
                               add_help=False,
                               description=parser.description)
    return parser_to_method
Add all the sub-parsers to the top_level_parser. Args: top_level_parser: the top level parser methods_to_parse: dict mapping method names to their associated argument parsers class_name: name of the decorated class Returns: a dict mapping the registered parser name (i.e. the sub-command name) to the real method name
codesearchnet
def save(self, force=False):
    from time import time
    from datetime import datetime
    savefreq = TaskDB.get_option('savefreq', 2, int)
    if self.lastsave is not None:
        delta = (datetime.fromtimestamp(time()) -
                 datetime.fromtimestamp(self.lastsave))
        elapsed = int(delta.total_seconds() / 60)
    else:
        elapsed = savefreq + 1

    if elapsed > savefreq or force:
        if not writeable:
            self.lastsave = time()
            msg.std('Skipping database write to disk by setting.', 2)
            return

        import json
        try:
            (entities, compkeys) = _json_clean(self.entities)
            jdb = {'entities': entities, 'compkeys': compkeys, 'uuids': self.uuids}
            with open(self.dbpath, 'w') as f:
                json.dump(jdb, f)
        except:
            from acorn.msg import err
            import sys
            raise err('{}: {}'.format(*sys.exc_info()[0:2]))

        self.lastsave = time()
Serializes the database file to disk. Args: force (bool): when True, the elapsed time since last save is ignored and the database is saved anyway (subject to global :data:`writeable` setting).
codesearchnet
class Activation(Layer): def __init__(self, activation, **kwargs): super(Activation, self).__init__(**kwargs) self.supports_masking = True self.activation = activations.get(activation) def call(self, inputs): return self.activation(inputs) def compute_output_shape(self, input_shape): return input_shape def get_config(self): config = {'activation': activations.serialize(self.activation)} base_config = super(Activation, self).get_config() return dict(list(base_config.items()) + list(config.items()))
Applies an activation function to an output. Args: activation: Activation function, such as `tf.nn.relu`, or string name of built-in activation function, such as "relu". Usage: >>> layer = tf.keras.layers.Activation('relu') >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] >>> layer = tf.keras.layers.Activation(tf.nn.relu) >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the batch axis) when using this layer as the first layer in a model. Output shape: Same shape as input.
github-repos
def _make_request(self, url, method="get", data=None, extra_headers=None): attempts = 0 while attempts < 1: if not self._is_authenticated: self._authenticate() try: return self._send_request(url, method, data, extra_headers) except HTTPError as e: if e.response.status_code == 403: logger.info("Authenticated session against NetMRI timed out. Retrying.") self._is_authenticated = False attempts += 1 else: raise
Prepares the request, checks for authentication and retries in case of issues Args: url (str): URL of the request method (str): Any of "get", "post", "delete" data (any): Possible extra data to send with the request extra_headers (dict): Possible extra headers to send along in the request Returns: dict
juraj-google-style
def _secured_storage_parameters(self):
    parameters = self._storage_parameters or dict()
    if self._unsecure:
        parameters = parameters.copy()
        parameters['protocol'] = 'http'
    return parameters
Updates storage parameters with unsecure mode. Returns: dict: Updated storage_parameters.
codesearchnet