Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes).
def scatter_add(self, sparse_delta, use_locking=False, name=None): raise NotImplementedError
Adds `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be added to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
def _parse_datetime(self, value): offset = 0 pattern = r"\s+([+-]{1}\d+)\Z" matches = re.search(pattern, value) if matches: value = re.sub(pattern, '', value) offset = datetime.timedelta(hours=int(matches.group(1))/100) return datetime.datetime.strptime(value, "%Y/%m/%d %H:%M:%S") - offset
Parses a datetime string in "YYYY/MM/DD HH:MM:SS +HHMM" format. Args: value (str): Datetime string, optionally ending in a "+HHMM"/"-HHMM" offset. Returns: datetime.datetime: Parsed datetime with the offset subtracted.
juraj-google-style
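A minimal, self-contained sketch of the same parsing approach as _parse_datetime above (a standalone copy for illustration; note the offset arithmetic divides by 100, so it is exact only for full-hour offsets):

import re
import datetime

def parse_datetime(value):
    # Strip a trailing "+HHMM"/"-HHMM" offset, then subtract it from the
    # naive datetime parsed out of the remaining "YYYY/MM/DD HH:MM:SS" part.
    offset = datetime.timedelta(0)
    pattern = r"\s+([+-]\d+)\Z"
    match = re.search(pattern, value)
    if match:
        value = re.sub(pattern, '', value)
        offset = datetime.timedelta(hours=int(match.group(1)) / 100)
    return datetime.datetime.strptime(value, "%Y/%m/%d %H:%M:%S") - offset

print(parse_datetime("2021/06/01 12:00:00 +0200"))  # 2021-06-01 10:00:00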
def _aggregate_grads(gradients): assert gradients, 'No gradients to aggregate' if len(gradients) == 1: return gradients[0] if all((isinstance(g, tensor_lib.Tensor) for g in gradients)): return gen_math_ops.add_n(gradients) else: assert all((isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) for g in gradients)) return backprop_util.AggregateIndexedSlicesGradients(gradients)
Aggregate gradients from multiple sources. Args: gradients: A list of 'Tensor' or 'IndexedSlices' gradients. Returns: If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'. Otherwise returns an aggregated 'IndexedSlices'.
github-repos
def _batched_mask_to_box_tf(masks: 'tf.Tensor'): if tf.size(masks) == 0: return tf.zeros([*masks.shape[:-2], 4]) shape = shape_list(masks) height, width = shape[-2:] in_height = tf.reduce_max(masks, axis=-1) in_height_coords = in_height * tf.range(height)[None, :] bottom_edges = tf.reduce_max(in_height_coords, axis=-1) in_height_coords = in_height_coords + height * ~in_height top_edges = tf.reduce_min(in_height_coords, axis=-1) in_width = tf.reduce_max(masks, axis=-2) in_width_coords = in_width * tf.range(width)[None, :] right_edges = tf.reduce_max(in_width_coords, axis=-1) in_width_coords = in_width_coords + width * ~in_width left_edges = tf.reduce_min(in_width_coords, axis=-1) empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1) out = out * tf.expand_dims(~empty_filter, -1) out = tf.reshape(out, shape[:-2] + [4]) return out
Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which corresponds to the following required indices: - LEFT: left hand side of the bounding box - TOP: top of the bounding box - RIGHT: right of the bounding box - BOTTOM: bottom of the bounding box Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape is channel_1 x channel_2 x ... x 4. Args: - masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)
github-repos
def move(self, destination): self.relocate(destination) shutil.move(self.path, destination) self._path = destination
Reconfigure and move the virtual environment to another path. Args: destination (str): The target path of the virtual environment. Note: Unlike `relocate`, this method *will* move the virtual environment to the given path.
codesearchnet
def bessel_y0(x, name=None): with ops.name_scope(name, 'bessel_y0', [x]): return gen_special_math_ops.bessel_y0(x)
Computes the Bessel y0 function of `x` element-wise. Bessel function of the second kind of order 0. >>> tf.math.special.bessel_y0([0.5, 1., 2., 4.]).numpy() array([-0.44451873, 0.08825696, 0.51037567, -0.01694074], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.y0 @end_compatibility
github-repos
def xcompile(source_code, args=0, optimize=True): code = crianza.compile(crianza.parse(source_code), optimize=optimize) return crianza.native.compile(code, args=args)
Parses Crianza source code and returns a native Python function. Args: source_code: Crianza source code to compile. args: The resulting function's number of input parameters. optimize: Whether to optimize the compiled code. Returns: A callable Python function.
juraj-google-style
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, output_attentions: Optional[bool]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.attention_norm(hidden_states) hidden_states, attn_weights = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, **kwargs) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.ffn_norm(hidden_states) hidden_states = self.feed_forward(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. attention_mask (`torch.FloatTensor`): Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def run_restore_ops(self, session=None): raise AssertionError('No checkpoint specified, so no restore ops are available (save_path=None to Saver.restore).')
For consistency with `CheckpointLoadStatus`. Use `initialize_or_restore` for initializing if no checkpoint was passed to `Saver.restore` and restoring otherwise. Args: session: Not used.
github-repos
def collapse_short_branches(self, threshold): if not isinstance(threshold,float) and not isinstance(threshold,int): raise RuntimeError("threshold must be an integer or a float") elif threshold < 0: raise RuntimeError("threshold cannot be negative") q = deque(); q.append(self.root) while len(q) != 0: next = q.popleft() if next.edge_length is None or next.edge_length <= threshold: if next.is_root(): next.edge_length = None elif not next.is_leaf(): parent = next.parent; parent.remove_child(next) for c in next.children: parent.add_child(c) q.extend(next.children)
Collapse internal branches (not terminal branches) with length less than or equal to ``threshold``. A branch length of ``None`` is considered 0 Args: ``threshold`` (``float``): The threshold to use when collapsing branches
juraj-google-style
def _file_exists_in_gcs(gcs_file_path, credentials=None): gcs_service = _get_storage_service(credentials) (bucket_name, object_name) = gcs_file_path[len('gs://'):].split('/', 1) request = gcs_service.objects().get(bucket=bucket_name, object=object_name, projection='noAcl') try: request.execute() return True except errors.HttpError: return False
Check whether the file exists, in GCS. Args: gcs_file_path: The target file path; should have the 'gs://' prefix. credentials: Optional credential to be used to load the file from gcs. Returns: True if the file's there.
codesearchnet
def Deserialize(self, reader): super(Block, self).Deserialize(reader) self.Transactions = [] byt = reader.ReadVarInt() transaction_length = byt if transaction_length < 1: raise Exception('Invalid format') for i in range(0, transaction_length): tx = Transaction.DeserializeFrom(reader) self.Transactions.append(tx) if MerkleTree.ComputeRoot([tx.Hash for tx in self.Transactions]) != self.MerkleRoot: raise Exception("Merkle Root Mismatch")
Deserialize full object. Args: reader (neo.IO.BinaryReader):
juraj-google-style
def remove_room_alias(self, room_alias): try: self.api.remove_room_alias(room_alias) return True except MatrixRequestError: return False
Remove mapping of an alias Args: room_alias(str): The alias to be removed. Returns: bool: True if the alias is removed, False otherwise.
juraj-google-style
def validate(self, profile): ij = self.load_install_json(profile.get('install_json')) print('{}{}Profile: "{}".'.format(c.Style.BRIGHT, c.Fore.BLUE, profile.get('profile_name'))) for arg in self.profile_settings_args_install_json(ij, None): if (profile.get('args', {}).get('app', {}).get(arg) is None): print('{}{}Input "{}" not found.'.format(c.Style.BRIGHT, c.Fore.YELLOW, arg))
Check to see if any args are "missing" from profile. Validate all args from install.json are in the profile. This can be helpful to validate that any new args added to App are included in the profiles. .. Note:: This method does not work with layout.json Apps. Args: profile (dict): The current profile to validate.
codesearchnet
def create_alias(target_path, alias_path): if platform.system() == 'Windows' and not alias_path.endswith('.lnk'): alias_path += '.lnk' if os.path.lexists(alias_path): os.remove(alias_path) if platform.system() == 'Windows': from win32com import client shell = client.Dispatch('WScript.Shell') shortcut = shell.CreateShortCut(alias_path) shortcut.Targetpath = target_path shortcut.save() else: os.symlink(target_path, alias_path)
Creates an alias at 'alias_path' pointing to the file 'target_path'. On Unix, this is implemented via symlink. On Windows, this is done by creating a Windows shortcut file. Args: target_path: Destination path that the alias should point to. alias_path: Path at which to create the new alias.
juraj-google-style
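A minimal usage sketch of create_alias on a POSIX system (temporary paths are illustrative; on Windows the same call would produce a .lnk shortcut instead of a symlink):

import os
import tempfile

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, "data.txt")
with open(target, "w") as f:
    f.write("hello\n")

alias = os.path.join(workdir, "data-alias")
create_alias(target, alias)  # symlink on Unix, .lnk shortcut on Windows
print(os.path.realpath(alias) == os.path.realpath(target))  # True on Unix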
def get_named_parent(decl): if not decl: return None parent = decl.parent while parent and (not parent.name or parent.name == '::'): parent = parent.parent return parent
Returns a reference to a named parent declaration. Args: decl (declaration_t): the child declaration Returns: declaration_t: the declaration or None if not found.
juraj-google-style
def _MergeEntities(self, a, b): distance = transitfeed.ApproximateDistanceBetweenStops(a, b) if (distance > self.largest_stop_distance): raise MergeError(('Stops are too far apart: %.1fm (largest_stop_distance is %.1fm).' % (distance, self.largest_stop_distance))) scheme = {'stop_id': self._MergeIdentical, 'stop_name': self._MergeIdenticalCaseInsensitive, 'zone_id': self._MergeIdentical, 'location_type': self._MergeIdentical} return self._SchemedMerge(scheme, a, b)
Merges two stops. For the stops to be merged, they must have: - the same stop_id - the same stop_name (case insensitive) - the same zone_id - locations less than largest_stop_distance apart The other attributes can have arbitrary changes. The merged attributes are taken from the new stop. Args: a: The first stop. b: The second stop. Returns: The merged stop. Raises: MergeError: The stops could not be merged.
codesearchnet
def get_value(self, query): indices = self.get_dimension_indices(query) index = self.get_value_index(indices) value = self.get_value_by_index(index) return value
Converts a dimension/category list of dicts into a data value in three steps. Args: query(list): list of dicts with the desired query. Returns: value(float): numeric data value.
juraj-google-style
def regex_to_sql_like(regex_text: str, single_wildcard: str='_', zero_or_more_wildcard: str='%') -> List[str]: def append_to_all(new_content: str) -> None: nonlocal results results = [(r + new_content) for r in results] def split_and_append(new_options: List[str]) -> None: nonlocal results newresults = [] for option in new_options: newresults.extend([(r + option) for r in results]) results = newresults def deduplicate_wildcards(text: str) -> str: while ((zero_or_more_wildcard + zero_or_more_wildcard) in text): text = text.replace((zero_or_more_wildcard + zero_or_more_wildcard), zero_or_more_wildcard) return text working = regex_text results = [zero_or_more_wildcard] while working: if working.startswith('.*'): append_to_all(zero_or_more_wildcard) working = working[2:] elif working.startswith('['): close_bracket = working.index(']') bracketed = working[1:close_bracket] option_groups = bracketed.split('|') options = [c for group in option_groups for c in group] split_and_append(options) working = working[(close_bracket + 1):] elif ((len(working) > 1) and (working[1] == '?')): split_and_append(['', working[0]]) working = working[2:] elif working.startswith('.'): append_to_all(single_wildcard) working = working[1:] else: append_to_all(working[0]) working = working[1:] append_to_all(zero_or_more_wildcard) results = [deduplicate_wildcards(r) for r in results] return results
Converts regular expression text to a reasonably close fragment for the SQL ``LIKE`` operator. NOT PERFECT, but works for current built-in regular expressions. Args: regex_text: regular expression text to work with single_wildcard: SQL single wildcard, typically an underscore zero_or_more_wildcard: SQL "zero/one/many" wildcard, probably always a percent symbol Returns: string for an SQL string literal Raises: :exc:`ValueError` for some regex text that it doesn't understand properly
codesearchnet
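A few illustrative conversions (assuming regex_to_sql_like above is importable; the commented outputs follow from the translation rules it implements):

print(regex_to_sql_like("ab?c"))     # ['%ac%', '%abc%']   -- optional 'b' splits into two patterns
print(regex_to_sql_like("colou?r"))  # ['%color%', '%colour%']
print(regex_to_sql_like("a.c.*"))    # ['%a_c%']           -- '.' -> '_', '.*' -> '%', duplicate '%' collapsed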
def aside_view_declaration(self, view_name): if view_name in self._combined_asides: return getattr(self, self._combined_asides[view_name]) else: return None
Find and return a function object if one is an aside_view for the given view_name Aside methods declare their view provision via @XBlockAside.aside_for(view_name) This function finds those declarations for a block. Arguments: view_name (string): the name of the view requested. Returns: either the function or None
juraj-google-style
def build_sql_statement(self) -> str: builders = self._view.get_select_expressions() from_expressions = [f'`{self._dataset}`.{self._table_name}'] where_expressions = self._build_where_expressions(self._view.get_constraint_expressions()) if not builders: return self._build_sql_statement(['*'], from_expressions, where_expressions) sql_statement = '' next_from_expressions = [] child_builders = [] columns_selected = [] while builders or next_from_expressions: select_expressions, next_from_expressions = self._build_select_and_next_from_expressions(builders, child_builders, columns_selected) sql_statement = self._build_sql_statement(select_expressions, from_expressions, where_expressions) from_expressions = [f'({sql_statement})'] from_expressions.extend(next_from_expressions) where_expressions = [] builders = tuple(child_builders) child_builders = [] return sql_statement
Build SQL statement. Returns: SQL string representation of the view
github-repos
def write_dot_file(G, filename): with io.open(filename, "w") as fh: fh.write("strict digraph DependencyDiagram {\n") edge_list = G.edges() node_list = set(G.nodes()) if edge_list: for edge in sorted(edge_list): source, targ = edge node_list = node_list - {source} node_list = node_list - {targ} line = '"{}" -> "{}";\n' fh.write(line.format(source, targ)) if node_list: for node in sorted(node_list): line = '"{}"\n'.format(node) fh.write(line) fh.write("}")
Writes the graph G in dot file format for graphviz visualization. Args: G: a NetworkX graph. filename: name of the dot file to write.
juraj-google-style
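A short usage sketch of write_dot_file with a toy dependency graph (assumes networkx is installed; the output filename is arbitrary):

import networkx as nx

G = nx.DiGraph()
G.add_edge("build", "test")
G.add_edge("test", "deploy")
G.add_node("docs")  # isolated node, written without an edge

write_dot_file(G, "dependencies.dot")
# render with graphviz, e.g.: dot -Tpng dependencies.dot -o dependencies.png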
def apply_transform(self, data: OperationInputT, output_column_name: str) -> dict[str, OperationOutputT]:
Define any processing logic in the apply_transform() method. The processing logic is applied to the inputs and returns a transformed output. Args: inputs: input data.
github-repos
def unapprove(self, **kwargs): path = '%s/%s/unapprove' % (self.manager.path, self.get_id()) data = {} server_data = self.manager.gitlab.http_post(path, post_data=data, **kwargs) self._update_attrs(server_data)
Unapprove the merge request. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabMRApprovalError: If the unapproval failed
juraj-google-style
def __request_finish(self, queue_item, new_requests, request_failed=False): if self.__stopping: return del self.__threads[queue_item.get_hash()] if request_failed: new_queue_items = [] self.queue.move(queue_item, QueueItem.STATUS_ERRORED) else: self.routing.increase_route_count(queue_item.request) new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests) self.queue.move(queue_item, QueueItem.STATUS_FINISHED) try: action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items) except Exception as e: action = None print(e) print(traceback.format_exc()) queue_item.decompose() if (action == CrawlerActions.DO_STOP_CRAWLING): self.__should_stop = True if ((action == CrawlerActions.DO_CONTINUE_CRAWLING) or (action is None)): self.__should_spawn_new_requests = True
Called when the crawler finishes the given queue item. Args: queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished. new_requests (list(:class:`nyawc.http.Request`)): All the requests that were found during this request. request_failed (bool): True if the request failed (and it needs to be moved to the errored status).
codesearchnet
def _get_authenticated_client(self, wsdl): return zeep.Client((wsdl % quote(self.username)), transport=zeep.Transport(session=self._get_authenticated_session()))
Return an authenticated SOAP client. Args: wsdl (str): WSDL URL template into which the username is interpolated. Returns: zeep.Client: Authenticated API client.
codesearchnet
def parse(cls, json_value: Union[int, float, str, List[Any], Tuple[Any], None], spec: Optional[DNASpec]=None) -> 'DNA': return DNA(json_value, spec=spec)
Parse DNA from a nested structure of numbers. Deprecated: use `DNA.__init__` instead. Args: json_value: A nested structure of numbers. spec: DNA spec that will be applied to current DNA tree. Returns: an instance of DNA object. Raises: ValueError: Bad format for json_value or parsed DNA does not conform to the DNA spec.
github-repos
class TFGPT2Tokenizer(keras.layers.Layer): def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: Optional[int]=None, pad_token_id: Optional[int]=None): super().__init__() self.pad_token_id = pad_token_id self.max_length = max_length self.vocab = vocab self.merges = merges self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length) @classmethod def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs): merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()] vocab = tokenizer.get_vocab() return cls(vocab, merges, *args, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs) @classmethod def from_config(cls, config): return cls(**config) def get_config(self): return {'vocab': self.vocab, 'merges': self.merges, 'max_length': self.max_length, 'pad_token_id': self.pad_token_id} def call(self, x, max_length: Optional[int]=None): input_ids = self.tf_tokenizer(x) attention_mask = tf.ones_like(input_ids) if self.pad_token_id is not None: max_length = max_length if max_length is not None else self.max_length if max_length is not None: input_ids, attention_mask = pad_model_inputs(input_ids, max_seq_length=max_length, pad_value=self.pad_token_id) return {'attention_mask': attention_mask, 'input_ids': input_ids}
This is an in-graph tokenizer for GPT2. It should be initialized similarly to other tokenizers, using the `from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings from an existing standard tokenizer object. In-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run when the model is called, rather than during preprocessing. As a result, they have somewhat more limited options than standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes straight from `tf.string` inputs to outputs. Args: vocab (Dict[str, int]): Vocabulary dict for Byte Pair Tokenizer merges (List[str]): Merges list for Byte Pair Tokenizer
github-repos
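A hedged usage sketch of TFGPT2Tokenizer (requires tensorflow, tensorflow-text and transformers; "gpt2" is the standard public checkpoint name):

import tensorflow as tf

tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tokenizer(tf.constant(["hello world", "in-graph tokenization"]))
# Without pad_token_id/max_length the ids are returned without padding;
# attention_mask is all ones with the same shape as input_ids.
print(outputs["input_ids"])
print(outputs["attention_mask"])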
def signature_summary(self, default_values=False): summary = f'{self._function_type!r}' if default_values: summary += '\nDefaults:' if self.default_values: for name, value in self.default_values.items(): summary += f'\n {name}: {value!r}' else: summary += '\n None' return summary
Returns a string summarizing this function's signature. Args: default_values: If true, then include default values in the signature. Returns: A `string`.
github-repos
def get_variables(self, include_submodules=False, include_nontrainable=False): if include_nontrainable: model_variables = [self.all_variables[key] for key in sorted(self.all_variables)] states_preprocessing_variables = [ variable for name in sorted(self.states_preprocessing) for variable in self.states_preprocessing[name].get_variables() ] model_variables += states_preprocessing_variables actions_exploration_variables = [ variable for name in sorted(self.actions_exploration) for variable in self.actions_exploration[name].get_variables() ] model_variables += actions_exploration_variables if self.reward_preprocessing is not None: reward_preprocessing_variables = self.reward_preprocessing.get_variables() model_variables += reward_preprocessing_variables else: model_variables = [self.variables[key] for key in sorted(self.variables)] return model_variables
Returns the TensorFlow variables used by the model. Args: include_submodules: Includes variables of submodules (e.g. baseline, target network) if true. include_nontrainable: Includes non-trainable variables if true. Returns: List of variables.
juraj-google-style
def format_search_results(self, search_results): formatted_lines = [] for search_result in search_results: lines = self._format_search_result(search_result) formatted_lines.extend(lines) return formatted_lines
Format search results. Args: search_results (list of `ResourceSearchResult`): Search results to format. Returns: List of 2-tuple: Text and color to print in.
juraj-google-style
def load_lines(filename): with open(filename, 'r', encoding='utf-8') as f: return [line.rstrip('\n') for line in f.readlines()]
Load a text file as an array of lines. Args: filename: Path to the input file. Returns: An array of strings, each representing an individual line.
juraj-google-style
def check_layout(tensor: tensor_lib.Tensor, layout: layout_lib.Layout) -> None: if fetch_layout(tensor) != layout: raise ValueError('Layout of tensor: ' + str(fetch_layout(tensor)) + ', did not match expected layout: ' + str(layout))
Asserts that the layout of the DTensor is `layout`. Args: tensor: A DTensor whose layout is to be checked. layout: The `Layout` to compare against. Raises: ValueError: If the layout of `tensor` does not match the supplied `layout`.
github-repos
def get_user( self, identified_with, identifier, req, resp, resource, uri_kwargs ): stored_value = self.kv_store.get( self._get_storage_key(identified_with, identifier) ) if stored_value is not None: user = self.serialization.loads(stored_value.decode()) else: user = None return user
Get user object for given identifier. Args: identified_with (object): authentication middleware used to identify the user. identifier: middleware-specific user identifier (string or tuple in case of all built-in authentication middleware classes). Returns: dict: user object stored in Redis if it exists, otherwise ``None``
juraj-google-style
def select_all(self, serial_numbers): sheet = self.table col = self.db_sheet_cols.id rows = sheet.loc[:, col].isin(serial_numbers) return sheet.loc[rows, :]
Select rows matching a list of serial numbers. Args: serial_numbers: list (or ndarray) of serial numbers Returns: pandas.DataFrame
juraj-google-style
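A self-contained sketch of the same pandas isin/loc selection pattern used by select_all (hypothetical column names, independent of the class above):

import pandas as pd

sheet = pd.DataFrame({"serial_number": [101, 102, 103], "label": ["a", "b", "c"]})
rows = sheet.loc[:, "serial_number"].isin([101, 103])  # boolean mask per row
selected = sheet.loc[rows, :]                          # keep only matching rows
print(selected)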
def from_table(table, fields=None): if fields is None: fields = '*' elif isinstance(fields, list): fields = ','.join(fields) return Query('SELECT %s FROM %s' % (fields, table._repr_sql_()))
Return a Query for the given Table object Args: table: the Table object to construct a Query out of fields: the fields to return. If None, all fields will be returned. This can be a string which will be injected into the Query after SELECT, or a list of field names. Returns: A Query object that will return the specified fields from the records in the Table.
juraj-google-style
def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window: if size is None: width, height = 1280, 720 else: width, height = size if samples < 0 or (samples & (samples - 1)) != 0: raise Exception('Invalid number of samples: %d' % samples) window = Window.__new__(Window) window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded) return window
Create the main window. Args: size (tuple): The width and height of the window. samples (int): The number of samples. Keyword Args: fullscreen (bool): Fullscreen? title (str): The title of the window. threaded (bool): Threaded? Returns: Window: The main window.
juraj-google-style
def _check_wires_list(self, wires, node): if len(set(wires)) != len(wires): raise DAGCircuitError("duplicate wires") wire_tot = len(node.qargs) + len(node.cargs) if node.condition is not None: wire_tot += node.condition[0].size if len(wires) != wire_tot: raise DAGCircuitError("expected %d wires, got %d" % (wire_tot, len(wires)))
Check that a list of wires is compatible with a node to be replaced. - no duplicate names - correct length for operation Raise an exception otherwise. Args: wires (list[register, index]): gives an order for (qu)bits in the input circuit that is replacing the node. node (DAGNode): a node in the dag Raises: DAGCircuitError: if check doesn't pass.
juraj-google-style
def recipe_archive(config, auth_write, archive_days, archive_bucket, archive_path, archive_delete): archive(config, {'auth': auth_write, 'days': archive_days, 'storage': {'bucket': archive_bucket, 'path': archive_path}, 'delete': archive_delete})
Wipe old information from a Storage bucket based on last update time. Args: auth_write (authentication) - Credentials used for writing data. archive_days (integer) - NA archive_bucket (string) - NA archive_path (string) - NA archive_delete (boolean) - NA
github-repos
def clean(decrypted: bytes) -> str: last = decrypted[-1] if isinstance(last, int): return decrypted[:-last].decode('utf8') return decrypted[:-ord(last)].decode('utf8')
Strip padding from decrypted value. Remove number indicated by padding e.g. if last is '\x0e' then ord('\x0e') == 14, so take off 14. Args: decrypted: decrypted value Returns: Decrypted stripped of junk padding
juraj-google-style
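A quick demonstration of clean() on a PKCS#7-style padded byte string (the pad bytes each encode the pad length):

padded = b"hello" + b"\x03" * 3
print(clean(padded))  # -> 'hello'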
def delete_existing_policy(self, scaling_policy, server_group): self.log.info("Deleting policy %s on %s", scaling_policy['policyName'], server_group) delete_dict = { "application": self.app, "description": "Delete scaling policy", "job": [{ "policyName": scaling_policy['policyName'], "serverGroupName": server_group, "credentials": self.env, "region": self.region, "provider": "aws", "type": "deleteScalingPolicy", "user": "foremast-autoscaling-policy" }] } wait_for_task(json.dumps(delete_dict))
Given a scaling_policy and server_group, deletes the existing scaling_policy. Scaling policies need to be deleted instead of upserted for consistency. Args: scaling_policy (json): the scaling_policy json from Spinnaker that should be deleted server_group (str): the affected server_group
juraj-google-style
def save_output_in_cache(name, filename, output): cache_filename = _get_cache_filename(name, filename) with _open_for_write(cache_filename) as f: f.write(output)
Saves output in the cache location. Args: name: string: name of the linter. filename: string: path of the filename for which we are saving the output. output: string: full output (not yet filtered) of the lint command.
juraj-google-style
def _GetEventLogProviderKey(self, log_source): table_names = ['event_log_providers'] column_names = ['event_log_provider_key'] condition = 'log_source == "{0:s}"'.format(log_source) values_list = list(self._database_file.GetValues( table_names, column_names, condition)) number_of_values = len(values_list) if number_of_values == 0: return None if number_of_values == 1: values = values_list[0] return values['event_log_provider_key'] raise RuntimeError('More than one value found in database.')
Retrieves the Event Log provider key. Args: log_source (str): Event Log source. Returns: str: Event Log provider key or None if not available. Raises: RuntimeError: if more than one value is found in the database.
juraj-google-style
def _close_on_stop(self, sess, cancel_op, coord): coord.wait_for_stop() try: sess.run(cancel_op) except Exception as e: logging.vlog(1, 'Ignored exception: %s', str(e))
Close the queue when the Coordinator requests stop. Args: sess: A Session. cancel_op: The Operation to run. coord: Coordinator.
github-repos
def PushAttributeContainer(self, serialized_data): self._list.append(serialized_data) self.data_size += len(serialized_data) self.next_sequence_number += 1
Pushes a serialized attribute container onto the list. Args: serialized_data (bytes): serialized attribute container data.
juraj-google-style
def encode_message(self, message): message.check_initialized() return json.dumps(message, cls=MessageJSONEncoder, protojson_protocol=self)
Encode Message instance to JSON string. Args: message: Message instance to encode into a JSON string. Returns: String encoding of Message instance in protocol JSON format. Raises: messages.ValidationError if message is not initialized.
codesearchnet
def populate_sites( self, number_of_atoms, selected_sites=None ): if number_of_atoms > self.number_of_sites: raise ValueError if selected_sites: atoms = [ atom.Atom( initial_site = site ) for site in random.sample( [ s for s in self.sites if s.label in selected_sites ], number_of_atoms ) ] else: atoms = [ atom.Atom( initial_site = site ) for site in random.sample( self.sites, number_of_atoms ) ] self.number_of_occupied_sites = number_of_atoms return atoms
Populate the lattice sites with a specific number of atoms. Args: number_of_atoms (Int): The number of atoms to populate the lattice sites with. selected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. Defaults to None. Returns: list(Atom): The atoms placed on the lattice sites.
juraj-google-style
def exists(self, filename): if is_package(filename): filepath = os.path.join(self.connection['mount_point'], 'Packages', filename) else: filepath = os.path.join(self.connection['mount_point'], 'Scripts', filename) return os.path.exists(filepath)
Report whether a file exists on the distribution point. Determines file type by extension. Args: filename: Filename you wish to check. (No path! e.g.: "AdobeFlashPlayer-14.0.0.176.pkg")
codesearchnet
def compiler_ir_generator(stage='hlo', device_name=None, platform_name=None): if device_name is not None: if platform_name is not None: raise ValueError('device_name and platform_name cannot be provided at the same time.') warnings.warn('device_name is being deprecated. Use platform_name.') device_name = maybe_get_device_name(device_name) res_bytes = context.context().get_compiler_ir(device_name=device_name, platform_name=platform_name, function_name=fn_name, flat_args=filtered_flat_specs, captured_inputs=concrete_fn.captured_inputs, stage=stage) if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'): return res_bytes else: return res_bytes.decode('utf-8')
Gets the compiler IR bytes. Args: stage: The exported stage for the given function. device_name: The name of the device with the form as "/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0" etc. When this is used, actual device is needed for getting the compiler IR. platform_name: The name of the platform, e.g. "TPU". See the comment in `get_compiler_ir` in `context.py`. Returns: The compiler IR bytes.
github-repos
def is_connectable(host: str, port: Union[(int, str)]) -> bool: socket_ = None try: socket_ = socket.create_connection((host, port), 1) result = True except socket.timeout: result = False finally: if socket_: socket_.close() return result
Tries to connect to the device to see if it is connectable. Args: host: The host to connect. port: The port to connect. Returns: True or False.
codesearchnet
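A small self-contained check of is_connectable() against a locally bound listener (only uses the standard library; assumes the function above is importable):

import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", 0))   # let the OS pick a free port
server.listen(1)
port = server.getsockname()[1]

print(is_connectable("127.0.0.1", port))  # expected: True
server.close()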
def GetTSKVsPartByPathSpec(tsk_volume, path_spec): location = getattr(path_spec, 'location', None) part_index = getattr(path_spec, 'part_index', None) start_offset = getattr(path_spec, 'start_offset', None) partition_index = None if part_index is None: if location is not None: if location.startswith('/p'): try: partition_index = int(location[2:], 10) - 1 except ValueError: pass if partition_index is None or partition_index < 0: location = None if location is None and start_offset is None: return None, None bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume) current_part_index = 0 current_partition_index = 0 tsk_vs_part = None tsk_vs_part_list = list(tsk_volume) number_of_tsk_vs_parts = len(tsk_vs_part_list) if number_of_tsk_vs_parts > 0: if (part_index is not None and (part_index < 0 or part_index >= number_of_tsk_vs_parts)): return None, None for tsk_vs_part in tsk_vs_part_list: if TSKVsPartIsAllocated(tsk_vs_part): if partition_index is not None: if partition_index == current_partition_index: break current_partition_index += 1 if part_index is not None and part_index == current_part_index: break if start_offset is not None: start_sector = TSKVsPartGetStartSector(tsk_vs_part) if start_sector is not None: start_sector *= bytes_per_sector if start_sector == start_offset: break current_part_index += 1 if tsk_vs_part is None or current_part_index >= number_of_tsk_vs_parts: return None, None if not TSKVsPartIsAllocated(tsk_vs_part): current_partition_index = None return tsk_vs_part, current_partition_index
Retrieves the TSK volume system part object from the TSK volume object. Args: tsk_volume (pytsk3.Volume_Info): TSK volume information. path_spec (PathSpec): path specification. Returns: tuple: contains: pytsk3.TSK_VS_PART_INFO: TSK volume system part information or None on error. int: partition index or None if not available.
juraj-google-style
def getUserSid(username): if six.PY2: username = _to_unicode(username) domain = win32api.GetComputerName() if (username.find('\\') != (- 1)): domain = username.split('\\')[0] username = username.split('\\')[(- 1)] domain = domain.upper() return win32security.ConvertSidToStringSid(win32security.LookupAccountName(None, ((domain + '\\') + username))[0])
Get the Security ID for the user Args: username (str): The user name for which to look up the SID Returns: str: The user SID CLI Example: .. code-block:: bash salt '*' user.getUserSid jsnuffy
codesearchnet
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, num_warmup_steps: Optional[int]=None, num_training_steps: Optional[int]=None, scheduler_specific_kwargs: Optional[dict]=None): name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] if optimizer is not None and isinstance(optimizer, LayerWiseDummyOptimizer): optimizer_dict = optimizer.optimizer_dict scheduler_dict = {} for param in optimizer_dict.keys(): scheduler_dict[param] = get_scheduler(name, optimizer=optimizer_dict[param], num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, scheduler_specific_kwargs=scheduler_specific_kwargs) def scheduler_hook(param): scheduler_dict[param].step() for param in optimizer_dict.keys(): if param.requires_grad: param.register_post_accumulate_grad_hook(scheduler_hook) return LayerWiseDummyScheduler(optimizer_dict=optimizer_dict, lr=optimizer.defaults['lr']) if name == SchedulerType.CONSTANT: return schedule_func(optimizer) if scheduler_specific_kwargs is None: scheduler_specific_kwargs = {} if name == SchedulerType.REDUCE_ON_PLATEAU: return schedule_func(optimizer, **scheduler_specific_kwargs) if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.') if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) if name == SchedulerType.INVERSE_SQRT: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) if name == SchedulerType.WARMUP_STABLE_DECAY: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, **scheduler_specific_kwargs) if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.') return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, **scheduler_specific_kwargs)
Unified API to get any scheduler from its name. Args: name (`str` or `SchedulerType`): The name of the scheduler to use. optimizer (`torch.optim.Optimizer`): The optimizer that will be used during training. num_warmup_steps (`int`, *optional*): The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. num_training_steps (`int``, *optional*): The number of training steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. scheduler_specific_kwargs (`dict`, *optional*): Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler parameters will cause the scheduler function to raise a TypeError.
github-repos
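A typical usage sketch of get_scheduler with a PyTorch optimizer (a stand-in linear model; "linear" is one of the standard scheduler names, i.e. warmup followed by linear decay):

import torch

model = torch.nn.Linear(10, 2)  # stand-in model
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=100,
    num_training_steps=1000,
)

for step in range(1000):
    # ... forward/backward pass and loss.backward() would go here ...
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()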
def platform_config_dir(): if LINUX: dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config') elif DARWIN: dpath_ = '~/Library/Application Support' elif WIN32: dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming') else: raise NotImplementedError(('Unknown Platform %r' % (sys.platform,))) dpath = normpath(expanduser(dpath_)) return dpath
Returns a directory which should be writable for any application. This should be used for persistent configuration files. Returns: PathLike : path to the config dir used by the current operating system
codesearchnet
def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file): parser = etree.XMLParser(ns_clean=True) tree = etree.parse(sifts_file, parser) root = tree.getroot() my_pdb_resnum = None my_pdb_annotation = False ent = './/...' for chain in root.findall(ent): if (chain.attrib['entityId'] == chain_id): ures = './/...' my_uniprot_residue = chain.findall(ures) if (len(my_uniprot_residue) == 1): parent = my_uniprot_residue[0].getparent() pres = './/...' my_pdb_residue = parent.findall(pres) my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum']) anno = './/...' my_pdb_annotation = parent.findall(anno) if (len(my_pdb_annotation) == 1): my_pdb_annotation = my_pdb_annotation[0].text if (my_pdb_annotation == 'Not_Observed'): my_pdb_annotation = False else: my_pdb_annotation = True else: return (None, False) return (my_pdb_resnum, my_pdb_annotation)
Map a UniProt residue number to its corresponding PDB residue number. This function requires that the SIFTS file be downloaded, and also a chain ID (as different chains may have different mappings). Args: uniprot_resnum (int): integer of the residue number you'd like to map chain_id (str): string of the PDB chain to map to sifts_file (str): Path to the SIFTS XML file Returns: (tuple): tuple containing: mapped_resnum (int): Mapped residue number is_observed (bool): Indicates if the 3D structure actually shows the residue
codesearchnet
def wait_for_tx(self, tx, max_seconds=120): tx_hash = None if isinstance(tx, (str, UInt256)): tx_hash = str(tx) elif isinstance(tx, Transaction): tx_hash = tx.Hash.ToString() else: raise AttributeError("Supplied tx is type '%s', but must be Transaction or UInt256 or str" % type(tx)) wait_event = Event() time_start = time.time() while True: _tx, height = Blockchain.Default().GetTransaction(tx_hash) if height > -1: return True wait_event.wait(3) seconds_passed = time.time() - time_start if seconds_passed > max_seconds: raise TxNotFoundInBlockchainError("Transaction with hash %s not found after %s seconds" % (tx_hash, int(seconds_passed)))
Wait for tx to show up on blockchain Args: tx (Transaction or UInt256 or str): Transaction or just the hash max_seconds (float): maximum seconds to wait for tx to show up. default: 120 Returns: True: if transaction was found Raises: AttributeError: if supplied tx is not Transaction or UInt256 or str TxNotFoundInBlockchainError: if tx is not found in blockchain after max_seconds
juraj-google-style
def get_icloud_folder_location(): yosemite_icloud_path = '~/Library/Mobile Documents/com~apple~CloudDocs/' icloud_home = os.path.expanduser(yosemite_icloud_path) if (not os.path.isdir(icloud_home)): error('Unable to find your iCloud Drive =(') return str(icloud_home)
Try to locate the iCloud Drive folder. Returns: (str) Full path to the iCloud Drive folder.
codesearchnet
def kms_encrypt(value, key, aws_config=None): aws_config = (aws_config or {}) aws = boto3.session.Session(**aws_config) client = aws.client('kms') enc_res = client.encrypt(KeyId=key, Plaintext=value) return n(b64encode(enc_res['CiphertextBlob']))
Encrypt and value with KMS key. Args: value (str): value to encrypt key (str): key id or alias aws_config (optional[dict]): aws credentials dict of arguments passed into boto3 session example: aws_creds = {'aws_access_key_id': aws_access_key_id, 'aws_secret_access_key': aws_secret_access_key, 'region_name': 'us-east-1'} Returns: str: encrypted cipher text
codesearchnet
def transform_table(self, table, table_meta, missing=None): if missing is None: missing = self.missing else: self.missing = missing warnings.warn(DEPRECATION_MESSAGE.format('transform_table'), DeprecationWarning) content = {} columns = [] table_name = table_meta['name'] for field in table_meta['fields']: column_name = field['name'] if missing and table[column_name].isnull().any(): null_transformer = transformers.NullTransformer(field) clean_column = null_transformer.fit_transform(table[column_name]) null_name = '?' + column_name columns.append(null_name) content[null_name] = clean_column[null_name].values column = clean_column[column_name] else: column = table[column_name].to_frame() transformer = self.transformers[(table_name, column_name)] content[column_name] = transformer.transform(column)[column_name].values columns.append(column_name) return pd.DataFrame(content, columns=columns)
Apply the stored transformers to `table`. Args: table(pandas.DataFrame): Contents of the table to be transformed. table_meta(dict): Metadata for the given table. missing(bool): Wheter or not use NullTransformer to handle missing values. Returns: pandas.DataFrame: Transformed table.
juraj-google-style
def recipe_dcm_run(config, auth_read, account, report_id, report_name): dcm(config, {'auth': auth_read, 'report_run_only': True, 'report': {'account': account, 'report_id': report_id, 'name': report_name}})
Trigger a CM report run Args: auth_read (authentication) - Credentials used for reading data. account (integer) - CM network id. report_id (integer) - CM report id, empty if using name. report_name (string) - CM report name, empty if using id instead.
github-repos
def AssignTasksToClient(self, client_id): rules = data_store.REL_DB.ReadAllForemanRules() if not rules: return 0 last_foreman_run = self._GetLastForemanRunTime(client_id) latest_rule_creation_time = max(rule.creation_time for rule in rules) if latest_rule_creation_time <= last_foreman_run: return 0 self._SetLastForemanRunTime(client_id, latest_rule_creation_time) relevant_rules = [] expired_rules = False now = rdfvalue.RDFDatetime.Now() for rule in rules: if rule.expiration_time < now: expired_rules = True continue if rule.creation_time <= last_foreman_run: continue relevant_rules.append(rule) actions_count = 0 if relevant_rules: client_data = data_store.REL_DB.ReadClientFullInfo(client_id) if client_data is None: return for rule in relevant_rules: if rule.Evaluate(client_data): actions_count += self._RunAction(rule, client_id) if expired_rules: data_store.REL_DB.RemoveExpiredForemanRules() return actions_count
Examines our rules and starts up flows based on the client. Args: client_id: Client id of the client for tasks to be assigned. Returns: Number of assigned tasks.
juraj-google-style
def _make_columnar(self, x): if tensorshape_util.rank(x.shape) is not None: if tensorshape_util.rank(x.shape) == 1: x = x[tf.newaxis, :] return x shape = tf.shape(input=x) maybe_expanded_shape = tf.concat([ shape[:-1], distribution_util.pick_vector( tf.equal(tf.rank(x), 1), [1], np.array([], dtype=np.int32)), shape[-1:], ], 0) return tf.reshape(x, maybe_expanded_shape)
Ensures non-scalar input has at least one column. Example: If `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`. If `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged. If `x = 1` then the output is unchanged. Args: x: `Tensor`. Returns: columnar_x: `Tensor` with at least two dimensions.
juraj-google-style
def task_ordinal_at_coordinates(self, device_coordinates): return self._topology_tasks[tuple(device_coordinates)]
Returns the TensorFlow task number attached to `device_coordinates`. Args: device_coordinates: An integer sequence describing a device's physical coordinates in the TPU fabric. Returns: Returns the TensorFlow task number that contains the TPU device with those physical coordinates.
github-repos
def unpack(self, buff, offset=0): super().unpack(buff, offset) try: self.oxm_field = self._unpack_oxm_field() except ValueError as exception: raise UnpackException(exception) self.oxm_hasmask = (self.oxm_field_and_mask & 1) == 1 start = offset + 4 end = start + self.oxm_length self.oxm_value = buff[start:end]
Unpack the buffer into a OxmTLV. Args: buff (bytes): The binary data to be unpacked. offset (int): If we need to shift the beginning of the data.
juraj-google-style
def mpim_open(self, *, users: List[str], **kwargs) -> SlackResponse: kwargs.update({"users": users}) return self.api_call("mpim.open", json=kwargs)
This method opens a multiparty direct message. Args: users (list): A lists of user ids. The ordering of the users is preserved whenever a MPIM group is returned. e.g. ['W1234567890', 'U2345678901', 'U3456789012']
juraj-google-style
def get_artist_location(self, cache=True): if not (cache and ('artist_location' in self.cache)): response = self.get_attribute('profile', bucket='artist_location') self.cache['artist_location'] = response['songs'][0]['artist_location'] return self.cache['artist_location']
Get the location of a song's artist. Args: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. Returns: An artist location object. Example: >>> s = song.Song('SOQKVPH12A58A7AF4D') >>> s.artist_location {u'latitude': 34.053489999999996, u'location': u'Los Angeles, CA', u'longitude': -118.24532000000001} >>>
juraj-google-style
def apply(self, func, axis, *args, **kwargs): if callable(func): return self._callable_func(func, axis, *args, **kwargs) elif isinstance(func, dict): return self._dict_func(func, axis, *args, **kwargs) elif is_list_like(func): return self._list_like_func(func, axis, *args, **kwargs) else: pass
Apply func across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
codesearchnet
def advance_for_next_slice(self, recovery_slice=False): if recovery_slice: self.slice_id += 2 self.input_reader = self.input_reader.from_json(self._input_reader_json) else: self.slice_id += 1
Advance relevant states for the next slice. Args: recovery_slice: True if this slice is running recovery logic. See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery for more info.
juraj-google-style
def find_record(self, model_class, record_id, reload=False): cached_model = self.peek_record(model_class, record_id) if ((cached_model is not None) and (reload is False)): return cached_model else: return self._get_record(model_class, record_id)
Return a instance of model_class from the API or the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. reload (bool, optional): Don't return the cached version if reload==True. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
codesearchnet
def split_vector_ctype(ctype): if (not is_vector_ctype(ctype)): raise ValueError('The given ctype is not a vector type.') for vector_length in [2, 3, 4, 8, 16]: if ctype.endswith(str(vector_length)): vector_str_len = len(str(vector_length)) return (ctype[:(- vector_str_len)], int(ctype[(- vector_str_len):]))
Split a vector ctype into a raw ctype and the vector length. If the given ctype is not a vector type, we raise an error. Args: ctype (str): the ctype to possibly split into a raw ctype and the vector length Returns: tuple: the raw ctype and the vector length
codesearchnet
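For illustration, a self-contained variant of the suffix-split idea (independent of the module's is_vector_ctype helper; the vector lengths are the OpenCL ones used above):

def split_vector_ctype_demo(ctype):
    # Check the known OpenCL vector lengths and strip the matching suffix.
    for vector_length in (16, 8, 4, 3, 2):
        suffix = str(vector_length)
        if ctype.endswith(suffix):
            return ctype[:-len(suffix)], vector_length
    raise ValueError('The given ctype is not a vector type.')

print(split_vector_ctype_demo('float4'))    # ('float', 4)
print(split_vector_ctype_demo('double16'))  # ('double', 16)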
def reward(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> tf.Tensor: scope = self.reward_scope(state, action, next_state) r = self.compile_reward(scope).tensor with self.graph.as_default(): with tf.name_scope('reward'): return tf.expand_dims(r, (- 1))
Compiles the reward function given the current `state`, `action` and `next_state`. Args: state (Sequence[tf.Tensor]): A tuple of current state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. next_state (Sequence[tf.Tensor]): A tuple of next state tensors. Returns: (:obj:`tf.Tensor`): A tensor representing the reward function.
codesearchnet
def request_with_retry(func, *args, **kwargs): max_retries = kwargs.pop('max_retries', 30) sleep = 2 retry_count = 0 while True: try: response = func(*args, **kwargs) response.raise_for_status() return response except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout) as e: if retry_count == max_retries: return e retry_count += 1 delay = sleep + random.random() * 0.25 * sleep if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 429: logger.info( "Rate limit exceeded, retrying in %s seconds" % delay) else: logger.warning('requests_with_retry encountered retryable exception: %s. args: %s, kwargs: %s', e, args, kwargs) time.sleep(delay) sleep *= 2 if sleep > MAX_SLEEP_SECONDS: sleep = MAX_SLEEP_SECONDS except requests.exceptions.RequestException as e: logger.error(response.json()['error']) logger.exception( 'requests_with_retry encountered unretryable exception: %s', e) return e
Perform a requests http call, retrying with exponential backoff. Args: func: An http-requesting function to call, like requests.post max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk *args: passed through to func **kwargs: passed through to func
juraj-google-style
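A hedged usage sketch of request_with_retry (the URL is a placeholder; any requests callable works, and max_retries is consumed by the wrapper rather than passed to requests):

import requests

response = request_with_retry(
    requests.get,
    "https://example.com/api/status",  # hypothetical endpoint
    timeout=10,
    max_retries=5,
)
if isinstance(response, Exception):
    print("gave up after retries:", response)
else:
    print(response.status_code)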
def _get_param_matcher(self, callable_type): callable_param_count = collections.Counter(self.ctx.annotation_utils.get_type_parameters(callable_type)) if isinstance(callable_type, abstract.CallableClass): callable_param_count.subtract(self.ctx.annotation_utils.get_type_parameters(callable_type.get_formal_type_parameter(abstract_utils.ARGS))) def match(left, right, subst): if not isinstance(left, abstract.TypeParameter) or not isinstance(right, abstract.TypeParameter) or right.constraints or right.bound or (callable_param_count[right] != 1): return None self._type_params.seen.add(right) subst = subst.copy() subst[right.full_name] = self.ctx.program.NewVariable([self.ctx.convert.empty], [], self._node) return subst return match
Helper for matching the parameters of a callable. Args: callable_type: The callable being matched against. Returns: A special param matcher: (left, right, subst) -> Optional[subst]. left: An argument to be matched against a parameter of callable_type. right: A parameter of callable_type. subst: The current substitution dictionary. If the matcher returns a non-None subst dict, then the match has succeeded via special matching rules for single TypeVars. Otherwise, the caller should next attempt normal matching on the inputs. (See _match_signature_against_callable for a usage example.)
github-repos
def set_messages(self, messages: Sequence[CachedMessage]) -> None: uids = {msg.uid for msg in messages} expunged = self._messages._uids - uids return self.add_updates(messages, expunged)
This is the non-optimized alternative to :meth:`.add_updates` for backend implementations that cannot detect their own updates and must instead compare the entire state of the mailbox. The ``messages`` list should contain the entire set of messages in the mailbox, ordered by UID. Any UID that previously existed and is not included in ``messages`` will be expunged. Args: messages: The entire set of cached message objects.
juraj-google-style
def _getfullargspec(target): return _convert_maybe_argspec_to_fullargspec(getargspec(target))
A python2 version of getfullargspec. Args: target: the target object to inspect. Returns: A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.
github-repos
def update_headers(self, headers): check_type(headers, dict, may_be_none=False) self._req_session.headers.update(headers)
Update the HTTP headers used for requests in this session. Note: Updates provided by the dictionary passed as the `headers` parameter to this method are merged into the session headers by adding new key-value pairs and/or updating the values of existing keys. The session headers are not replaced by the provided dictionary. Args: headers(dict): Updates to the current session headers.
codesearchnet
async def puts(self, items, seqn=None): size = 0 for chunk in s_common.chunks(items, 1000): metrics = self._items.save(chunk) self._metrics.add(metrics) await self.fire('cryotank:puts', numrecords=len(chunk)) size += len(chunk) await asyncio.sleep(0) if seqn is not None: iden, offs = seqn self.setOffset(iden, offs + size) return size
Add the structured data from items to the CryoTank. Args: items (list): A list of objects to store in the CryoTank. seqn (iden, offs): An iden / offset pair to record. Returns: int: The ending offset of the items or seqn.
juraj-google-style
def fresh(t, non_generic): mappings = {} def freshrec(tp): p = prune(tp) if isinstance(p, TypeVariable): if is_generic(p, non_generic): if (p not in mappings): mappings[p] = TypeVariable() return mappings[p] else: return p elif isinstance(p, dict): return p elif isinstance(p, Collection): return Collection(*[freshrec(x) for x in p.types]) elif isinstance(p, Scalar): return Scalar([freshrec(x) for x in p.types]) elif isinstance(p, TypeOperator): return TypeOperator(p.name, [freshrec(x) for x in p.types]) elif isinstance(p, MultiType): return MultiType([freshrec(x) for x in p.types]) else: assert False, 'missing freshrec case {}'.format(type(p)) return freshrec(t)
Makes a copy of a type expression. The type t is copied. The generic variables are duplicated and the non_generic variables are shared. Args: t: A type to be copied. non_generic: A set of non-generic TypeVariables
codesearchnet
def conversations_replies(self, *, channel: str, ts: str, **kwargs) -> SlackResponse: kwargs.update({'channel': channel, 'ts': ts}) return self.api_call('conversations.replies', http_verb='GET', params=kwargs)
Retrieve a thread of messages posted to a conversation Args: channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890' ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'
codesearchnet
def _bfd_rx(self, **kwargs): int_type = kwargs['int_type'] method_name = 'interface_%s_bfd_interval_min_rx' % int_type bfd_rx = getattr(self._interface, method_name) config = bfd_rx(**kwargs) if kwargs['delete']: tag = 'min-rx' config.find('. pass return config
Return the BFD minimum receive interval XML. You should not use this method. You probably want `BGP.bfd`. Args: min_rx (str): BFD receive interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. Raises: None
juraj-google-style
def __init__(self, block_shape, block_rows, name='block_diagonal_matrix'): super(BlockDiagonalMatrix, self).__init__( block_shape=block_shape, block_rows=block_rows, include_diagonal=True, include_off_diagonal=False, name=name)
Constructs a new `BlockDiagonalMatrix` module. Args: block_shape: tuple, 2-dimensional tuple indicating the shape of each individual block. block_rows: int, the number of blocks in each row (and column) of the output matrix. name: string, name of the module.
juraj-google-style
def luhn(base, num_only=False, allow_lower_case=False): if num_only: alphabet = _ALPHABET[:10] else: alphabet = _ALPHABET if allow_lower_case: base = base.upper() try: pre_calc = (_PRE_CALC[alphabet.index(c)] for c in reversed(base)) cum = 0 parity = 1 for elem in pre_calc: val, parity = elem[parity] cum += val except ValueError: pass else: return 10 - cum % 10 if num_only: msg = 'The string given must only contain digits.' elif allow_lower_case: msg = 'The string given must only contain digits and ascii letters.' else: msg = 'The string given must only contain digits and upper case ' \ 'ascii letters.' raise ValueError(msg)
Return the Luhn check digit for the given string. Args: base(str): string for which to calculate the check digit num_only(bool): allow only digits in `base` (default: False) allow_lower_case(bool): allow lower case letters in `base` (default: False) Returns: int: Luhn check digit Raises: ValueError: given `base` contains an unallowed character
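For comparison, a self-contained reference sketch of the digits-only case; it spells out the doubling rule that the table-driven version above precomputes, and maps a sum that is already a multiple of ten to check digit 0:

def luhn_check_digit(base):
    """Luhn check digit for a digit-only string (reference sketch)."""
    total = 0
    for i, ch in enumerate(reversed(base)):
        d = int(ch)
        if i % 2 == 0:      # positions that get doubled once the check digit is appended
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return (10 - total % 10) % 10

assert luhn_check_digit("7992739871") == 3   # classic worked example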
juraj-google-style
def create_prefetch(self, addresses): with self._lock: for add in addresses: self._state[add] = _ContextFuture(address=add, wait_for_tree=True)
Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain).
codesearchnet
def insert(self, start_time: int, schedule: ScheduleComponent) -> 'ScheduleComponent': return ops.insert(self, start_time, schedule)
Return a new schedule with `schedule` inserted within `self` at `start_time`. Args: start_time: time to be inserted schedule: schedule to be inserted
juraj-google-style
def series_with_permutation(self, other): combined_permutation = tuple([self.permutation[p] for p in other.permutation]) return CPermutation.create(combined_permutation)
Compute the series product with another channel permutation circuit.

Args:
    other (CPermutation): The channel permutation circuit to compose with.

Returns:
    Circuit: The composite permutation circuit (could also be the identity
        circuit for n channels)
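The composition rule itself is just index lookup; a self-contained sketch with plain tuples, independent of the circuit classes:

def compose(first, second):
    # Mirrors combined_permutation = tuple(self.permutation[p] for p in other.permutation)
    return tuple(first[p] for p in second)

identity = (0, 1, 2)
swap_01 = (1, 0, 2)
cycle = (1, 2, 0)

assert compose(swap_01, swap_01) == identity              # swapping twice undoes itself
assert compose(cycle, compose(cycle, cycle)) == identity  # a 3-cycle applied three times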
juraj-google-style
def decstr2int(dec_str, decimals):
    if not isinstance(decimals, int):
        raise TypeError('decimals must be an integer')
    try:
        dollars, cents = dec_str.split('.')
    except ValueError:
        if '.' not in dec_str:
            dollars = dec_str
            cents = '0'
        else:
            raise ValueError('Invalid decimal string')
    else:
        if len(cents) < decimals:
            cents = cents.ljust(decimals, '0')
        elif decimals < 1:
            cents = '0'
        elif len(cents) > decimals:
            cents = cents[:decimals]
    try:
        cents = int(cents)
    except ValueError:
        cents = 0
    try:
        return int(int(dollars) * (10 ** decimals)) + cents
    except ValueError:
        raise ValueError('Invalid decimal string')
Returns an integer that has the value of the decimal string: dec_str*10^decimals Arguments: dec_str (string) that represents a decimal number decimals (int): number of decimals for creating the integer output Returns: (int) Raises: ValueError if dec_string is not a valid decimal string TypeError if decimals is not an integer Note: values may be truncated (not rounded).
juraj-google-style
def download_extract_tar(tar_url, folder, tar_filename=''): try: makedirs(folder) except OSError: if (not isdir(folder)): raise data_file = tar_filename if (not data_file): (fd, data_file) = mkstemp('.tar.gz') download(tar_url, os.fdopen(fd, 'wb')) else: download(tar_url, data_file) with tarfile.open(data_file) as tar: tar.extractall(path=folder)
Download and extract the tar at the url to the given folder Args: tar_url (str): URL of tar file to download folder (str): Location of parent directory to extract to. Doesn't have to exist tar_filename (str): Location to download tar. Default is to a temp file
codesearchnet
def id_range(self): if (len(self._anchor_points) == 0): return (0, 0) return (self._anchor_points[0].reading_id, self._anchor_points[(- 1)].reading_id)
Get the range of anchor reading_ids.

Returns:
    (int, int): The lowest and highest reading ids.

    If no reading ids have been loaded, (0, 0) is returned.
codesearchnet
def request_json(link, outfile, force_rerun_flag, outdir=None): if (not outdir): outdir = '' outfile = op.join(outdir, outfile) if force_rerun(flag=force_rerun_flag, outfile=outfile): text_raw = requests.get(link) my_dict = text_raw.json() with open(outfile, 'w') as f: json.dump(my_dict, f) log.debug('Loaded and saved {} to {}'.format(link, outfile)) else: with open(outfile, 'r') as f: my_dict = json.load(f) log.debug('Loaded {}'.format(outfile)) return my_dict
Download a file in JSON format from a web request Args: link: Link to web request outfile: Name of output file outdir: Directory of output file force_rerun_flag: If true, redownload the file Returns: dict: contents of the JSON request
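The core download-or-load-from-cache pattern, reduced to a standalone sketch without the force_rerun/outdir helpers used above:

import json
import os
import requests

def fetch_json_cached(link, outfile, force=False):
    if force or not os.path.exists(outfile):
        my_dict = requests.get(link).json()
        with open(outfile, 'w') as f:
            json.dump(my_dict, f)
    else:
        with open(outfile, 'r') as f:
            my_dict = json.load(f)
    return my_dict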
codesearchnet
def get_script_module(script_information, package='pylabcontrol', verbose=False): (module, _, _, _, _, _, _) = Script.get_script_information(script_information=script_information, package=package, verbose=verbose) return module
Wrapper to get the module for a script.

Args:
    script_information: information of the script. This can be
        - a dictionary
        - a Script instance
        - name of Script class
    package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit;
        only used when script_information is a string

Returns:
    module
codesearchnet
def async_call(self, *args, **kwargs): def after_autoconnect_callback(future): if self.is_connected(): self._call(*args, **kwargs) else: pass if ('callback' not in kwargs): kwargs['callback'] = discard_reply_cb if (not self.is_connected()): if self.autoconnect: connect_future = self.connect() cb = after_autoconnect_callback self.__connection._ioloop.add_future(connect_future, cb) else: error = ConnectionError('you are not connected and autoconnect=False') kwargs['callback'](error) else: self._call(*args, **kwargs)
Calls a redis command, waits for the reply and calls a callback.

The following options are available (not part of the redis command itself):

- callback
    Function called (with the result as argument) when the result
    is available. If not set, the reply is silently discarded. In
    case of errors, the callback is called with a
    TornadisException object as argument.

Args:
    *args: full redis command as variable length argument list or
        a Pipeline object (as a single argument).
    **kwargs: options as keyword parameters.

Examples:
    >>> def cb(result): pass
    >>> client.async_call("HSET", "key", "field", "val", callback=cb)
codesearchnet
def event_date(self, event_date): if not self.can_update(): self._tcex.handle_error(910, [self.type]) event_date = self._utils.format_datetime(event_date, date_format='%Y-%m-%dT%H:%M:%SZ') self._data['eventDate'] = event_date request = {'eventDate': event_date} return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
Updates the event_date.

Args:
    event_date: Converted to %Y-%m-%dT%H:%M:%SZ date format.

Returns:
    The response of the update request sent to the ThreatConnect API.
juraj-google-style
def parsed_top_level_errors(parsed, errors, component_type: str='') -> Errors: fn_cnt = 0 rel_cnt = 0 nested_cnt = 0 for key in parsed: if (parsed[key]['type'] == 'Function'): fn_cnt += 1 if (parsed[key]['type'] == 'Relation'): rel_cnt += 1 if (parsed[key]['type'] == 'Nested'): nested_cnt += 1 if (not component_type): if (nested_cnt > 1): errors.append(('Error', 'Too many nested objects - can only have one per BEL Assertion')) if nested_cnt: if (rel_cnt > 2): errors.append(('Error', 'Too many relations - can only have two in a nested BEL Assertion')) elif (fn_cnt > 4): errors.append(('Error', 'Too many BEL subject and object candidates')) elif (rel_cnt > 1): errors.append(('Error', 'Too many relations - can only have one in a BEL Assertion')) elif (fn_cnt > 2): errors.append(('Error', 'Too many BEL subject and object candidates')) elif (component_type == 'subject'): if (rel_cnt > 0): errors.append(('Error', 'Too many relations - cannot have any in a BEL Subject')) elif (fn_cnt > 1): errors.append(('Error', 'Too many BEL subject candidates - can only have one')) elif (component_type == 'object'): if nested_cnt: if (rel_cnt > 1): errors.append(('Error', 'Too many relations - can only have one in a nested BEL object')) elif (fn_cnt > 2): errors.append(('Error', 'Too many BEL subject and object candidates in a nested BEL object')) elif (rel_cnt > 0): errors.append(('Error', 'Too many relations - cannot have any in a BEL Subject')) elif (fn_cnt > 1): errors.append(('Error', 'Too many BEL subject candidates - can only have one')) return errors
Check full parse for errors

Args:
    parsed: dict of parsed BEL Assertion components, each with a 'type' of
        Function, Relation or Nested
    errors: list of (severity, message) tuples to which new errors are appended
    component_type: Empty string or 'subject' or 'object' to indicate that we
        are parsing the subject or object field input

Returns:
    Errors: the errors list with any top-level errors appended
codesearchnet
def extraction_data_statistics(path): with functions.DBContextManager(path) as session: extraction = session.query(models.Extraction).first() (X, y) = extraction.return_main_dataset() functions.verify_dataset(X, y) if (extraction.test_dataset['method'] == 'split_from_main'): (X, X_test, y, y_test) = train_test_split(X, y, test_size=extraction.test_dataset['split_ratio'], random_state=extraction.test_dataset['split_seed'], stratify=y) elif (extraction.test_dataset['method'] == 'source'): if (('source' not in extraction.test_dataset) or (not extraction.test_dataset['source'])): raise exceptions.UserError('Source is empty') extraction_code = extraction.test_dataset['source'] extraction_function = functions.import_object_from_string_code(extraction_code, 'extract_test_dataset') (X_test, y_test) = extraction_function() else: (X_test, y_test) = (None, None) extraction_code = extraction.meta_feature_generation['source'] return_splits_iterable = functions.import_object_from_string_code(extraction_code, 'return_splits_iterable') number_of_splits = 0 test_indices = [] try: for (train_idx, test_idx) in return_splits_iterable(X, y): number_of_splits += 1 test_indices.append(test_idx) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) test_indices = np.concatenate(test_indices) (X, y) = (X[test_indices], y[test_indices]) extraction_code = extraction.stacked_ensemble_cv['source'] return_splits_iterable = functions.import_object_from_string_code(extraction_code, 'return_splits_iterable') number_of_splits_stacked_cv = 0 try: for (train_idx, test_idx) in return_splits_iterable(X, y): number_of_splits_stacked_cv += 1 except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) data_stats = dict() data_stats['train_data_stats'] = functions.verify_dataset(X, y) if (X_test is not None): data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test) else: data_stats['test_data_stats'] = None data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits} data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv} extraction.data_statistics = data_stats session.add(extraction) session.commit()
Generates data statistics for the given data extraction setup stored in Xcessiv notebook. This is in rqtasks.py but not as a job yet. Temporarily call this directly while I'm figuring out Javascript lel. Args: path (str, unicode): Path to xcessiv notebook
codesearchnet
def call(method: Method, *args: Any, **kwargs: Any) -> Any: return validate_args(method, *args, **kwargs)(*args, **kwargs)
Validates arguments and then calls the method. Args: method: The method to call. *args, **kwargs: Arguments to the method. Returns: The "result" part of the JSON-RPC response (the return value from the method). Raises: TypeError: If arguments don't match function signature.
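The validate-then-call idea can be sketched with inspect.signature, which raises TypeError on a mismatch before the method body ever runs (validate_args in the snippet above presumably does something similar):

import inspect

def call_sketch(method, *args, **kwargs):
    inspect.signature(method).bind(*args, **kwargs)   # TypeError here if arguments don't fit
    return method(*args, **kwargs)

def add(a, b):
    return a + b

assert call_sketch(add, 1, b=2) == 3
try:
    call_sketch(add, 1, 2, 3)
except TypeError as exc:
    print("rejected:", exc)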
codesearchnet
def get_access_token(self, http=None, additional_claims=None): if (additional_claims is None): if ((self.access_token is None) or self.access_token_expired): self.refresh(None) return client.AccessTokenInfo(access_token=self.access_token, expires_in=self._expires_in()) else: (token, unused_expiry) = self._create_token(additional_claims) return client.AccessTokenInfo(access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)
Create a signed jwt. Args: http: unused additional_claims: dict, additional claims to add to the payload of the JWT. Returns: An AccessTokenInfo with the signed jwt
codesearchnet
def AddStopTime(self, stop, problems=None, schedule=None, **kwargs): if (problems is None): problems = problems_module.default_problem_reporter stoptime = self.GetGtfsFactory().StopTime(problems=problems, stop=stop, **kwargs) self.AddStopTimeObject(stoptime, schedule)
Add a stop to this trip. Stops must be added in the order visited.

Args:
    stop: A Stop object
    problems: ProblemReporter used when constructing the StopTime; defaults to
        the module-level default problem reporter
    schedule: Schedule passed along when adding the StopTime object
    kwargs: remaining keyword args passed to StopTime.__init__

Returns:
    None
codesearchnet
async def get_final_ranking(self) -> OrderedDict: if (self._state != TournamentState.complete.value): return None ranking = {} for p in self.participants: if (p.final_rank in ranking): ranking[p.final_rank].append(p) else: ranking[p.final_rank] = [p] return OrderedDict(sorted(ranking.items(), key=(lambda t: t[0])))
Get the ordered ranking of players

Returns:
    collections.OrderedDict[rank, List[Participant]]: the final ranking, or
        None if the tournament is not complete yet

Raises:
    APIException
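The grouping itself is straightforward; a self-contained sketch with plain (name, final_rank) pairs instead of Participant objects:

from collections import OrderedDict

def group_by_rank(pairs):
    ranking = {}
    for name, rank in pairs:
        ranking.setdefault(rank, []).append(name)
    return OrderedDict(sorted(ranking.items(), key=lambda t: t[0]))

result = group_by_rank([("alice", 2), ("bob", 1), ("carol", 1)])
assert list(result.items()) == [(1, ["bob", "carol"]), (2, ["alice"])]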
codesearchnet
def _snapshot_device_function_stack_metadata(self) -> list[traceable_stack.TraceableObject]: snapshot = [] for obj in self._device_function_stack.peek_traceable_objs(): obj_copy = obj.copy_metadata() obj_copy.obj = obj.obj.display_name snapshot.append(obj_copy) return snapshot
Return device function stack as a list of TraceableObjects. Returns: [traceable_stack.TraceableObject, ...] where each TraceableObject's .obj member is a displayable name for the user's argument to Graph.device, and the filename and lineno members point to the code location where Graph.device was called directly or indirectly by the user.
github-repos
def _is_autocomplete_valid(cur_commands, alias_command): parent_command = ' '.join(cur_commands[1:]) with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'r') as tab_completion_table_file: try: tab_completion_table = json.loads(tab_completion_table_file.read()) return alias_command in tab_completion_table and parent_command in tab_completion_table[alias_command] except Exception: return False
Determine whether autocomplete can be performed at the current state.

Args:
    cur_commands: The current commands typed in the console.
    alias_command: The alias command.

Returns:
    True if autocomplete can be performed.
juraj-google-style