Columns: code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (3 classes)
def order_by(self, field_path, **kwargs): query = query_mod.Query(self) return query.order_by(field_path, **kwargs)
Create an "order by" query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.order_by` for more information on this method. Args: field_path (str): A field path (``.``-delimited list of field names) on which to order the query results. kwargs (Dict[str, Any]): The keyword arguments to pass along to the query. The only supported keyword is ``direction``, see :meth:`~.firestore_v1beta1.query.Query.order_by` for more information. Returns: ~.firestore_v1beta1.query.Query: An "order by" query.
codesearchnet
def _tf_flatten_batch_dims(x, num_nonbatch_dims): shape = x.shape.as_list() assert (None not in shape) new_shape = ([list_product(shape[:(- num_nonbatch_dims)])] + shape[(- num_nonbatch_dims):]) if (new_shape != shape): x = tf.reshape(x, new_shape) return x
Flatten all but last num_nonbatch_dims into one dimension. Args: x: a tf.Tensor. num_nonbatch_dims: an integer. Returns: a tf.Tensor with 1 + num_nonbatch_dims dimensions.
codesearchnet
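For intuition, a minimal NumPy sketch of the same reshape idea, runnable without TensorFlow; the helper name flatten_batch_dims_np and the shapes are illustrative, not part of the original entry.

import numpy as np

def flatten_batch_dims_np(x, num_nonbatch_dims):
    shape = list(x.shape)
    batch = int(np.prod(shape[:-num_nonbatch_dims]))  # collapse all leading (batch) dims
    return x.reshape([batch] + shape[-num_nonbatch_dims:])

x = np.zeros((2, 3, 4, 5))
print(flatten_batch_dims_np(x, 2).shape)  # (6, 4, 5): the 2 and 3 batch dims are merged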
def get_geostationary_mask(area): h = area.proj_dict['h'] xmax, ymax = get_geostationary_angle_extent(area) xmax *= h ymax *= h x, y = area.get_proj_coords_dask() return ((x / xmax) ** 2 + (y / ymax) ** 2) <= 1
Compute a mask of the earth's shape as seen by a geostationary satellite Args: area (pyresample.geometry.AreaDefinition) : Corresponding area definition Returns: Boolean mask, True inside the earth's shape, False outside.
juraj-google-style
def swapdim(P, dim1=1, dim2=0): if not isinstance(P, Poly): return numpy.swapaxes(P, dim1, dim2) dim = P.dim shape = P.shape dtype = P.dtype if dim1==dim2: return P m = max(dim1, dim2) if P.dim <= m: P = chaospy.poly.dimension.setdim(P, m+1) dim = m+1 A = {} for key in P.keys: val = P.A[key] key = list(key) key[dim1], key[dim2] = key[dim2], key[dim1] A[tuple(key)] = val return Poly(A, dim, shape, dtype)
Swap the dim between two variables. Args: P (Poly): Input polynomial. dim1 (int): First dim dim2 (int): Second dim. Returns: (Poly): Polynomial with swapped dimensions. Examples: >>> x,y = variable(2) >>> P = x**4-y >>> print(P) q0^4-q1 >>> print(swapdim(P)) q1^4-q0
juraj-google-style
def _invalid_docstring_quote(self, quote, row, col=None): self.add_message( 'invalid-docstring-quote', line=row, args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote)), **self.get_offset(col) )
Add a message for an invalid docstring quote. Args: quote: The quote characters that were found. row: The row number the quote characters were found on. col: The column the quote characters were found on.
juraj-google-style
def ReadVFS(pathspec, offset, length, progress_callback=None): fd = VFSOpen(pathspec, progress_callback=progress_callback) fd.Seek(offset) return fd.Read(length)
Read from the VFS and return the contents. Args: pathspec: path to read from offset: number of bytes to skip length: number of bytes to read progress_callback: A callback to indicate that the open call is still working but needs more time. Returns: VFS file contents
codesearchnet
class MarkupLMProcessor(ProcessorMixin): feature_extractor_class = 'MarkupLMFeatureExtractor' tokenizer_class = ('MarkupLMTokenizer', 'MarkupLMTokenizerFast') parse_html = True def __call__(self, html_strings=None, nodes=None, xpaths=None, node_labels=None, questions=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding: if self.parse_html: if html_strings is None: raise ValueError('Make sure to pass HTML strings in case `parse_html` is set to `True`') if nodes is not None or xpaths is not None or node_labels is not None: raise ValueError("Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`") features = self.feature_extractor(html_strings) nodes = features['nodes'] xpaths = features['xpaths'] else: if html_strings is not None: raise ValueError('You have passed HTML strings but `parse_html` is set to `False`.') if nodes is None or xpaths is None: raise ValueError('Make sure to pass nodes and xpaths in case `parse_html` is set to `False`') if questions is not None and self.parse_html: if isinstance(questions, str): questions = [questions] encoded_inputs = self.tokenizer(text=questions if questions is not None else nodes, text_pair=nodes if questions is not None else None, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) return encoded_inputs def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names return tokenizer_input_names
Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single processor. [`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model. It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings. Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`. Args: feature_extractor (`MarkupLMFeatureExtractor`): An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input. tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`): An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input. parse_html (`bool`, *optional*, defaults to `True`): Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths.
github-repos
def __init__(self, env): self._env = env observ_shape = self._parse_shape(self._env.observation_space) observ_dtype = self._parse_dtype(self._env.observation_space) action_shape = self._parse_shape(self._env.action_space) action_dtype = self._parse_dtype(self._env.action_space) with tf.name_scope('environment'): self._observ = tf.Variable( tf.zeros(observ_shape, observ_dtype), name='observ', trainable=False) self._action = tf.Variable( tf.zeros(action_shape, action_dtype), name='action', trainable=False) self._reward = tf.Variable( 0.0, dtype=tf.float32, name='reward', trainable=False) self._done = tf.Variable( True, dtype=tf.bool, name='done', trainable=False) self._step = tf.Variable( 0, dtype=tf.int32, name='step', trainable=False)
Put an OpenAI Gym environment into the TensorFlow graph. Args: env: OpenAI Gym environment.
juraj-google-style
def sync_job_info(self, job_name): job_path = os.path.join(self._logdir, job_name) if (job_name not in self._monitored_jobs): self._create_job_info(job_path) self._monitored_jobs.add(job_name) else: self._update_job_info(job_path) expr_dirs = filter((lambda d: os.path.isdir(os.path.join(job_path, d))), os.listdir(job_path)) for expr_dir_name in expr_dirs: self.sync_trial_info(job_path, expr_dir_name) self._update_job_info(job_path)
Load information of the job with the given job name. 1. Traverse each experiment sub-directory and sync information for each trial. 2. Create or update the job information, together with the job meta file. Args: job_name (str): Name of the Tune experiment.
codesearchnet
def name_from_base(base, max_length=63, short=False): timestamp = sagemaker_short_timestamp() if short else sagemaker_timestamp() trimmed_base = base[:max_length - len(timestamp) - 1] return '{}-{}'.format(trimmed_base, timestamp)
Append a timestamp to the provided string. This function assures that the total length of the resulting string is not longer than the specified max length, trimming the input parameter if necessary. Args: base (str): String used as prefix to generate the unique name. max_length (int): Maximum length for the resulting string. short (bool): Whether or not to use a truncated timestamp. Returns: str: Input parameter with appended timestamp.
juraj-google-style
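A minimal sketch of the trimming rule described above, with a local strftime-based timestamp standing in for sagemaker_timestamp (which is not shown in this entry):

import datetime

def name_from_base_sketch(base, max_length=63):
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')[:-3]
    trimmed = base[:max_length - len(timestamp) - 1]  # leave room for '-' + timestamp
    return '{}-{}'.format(trimmed, timestamp)

print(len(name_from_base_sketch('a' * 100)))  # stays at or below 63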
def __init__(self, ary): self._dirty = True self._typed = None if isinstance(ary, (list, tuple, collections.Sequence)): self.data = ary elif isinstance(ary, ArrayWrapper): self.data = ary.data else: raise TypeError("Invalid value given to array validator: {0}" .format(ary)) logger.debug(fmt("Initializing ArrayWrapper {} with {}", self, ary))
Initialize a wrapper for the array Args: ary: (list-like, or ArrayWrapper)
juraj-google-style
def rename(df, **kwargs): return df.rename(columns={v: k for (k, v) in kwargs.items()})
Renames columns, where keyword argument values are the current names of columns and keys are the new names. Args: df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe. Kwargs: **kwargs: key:value pairs where keys are new names for columns and values are current names of columns.
codesearchnet
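A short usage sketch of the pipe-style rename above, assuming pandas is installed; the column names are made up.

import pandas as pd

def rename(df, **kwargs):
    # keyword names are the new column names, values are the current ones
    return df.rename(columns={v: k for (k, v) in kwargs.items()})

df = pd.DataFrame({'old_a': [1], 'old_b': [2]})
print(list(rename(df, a='old_a', b='old_b').columns))  # ['a', 'b']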
def register_gradient_tensor(self, x_tensor_name, gradient_tensor): if len(_gradient_debuggers) == 1 or self._is_active_context: self._check_same_graph(gradient_tensor) self._gradient_tensors[x_tensor_name] = gradient_tensor
Register the gradient tensor for an x-tensor. Args: x_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e., the tensor on the denominator of the differentiation. gradient_tensor: the gradient `tf.Tensor`.
github-repos
def TestIamPermissions(self, request, global_params=None): config = self.GetMethodConfig('TestIamPermissions') return self._RunMethod(config, request, global_params=global_params)
Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning. Args: request: (BigqueryTablesTestIamPermissionsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (TestIamPermissionsResponse) The response message.
github-repos
def dbname(self, value): self._dbname = value self._connectionXML.set('dbname', value)
Set the connection's database name property. Args: value: New name of the database. String. Returns: Nothing.
codesearchnet
def noise_get_turbulence( n: tcod.noise.Noise, f: Sequence[float], oc: float, typ: int = NOISE_DEFAULT, ) -> float: return float( lib.TCOD_noise_get_turbulence_ex( n.noise_c, ffi.new("float[4]", f), oc, typ ) )
Return the turbulence noise sampled from the ``f`` coordinate. Args: n (Noise): A Noise instance. f (Sequence[float]): The point to sample the noise from. typ (int): The noise algorithm to use. octaves (float): The level of detail. Should be more than 1. Returns: float: The sampled noise value.
juraj-google-style
def as_dimension(value): if isinstance(value, Dimension): return value else: return Dimension(value)
Converts the given value to a Dimension. A Dimension input will be returned unmodified. An input of `None` will be converted to an unknown Dimension. An integer input will be converted to a Dimension with that value. Args: value: The value to be converted. Returns: A Dimension corresponding to the given value.
github-repos
def enclose_points(points, clip_rect): point_array = ffi.new('SDL_Point[]', len(points)) for i, p in enumerate(points): point_array[i] = p._ptr enclosing_rect = Rect() if lib.SDL_EnclosePoints(point_array, len(points), clip_rect._ptr, enclosing_rect._ptr): return enclosing_rect else: return None
Return the minimal rectangle enclosing the given set of points Args: points (List[Point]): The set of points that the new Rect must enclose. clip_rect (Rect): A clipping Rect. Returns: Rect: A new Rect enclosing the given points.
juraj-google-style
def replace_with_operand(cls, old_builder: 'Builder', old_path: str, replacement_node: _evaluation.ExpressionNode) -> 'Builder': localized = copy.deepcopy(old_builder.node) localized.replace_operand(old_path, replacement_node) return cls(localized, old_builder._handler)
Returns a builder with the old path replaced with a new node. Args: old_builder: Builder with nodes to be copied into the new one. old_path: String of the old path to be replaced in the old_builder. If no path matches, then the old builder will be the same as the new builder. replacement_node: An expression node that will replace the node that matches the old_path. Returns: A builder with the new expression node tree.
github-repos
def create_single_payment(self, order_number, order_description, order_items, amount, return_url, contact=None, currency=None, lang=None, additional_params=None): return self.create_payment(contact, {'amount': amount, 'currency': (currency if (currency is not None) else settings.GOPAY_CURRENCY), 'lang': (lang if (lang is not None) else settings.GOPAY_LANG), 'additional_params': ([] if (additional_params is None) else [{'name': key, 'value': str(value)} for (key, value) in additional_params.items()]), 'order_number': str(order_number), 'order_description': order_description, 'items': [{'name': key, 'amount': value} for (key, value) in order_items.items()], 'callback': {'return_url': return_url, 'notification_url': '{}{}'.format(settings.GOPAY_DOMAIN, reverse('gopay_notify'))}})
Create a single payment. Args: contact: JSON describing a payer (see PaymentManager#create_contact) order_number: your identifier to the order which the payment is for order_description: description of the order which is shown to the user order_items: items in the order which are shown to the user (item name -> amount) amount: total amount of money which will be paid return_url: url for redirection after payment is processed currency: default is set in settings (GOPAY_CURRENCY) lang: default is set in settings (GOPAY_LANG) Returns: dict: payment status
codesearchnet
def encode(self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, bandwidth: Optional[float]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]: return_dict = return_dict if return_dict is not None else self.config.return_dict if bandwidth is None: bandwidth = self.config.target_bandwidths[0] if bandwidth not in self.config.target_bandwidths: raise ValueError(f"This model doesn't support the bandwidth {bandwidth}. Select one of {self.config.target_bandwidths}.") _, channels, input_length = input_values.shape if channels < 1 or channels > 2: raise ValueError(f'Number of audio channels must be 1 or 2, but got {channels}') chunk_length = self.config.chunk_length if chunk_length is None: chunk_length = input_length stride = input_length else: stride = self.config.chunk_stride if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() encoded_frames = [] scales = [] step = chunk_length - stride if input_length % stride - step != 0: raise ValueError('The input length is not properly padded for batched chunked decoding. Make sure to pad the input correctly.') for offset in range(0, input_length - step, stride): mask = padding_mask[..., offset:offset + chunk_length].bool() frame = input_values[:, :, offset:offset + chunk_length] encoded_frame, scale = self._encode_frame(frame, bandwidth, mask) encoded_frames.append(encoded_frame) scales.append(scale) encoded_frames = torch.stack(encoded_frames) if not return_dict: return (encoded_frames, scales) return EncodecEncoderOutput(encoded_frames, scales)
Encodes the input audio waveform into discrete codes. Args: input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Float values of the input audio waveform. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Padding mask used to pad the `input_values`. bandwidth (`float`, *optional*): The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented as bandwidth == 6.0 Returns: A list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling factors for each chunk when `normalize` is True. Each frames is a tuple `(codebook, scale)`, with `codebook` of shape `[batch_size, num_codebooks, frames]`.
github-repos
def nodeids(self, ivs=None, quantifier=None): if ivs is None: nids = list(self._nodeids) else: _vars = self._vars nids = [] for iv in ivs: if iv in _vars and IVARG_ROLE in _vars[iv]['refs']: nids.extend(_vars[iv]['refs'][IVARG_ROLE]) else: raise KeyError(iv) if quantifier is not None: nids = [n for n in nids if self.ep(n).is_quantifier()==quantifier] return nids
Return the list of nodeids given by *ivs*, or all nodeids. Args: ivs: the intrinsic variables of the predications to select; if `None`, return all nodeids (but see *quantifier*) quantifier: if `True`, only return nodeids of quantifiers; if `False`, only return non-quantifiers; if `None` (the default), return both
juraj-google-style
def normalize(inputs, epsilon=1e-8, scope="ln"): with tf.variable_scope(scope): inputs_shape = inputs.get_shape() params_shape = inputs_shape[-1:] mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True) beta = tf.Variable(tf.zeros(params_shape)) gamma = tf.Variable(tf.ones(params_shape)) normalized = (inputs - mean) / ((variance + epsilon) ** (.5)) outputs = gamma * normalized + beta return outputs
Applies layer normalization. Args: inputs: A tensor with 2 or more dimensions, where the first dimension has `batch_size`. epsilon: A floating number. A very small number for preventing ZeroDivision Error. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A tensor with the same shape and data dtype as `inputs`.
juraj-google-style
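For intuition, a NumPy sketch of the same layer-normalization arithmetic (per-example mean and variance over the last axis), with gamma and beta left at their initial values of 1 and 0; the input values are arbitrary.

import numpy as np

x = np.array([[1.0, 2.0, 3.0], [4.0, 6.0, 8.0]])
mean = x.mean(axis=-1, keepdims=True)
var = x.var(axis=-1, keepdims=True)
normalized = (x - mean) / np.sqrt(var + 1e-8)
print(normalized.mean(axis=-1))  # close to 0 for each row
print(normalized.std(axis=-1))   # close to 1 for each row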
def parse_requirements(file_): modules = [] delim = ['<', '>', '=', '!', '~'] try: f = open_func(file_, 'r') except OSError: logging.error('Failed on file: {}'.format(file_)) raise else: data = [x.strip() for x in f.readlines() if (x != '\n')] finally: f.close() data = [x for x in data if x[0].isalpha()] for x in data: if (not any([(y in x) for y in delim])): modules.append({'name': x, 'version': None}) for y in x: if (y in delim): module = x.split(y) module_name = module[0] module_version = module[(- 1)].replace('=', '') module = {'name': module_name, 'version': module_version} if (module not in modules): modules.append(module) break return modules
Parse a requirements formatted file. Traverse a string until a delimiter is detected, then split at said delimiter, get module name by element index, create a dict consisting of module:version, and add dict to list of parsed modules. Args: file_: File to parse. Raises: OSError: If there are any issues accessing the file. Returns: list: The parsed modules, excluding comments.
codesearchnet
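A self-contained sketch of the splitting rule described above, applied to in-memory strings instead of a file; the package names and the parse_line helper are illustrative only.

delim = ['<', '>', '=', '!', '~']

def parse_line(line):
    for ch in line:
        if ch in delim:
            name, _, version = line.partition(ch)
            return {'name': name, 'version': version.lstrip('=<>!~')}
    return {'name': line, 'version': None}

print(parse_line('requests>=2.0'))  # {'name': 'requests', 'version': '2.0'}
print(parse_line('pyyaml'))         # {'name': 'pyyaml', 'version': None}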
def poll(self, timeout_ms=None, future=None): if (future is not None): timeout_ms = 100 elif (timeout_ms is None): timeout_ms = self.config['request_timeout_ms'] elif (not isinstance(timeout_ms, (int, float))): raise TypeError(('Invalid type for timeout: %s' % type(timeout_ms))) responses = [] while True: with self._lock: if self._closed: break for node_id in list(self._connecting): self._maybe_connect(node_id) metadata_timeout_ms = self._maybe_refresh_metadata() if ((future is not None) and future.is_done): timeout = 0 else: idle_connection_timeout_ms = self._idle_expiry_manager.next_check_ms() timeout = min(timeout_ms, metadata_timeout_ms, idle_connection_timeout_ms, self.config['request_timeout_ms']) timeout = max(0, (timeout / 1000)) self._poll(timeout) responses.extend(self._fire_pending_completed_requests()) if ((future is None) or future.is_done): break return responses
Try to read and write to sockets. This method will also attempt to complete node connections, refresh stale metadata, and run previously-scheduled tasks. Arguments: timeout_ms (int, optional): maximum amount of time to wait (in ms) for at least one response. Must be non-negative. The actual timeout will be the minimum of timeout, request timeout and metadata timeout. Default: request_timeout_ms future (Future, optional): if provided, blocks until future.is_done Returns: list: responses received (can be empty)
codesearchnet
def _apply_conv(self, inputs, w): tiled_weights = tf.tile(w, [1, 1, self._input_channels, 1]) outputs = tf.nn.depthwise_conv2d(inputs, tiled_weights, strides=self.stride, padding=self._conv_op_padding, data_format=self._data_format) return outputs
Apply a depthwise_conv2d operation on `inputs` using variable `w`. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. w: A weight matrix of the same type as `inputs`. Returns: outputs: The result of the convolution operation on `inputs`.
juraj-google-style
def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]): gold = arraylike_to_numpy(gold) if (len(ignore_in_pred) > 0): raise ValueError('ignore_in_pred not defined for ROC-AUC score.') keep = [(x not in ignore_in_gold) for x in gold] gold = gold[keep] probs = probs[(keep, :)] gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy() return skm.roc_auc_score(gold_s, probs)
Compute the ROC AUC score, given the gold labels and predicted probs. Args: gold: A 1d array-like of gold labels probs: A 2d array-like of predicted probabilities ignore_in_gold: A list of labels for which elements having that gold label will be ignored. Returns: roc_auc_score: The (float) roc_auc score
codesearchnet
def get_ggt(self, n, u): gk = self[0].einsum_sequence([n, u, n, u]) result = -(2*gk*np.outer(u, u) + self[0].einsum_sequence([n, n]) + self[1].einsum_sequence([n, u, n, u])) / (2*gk) return result
Gets the Generalized Gruneisen tensor for a given third-order elastic tensor expansion. Args: n (3x1 array-like): normal mode direction u (3x1 array-like): polarization direction
juraj-google-style
def generate_scaling_plot(timing_data, title, ylabel, description, plot_file): proc_counts = timing_data['proc_counts'] if len(proc_counts) > 2: plt.figure(figsize=(10, 8), dpi=150) plt.title(title) plt.xlabel("Number of processors") plt.ylabel(ylabel) for case, case_color in zip(['bench', 'model'], ['#0000ff', '#ff0000']): case_data = timing_data[case] means = case_data['means'] mins = case_data['mins'] maxs = case_data['maxs'] plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5) plt.plot(proc_counts, means, 'o-', color=case_color, label=case) plt.legend(loc='best') else: plt.figure(figsize=(5, 3)) plt.axis('off') plt.text(0.4, 0.8, "ERROR:") plt.text(0.0, 0.6, "Not enough data points to draw scaling plot") plt.text(0.0, 0.44, "To generate this data rerun BATS with the") plt.text(0.0, 0.36, "performance option enabled.") if livvkit.publish: plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return elements.image(title, description, os.path.basename(plot_file))  # NOTE: the two hex color strings in zip(...) were truncated in the source; the values above are placeholders
Generate a scaling plot. Args: timing_data: data returned from a `*_scaling` method title: the title of the plot ylabel: the y-axis label of the plot description: a description of the plot plot_file: the file to write out to Returns: an image element containing the plot file and metadata
juraj-google-style
def autobuild_bootstrap_file(file_name, image_list): family = utilities.get_family('module_settings.json') target = family.platform_independent_target() resolver = ProductResolver.Create() env = Environment(tools=[]) output_dir = target.build_dirs()['output'] build_dir = target.build_dirs()['build'] build_output_name = os.path.join(build_dir, file_name) full_output_name = os.path.join(output_dir, file_name) processed_input_images = [] for image_name in image_list: image_info = resolver.find_unique('firmware_image', image_name) image_path = image_info.full_path hex_path = arm.ensure_image_is_hex(image_path) processed_input_images.append(hex_path) env.Command(build_output_name, processed_input_images, action=Action(arm.merge_hex_executables, ('Merging %d hex files into $TARGET' % len(processed_input_images)))) env.Command(full_output_name, build_output_name, Copy('$TARGET', '$SOURCE'))
Combine multiple firmware images into a single bootstrap hex file. The files listed in image_list must be products of either this tile or any dependency tile and should correspond exactly with the base name listed on the products section of the module_settings.json file of the corresponding tile. They must be listed as firmware_image type products. This function keeps a global map of all of the intermediate files that it has had to create so that we don't try to build them multiple times. Args: file_name(str): Full name of the output bootstrap hex file. image_list(list of str): List of files that will be combined into a single hex file that will be used to flash a chip.
codesearchnet
def __init__(self, prefix_length: int, option_suffix: Text = '') -> None: super().__init__(option_suffix) self._prefix_length = prefix_length
Create a new instance. Args: prefix_length: Amount of characters to skip at the beginning of the entry
juraj-google-style
def _build_inner(self, slice_content: 'Iterable[cfg.Variable]') -> 'tuple[list[_base.BaseValue], set[int]]': inner = [] ellipses = set() for var in slice_content: if len(var.bindings) > 1: self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, var.data) inner.append(self.ctx.convert.unsolvable) else: val = var.bindings[0].data if val is self.ctx.convert.ellipsis: ellipses.add(len(inner)) inner.append(self.ctx.convert.unsolvable) else: inner.append(val) return (inner, ellipses)
Build the list of parameters. Args: slice_content: The iterable of variables to extract parameters from. Returns: A tuple of a list of parameters and a set of indices at which an ellipsis was replaced with Any.
github-repos
def GetHeaderGuardCPPVariable(filename): filename = re.sub('_flymake\\.h$', '.h', filename) filename = re.sub('/\\.flymake/([^/]*)$', '/\\1', filename) filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: suffix = os.sep if (suffix == '\\'): suffix += '\\' file_path_from_root = re.sub((('^' + _root) + suffix), '', file_path_from_root) return (re.sub('[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_')
Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file.
codesearchnet
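The final character substitution is easy to check in isolation; this sketch skips the flymake and repository-root handling, and the path is made up.

import re

path = 'base/logging.h'
guard = re.sub(r'[^a-zA-Z0-9]', '_', path).upper() + '_'
print(guard)  # BASE_LOGGING_H_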
def _scoped_subscribe(tensor, side_effects, control_cache): with ops.device(tensor.device): with _preserve_control_flow_context(tensor): return _subscribe(tensor, side_effects, control_cache)
Helper method that subscribes a single tensor to a list of side_effects. This is a thin wrapper around `_subscribe` and ensures that the side effect ops are added within the same device and control flow context of the subscribed tensor. Args: tensor: The `tf.Tensor` to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it was already been subscribed.
github-repos
def _acquire(self, uuid_path): for index in range(self._min_third_octet, (self._max_third_octet + 1)): lease = self.create_lease_object_from_idx(index) if self._lease_valid(lease): continue self._take_lease(lease, uuid_path, safe=False) return lease.to_ip_network() raise LagoSubnetLeaseStoreFullException(self.get_allowed_range())
Lease a free network for the given uuid path Args: uuid_path (str): Path to the uuid file of a :class:`lago.Prefix` Returns: netaddr.IPNetwork: Which represents the selected subnet Raises: LagoSubnetLeaseException: If the store is full
codesearchnet
def _is_none_or_undef(value): return value is None or isinstance(value, variables.UndefinedReturnValue) or isinstance(value, variables.Undefined)
Tests whether a value is None or undefined. AutoGraph represents undefined symbols using special objects of type Undefined or UndefinedReturnValue. Args: value: value to test Returns: Boolean
github-repos
def __register_types(self): try: for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.types']['plugins']: cls = entry_point.load() self.types[ResourceType.get(cls.resource_type).resource_type_id] = cls logger.debug('Registered resource type {}'.format(cls.__name__)) except SQLAlchemyError as ex: logger.warning('Failed loading type information: {}'.format(ex))
Iterates all entry points for resource types and registers a `resource_type_id` to class mapping Returns: `None`
codesearchnet
def serialize_sparse_tensors(tensors): ret = nest.pack_sequence_as(tensors, [sparse_ops.serialize_sparse(tensor, out_type=dtypes.variant) if isinstance(tensor, sparse_tensor.SparseTensor) else tensor for tensor in nest.flatten(tensors)]) return ret
Serializes sparse tensors. Args: tensors: a tensor structure to serialize. Returns: `tensors` with any sparse tensors replaced by their serialized version.
github-repos
def training_env(): from sagemaker_containers import _env return _env.TrainingEnv(resource_config=_env.read_resource_config(), input_data_config=_env.read_input_data_config(), hyperparameters=_env.read_hyperparameters())
Create a TrainingEnv. Returns: TrainingEnv: an instance of TrainingEnv
codesearchnet
def _tensor_proto_to_health_pill(self, tensor_event, node_name, device, output_slot): return self._process_health_pill_value(wall_time=tensor_event.wall_time, step=tensor_event.step, device_name=device, output_slot=output_slot, node_name=node_name, tensor_proto=tensor_event.tensor_proto)
Converts an event_accumulator.TensorEvent to a HealthPillEvent. Args: tensor_event: The event_accumulator.TensorEvent to convert. node_name: The name of the node (without the output slot). device: The device. output_slot: The integer output slot this health pill is relevant to. Returns: A HealthPillEvent.
codesearchnet
def rtt_control(self, command, config): config_byref = ctypes.byref(config) if config is not None else None res = self._dll.JLINK_RTTERMINAL_Control(command, config_byref) if res < 0: raise errors.JLinkRTTException(res) return res
Issues an RTT Control command. All RTT control is done through a single API call which expects specifically laid-out configuration structures. Args: self (JLink): the ``JLink`` instance command (int): the command to issue (see enums.JLinkRTTCommand) config (ctypes type): the configuration to pass by reference. Returns: An integer containing the result of the command.
juraj-google-style
def run(inputs, program, outputs): root = tempfile.mkdtemp() try: cwd = os.getcwd() for (fake, real) in inputs: parent = os.path.join(root, os.path.dirname(fake)) if (not os.path.exists(parent)): os.makedirs(parent) if (hasattr(os, 'symlink') and (not (os.name == 'nt'))): os.symlink(os.path.join(cwd, real), os.path.join(root, fake)) else: shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake)) if (subprocess.call((program + [root])) != 0): return 1 for (fake, real) in outputs: shutil.copyfile(os.path.join(root, fake), real) return 0 finally: try: shutil.rmtree(root) except EnvironmentError: pass
Creates temp symlink tree, runs program, and copies back outputs. Args: inputs: List of fake paths to real paths, which are used for symlink tree. program: List containing real path of program and its arguments. The execroot directory will be appended as the last argument. outputs: List of fake outputted paths to copy back to real paths. Returns: 0 if succeeded or nonzero if failed.
codesearchnet
def EncodeToBytes(data): if data is None: return b'' if isinstance(data, bytes): return data s = str(data) try: return s.encode('iso-8859-1') except UnicodeEncodeError: pass try: return s.encode(GetConsoleAttr().GetEncoding()) except UnicodeEncodeError: pass return s.encode('utf-8')
Encode data to bytes. The primary use case is for base64/mime style 7-bit ascii encoding where the encoder input must be bytes. "safe" means that the conversion always returns bytes and will not raise codec exceptions. If data is text then an 8-bit ascii encoding is attempted, then the console encoding, and finally utf-8. Args: data: Any bytes, string, or object that has str() or unicode() methods. Returns: A bytes string representation of the data.
github-repos
def split(self, path): path = path.strip() if not path.startswith(GCSFileSystem.GCS_PREFIX): raise ValueError('Path %r must be GCS path.' % path) prefix_len = len(GCSFileSystem.GCS_PREFIX) last_sep = path[prefix_len:].rfind('/') if last_sep >= 0: last_sep += prefix_len if last_sep > 0: return (path[:last_sep], path[last_sep + 1:]) elif last_sep < 0: return (path, '') else: raise ValueError('Invalid path: %s' % path)
Splits the given path into two parts. Splits the path into a pair (head, tail) such that tail contains the last component of the path and head contains everything up to that. Head will include the GCS prefix ('gs://'). Args: path: path as a string Returns: a pair of path components as strings.
github-repos
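A standalone sketch of the head/tail split described above for a well-formed path; the bucket and object names are made up, and the error handling of the original is omitted.

path = 'gs://my-bucket/logs/2024/run.txt'
prefix_len = len('gs://')
last_sep = path.rfind('/', prefix_len)
head, tail = path[:last_sep], path[last_sep + 1:]
print((head, tail))  # ('gs://my-bucket/logs/2024', 'run.txt')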
def _ContainsAny(self, verb, expected): if len(expected) == 1 and expected[0] in self._actual: return if expected: try: actual_set = set(self._actual) except TypeError: actual_set = self._actual for i in expected: if i in actual_set: return self._FailComparingValues(verb, expected)
Determines if the subject contains any of the expected elements. Helper function for ContainsAnyIn() and ContainsAnyOf(). Args: verb: string describing how the expected elements should be contained. expected: iterable of objects that should be contained in the subject. Returns: None if the subject contains any of the expected elements. Raises: TruthAssertionError: the subject is missing all of the expected elements.
github-repos
def _calculate_scores(self, query, key): return NotImplementedError
Calculates attention scores. Args: query: Query tensor of shape `[batch_size, Tq, dim]`. key: Key tensor of shape `[batch_size, Tv, dim]`. Returns: Tensor of shape `[batch_size, Tq, Tv]`.
github-repos
def compress(self, value: List[str]) -> value_class: localized_value = self.value_class() for (lang_code, _), value in zip(settings.LANGUAGES, value): localized_value.set(lang_code, value) return localized_value
Compresses the values from individual fields into a single :see:LocalizedValue instance. Arguments: value: The values from all the widgets. Returns: A :see:LocalizedValue containing all the value in several languages.
juraj-google-style
def _AddOption(self, name): if (name in [option.name for option in self.options]): raise TextFSMTemplateError(('Duplicate option "%s"' % name)) try: option = self._options_cls.GetOption(name)(self) except AttributeError: raise TextFSMTemplateError(('Unknown option "%s"' % name)) self.options.append(option)
Add an option to this Value. Args: name: (str), the name of the Option to add. Raises: TextFSMTemplateError: If option is already present or the option does not exist.
codesearchnet
def __init__(self, usb, chunk_kb=1024): self.usb = usb self.chunk_kb = chunk_kb
Constructs a FastbootProtocol instance. Args: usb: UsbHandle instance. chunk_kb: Packet size. For older devices, 4 may be required.
juraj-google-style
def GetFileEntryByPath(self, path): if (path is None): return None (file_entry_type, _) = self._paths.get(path, (None, None)) if (not file_entry_type): return None path_spec = fake_path_spec.FakePathSpec(location=path) return fake_file_entry.FakeFileEntry(self._resolver_context, self, path_spec, file_entry_type=file_entry_type)
Retrieves a file entry for a path. Args: path (str): path of the file entry. Returns: FakeFileEntry: a file entry or None if not available.
codesearchnet
def service_info(self, short_name): if (short_name not in self.services): raise ArgumentError('Unknown service name', short_name=short_name) info = {} info['short_name'] = short_name info['long_name'] = self.services[short_name]['state'].long_name info['preregistered'] = self.services[short_name]['state'].preregistered return info
Get static information about a service. Args: short_name (string): The short name of the service to query Returns: dict: A dictionary with the long_name and preregistered info on this service.
codesearchnet
def get_concatenated_pdf_from_disk(filenames: Iterable[str], start_recto: bool=True) -> bytes: if start_recto: writer = PdfFileWriter() for filename in filenames: if filename: if ((writer.getNumPages() % 2) != 0): writer.addBlankPage() writer.appendPagesFromReader(PdfFileReader(open(filename, 'rb'))) return pdf_from_writer(writer) else: merger = PdfFileMerger() for filename in filenames: if filename: merger.append(open(filename, 'rb')) return pdf_from_writer(merger)
Concatenates PDFs from disk and returns them as an in-memory binary PDF. Args: filenames: iterable of filenames of PDFs to concatenate start_recto: start a new right-hand page for each new PDF? Returns: concatenated PDF, as ``bytes``
codesearchnet
def _get_executor_init(self, workers): def pool_fn(seqs): pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, None, get_worker_id_queue())) _DATA_POOLS.add(pool) return pool return pool_fn
Gets the Pool initializer for multiprocessing. Args: workers: Number of workers. Returns: Function, a function to initialize the pool.
github-repos
def AddTripDecoration(self, triplist, color="#ff0000"): tmpstr = self._DrawTrips(triplist,color) self._decorators.append(tmpstr)  # NOTE: the default value of color was truncated in the source; "#ff0000" is a placeholder hex string
Flushes existing decorations and highlights the given trips. Args: # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] # An optional string with a html color code color: "#fff"
juraj-google-style
def dp990(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `dp990`'.format(value)) self._dp990 = value
Corresponds to IDD Field `dp990` Dew-point temperature corresponding to 90.0% annual cumulative frequency of occurrence (cold conditions) Args: value (float): value for IDD Field `dp990` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def get_statistics(self, id_or_uri, port_name=''): uri = (self._client.build_uri(id_or_uri) + '/statistics') if port_name: uri = ((uri + '/') + port_name) return self._client.get(uri)
Gets the statistics from an interconnect. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. port_name (str): A specific port name of an interconnect. Returns: dict: The statistics for the interconnect that matches id.
codesearchnet
def _logsum_expbig_minus_expsmall(big, small): with tf.name_scope("logsum_expbig_minus_expsmall"): return tf.math.log1p(-tf.exp(small - big)) + big
Stable evaluation of `Log[exp{big} - exp{small}]`. To work correctly, we should have the pointwise relation: `small <= big`. Args: big: Floating-point `Tensor` small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable shape. Returns: `Tensor` of same `dtype` of `big` and broadcast shape.
juraj-google-style
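A quick numeric check of why the rewritten form big + log1p(-exp(small - big)) is preferred over the naive log(exp(big) - exp(small)); the values are arbitrary.

import math

big, small = 1000.0, 999.0
stable = big + math.log1p(-math.exp(small - big))
print(stable)  # about 999.5413
# math.log(math.exp(big) - math.exp(small)) would raise OverflowError for these inputs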
def create(self, name, **request_parameters): check_type(name, basestring, may_be_none=False) post_data = dict_from_items_with_values(request_parameters, name=name) json_data = self._session.post(API_ENDPOINT, json=post_data) return self._object_factory(OBJECT_TYPE, json_data)
Create a team. The authenticated user is automatically added as a member of the team. Args: name(basestring): A user-friendly name for the team. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: Team: A Team object with the details of the created team. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def _get_client_by_hostname(self, hostname): print('Searching for client: {0:s}'.format(hostname)) try: search_result = self.grr_api.SearchClients(hostname) except grr_errors.UnknownError as exception: self.state.add_error('Could not search for host {0:s}: {1!s}'.format(hostname, exception), critical=True) return None result = [] for client in search_result: if (hostname.lower() in client.data.os_info.fqdn.lower()): result.append((client.data.last_seen_at, client)) if (not result): self.state.add_error('Could not get client_id for {0:s}'.format(hostname), critical=True) return None (last_seen, client) = sorted(result, key=(lambda x: x[0]), reverse=True)[0] last_seen_datetime = datetime.datetime.utcfromtimestamp((last_seen / 1000000)) last_seen_seconds = (datetime.datetime.utcnow() - last_seen_datetime).total_seconds() last_seen_minutes = int(round((last_seen_seconds / 60))) print('{0:s}: Found active client'.format(client.client_id)) print('Found active client: {0:s}'.format(client.client_id)) print('Client last seen: {0:s} ({1:d} minutes ago)'.format(last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'), last_seen_minutes)) return client
Search GRR by hostname and get the latest active client. Args: hostname: hostname to search for. Returns: GRR API Client object Raises: DFTimewolfError: if no client ID found for hostname.
codesearchnet
def add_gene(self, gene): logger.debug("Adding gene {0} to variant {1}".format( gene, self['variant_id'])) self['genes'].append(gene)
Add the information of a gene This adds a gene dict to variant['genes'] Args: gene (dict): A gene dictionary
juraj-google-style
def create_branch(self, branch_name: str): LOGGER.info('creating branch: %s', branch_name) self._validate_branch_name(branch_name) if (branch_name in self.list_branches()): LOGGER.error('branch already exists') sys.exit((- 1)) new_branch = self.repo.create_head(branch_name) new_branch.commit = self.repo.head.commit
Creates a new branch Args: branch_name: name of the branch
codesearchnet
def add_arguments(self, parser): parser.add_argument('name', nargs=1, choices=['kinetis'], help='name of MCU to unlock') return self.add_common_arguments(parser, True)
Adds the unlock command arguments to the parser. Args: self (UnlockCommand): the ``UnlockCommand`` instance parser (argparse.ArgumentParser): the parser to add the arguments to Returns: ``None``
codesearchnet
def normalize(self, inplace=False): if inplace: nrm = self.norm() self.data /= nrm return None nrm = self.norm() data_copy = np.array(self.data, copy=True) data_copy /= nrm return Quaternion(data_copy)
Normalizes a Quaternion to unit length so that it represents a valid rotation. Args: inplace (bool): Do an inplace normalization. Returns: Quaternion: Normalized quaternion.
juraj-google-style
def calc_health_pill(tensor): health_pill = ([0.0] * 14) if (not isinstance(tensor, np.ndarray)): return health_pill health_pill[0] = 1.0 if (not (np.issubdtype(tensor.dtype, np.float) or np.issubdtype(tensor.dtype, np.complex) or np.issubdtype(tensor.dtype, np.integer) or (tensor.dtype == np.bool))): return None health_pill[1] = float(np.size(tensor)) nan_mask = np.isnan(tensor) inf_mask = np.isinf(tensor) health_pill[2] = float(np.sum(nan_mask)) health_pill[3] = float(np.sum((tensor == (- np.inf)))) health_pill[4] = float(np.sum(np.logical_and(np.logical_not(inf_mask), (tensor < 0.0)))) health_pill[5] = float(np.sum((tensor == 0.0))) health_pill[6] = float(np.sum(np.logical_and(np.logical_not(inf_mask), (tensor > 0.0)))) health_pill[7] = float(np.sum((tensor == np.inf))) finite_subset = tensor[np.logical_and(np.logical_not(nan_mask), np.logical_not(inf_mask))] if np.size(finite_subset): health_pill[8] = float(np.min(finite_subset)) health_pill[9] = float(np.max(finite_subset)) health_pill[10] = float(np.mean(finite_subset)) health_pill[11] = float(np.var(finite_subset)) else: health_pill[8] = np.inf health_pill[9] = (- np.inf) health_pill[10] = np.nan health_pill[11] = np.nan health_pill[12] = (- 1.0) health_pill[13] = float(len(tensor.shape)) health_pill.extend([float(x) for x in tensor.shape]) return health_pill
Calculate health pill of a tensor. Args: tensor: An instance of `np.array` (for initialized tensors) or `tensorflow.python.debug.lib.debug_data.InconvertibleTensorProto` (for unininitialized tensors). Returns: If `tensor` is an initialized tensor of numeric or boolean types: the calculated health pill, as a `list` of `float`s. Else if `tensor` is an initialized tensor with `string`, `resource` or any other non-numeric types: `None`. Else (i.e., if `tensor` is uninitialized): An all-zero `list`, with the first element signifying that the tensor is uninitialized.
codesearchnet
def attention_params_simple( mesh, io_dim, kv_dim, heads_dim, variable_dtype): return AttentionParams( mesh, query_input_dim=io_dim, memory_input_dim=io_dim, output_dim=io_dim, key_dim=kv_dim, value_dim=kv_dim, query_heads_dims=[heads_dim], memory_heads_dims=[heads_dim], variable_dtype=variable_dtype)
Common case attention parameters. Args: mesh: a Mesh io_dim: a Dimension (channels dimension of inputs and outputs) kv_dim: a Dimension (channels in keys and values) heads_dim: a Dimension (number of attention "heads") variable_dtype: a mtf.VariableDType Returns: an AttentionParams
juraj-google-style
def create_knowledge_base(project_id, display_name): import dialogflow_v2beta1 as dialogflow client = dialogflow.KnowledgeBasesClient() project_path = client.project_path(project_id) knowledge_base = dialogflow.types.KnowledgeBase( display_name=display_name) response = client.create_knowledge_base(project_path, knowledge_base) print('Knowledge Base created:\n') print('Display Name: {}\n'.format(response.display_name)) print('Knowledge ID: {}\n'.format(response.name))
Creates a Knowledge base. Args: project_id: The GCP project linked with the agent. display_name: The display name of the Knowledge base.
juraj-google-style
def notify(self, subsystem, recipient, subject, body_html, body_text): if not re.match(RGX_EMAIL_VALIDATION_PATTERN, recipient, re.I): raise ValueError('Invalid recipient provided') email = Email() email.timestamp = datetime.now() email.subsystem = subsystem email.sender = self.sender email.recipients = recipient email.subject = subject email.uuid = uuid.uuid4() email.message_html = body_html email.message_text = body_text method = dbconfig.get('method', NS_EMAIL, 'ses') try: if method == 'ses': self.__send_ses_email([recipient], subject, body_html, body_text) elif method == 'smtp': self.__send_smtp_email([recipient], subject, body_html, body_text) else: raise ValueError('Invalid email method: {}'.format(method)) db.session.add(email) db.session.commit() except Exception as ex: raise EmailSendError(ex)
Method to send a notification. A plugin may use only part of the information, but all fields are required. Args: subsystem (`str`): Name of the subsystem originating the notification recipient (`str`): Recipient email address subject (`str`): Subject / title of the notification body_html (`str`): HTML formatted version of the message body_text (`str`): Text formatted version of the message Returns: `None`
juraj-google-style
def replace_species(self, species_mapping): species_mapping = {get_el_sp(k): v for k, v in species_mapping.items()} sp_to_replace = set(species_mapping.keys()) sp_in_structure = set(self.composition.keys()) if not sp_in_structure.issuperset(sp_to_replace): warnings.warn( "Some species to be substituted are not present in " "structure. Pls check your input. Species to be " "substituted = %s; Species in structure = %s" % (sp_to_replace, sp_in_structure)) for site in self._sites: if sp_to_replace.intersection(site.species): c = Composition() for sp, amt in site.species.items(): new_sp = species_mapping.get(sp, sp) try: c += Composition(new_sp) * amt except Exception: c += {new_sp: amt} site.species = c
Swap species. Args: species_mapping (dict): dict of species to swap. Species can be elements too. E.g., {Element("Li"): Element("Na")} performs a Li for Na substitution. The second species can be a sp_and_occu dict. For example, a site with 0.5 Si that is passed the mapping {Element('Si): {Element('Ge'):0.75, Element('C'):0.25} } will have .375 Ge and .125 C.
juraj-google-style
def has_access(user, required_roles, match_all=True): if ROLE_ADMIN in user.roles: return True if isinstance(required_roles, str): if required_roles in user.roles: return True return False if match_all: for role in required_roles: if role not in user.roles: return False return True else: for role in required_roles: if role in user.roles: return True return False
Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply Args: user (:obj:`User`): User object required_roles (`list` of `str`): List of roles that the user must have applied match_all (`bool`): If true, all the required_roles must be applied to the user, else any one match will return `True` Returns: `bool`
juraj-google-style
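A self-contained sketch of the role check above, using a stand-in namedtuple for the User object; the value ROLE_ADMIN == 'admin' is an assumption, not taken from the entry.

from collections import namedtuple

ROLE_ADMIN = 'admin'  # assumed sentinel role name
User = namedtuple('User', 'roles')

def has_access(user, required_roles, match_all=True):
    if ROLE_ADMIN in user.roles:
        return True
    if isinstance(required_roles, str):
        return required_roles in user.roles
    matches = [role in user.roles for role in required_roles]
    return all(matches) if match_all else any(matches)

auditor = User(roles=['auditor'])
print(has_access(auditor, ['auditor', 'editor'], match_all=True))   # False
print(has_access(auditor, ['auditor', 'editor'], match_all=False))  # True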
def _ProcessSources( self, source_path_specs, storage_writer, filter_find_specs=None): if self._processing_profiler: self._processing_profiler.StartTiming('process_sources') self._status = definitions.STATUS_INDICATOR_COLLECTING self._number_of_consumed_event_tags = 0 self._number_of_consumed_events = 0 self._number_of_consumed_reports = 0 self._number_of_consumed_sources = 0 self._number_of_consumed_warnings = 0 self._number_of_produced_event_tags = 0 self._number_of_produced_events = 0 self._number_of_produced_reports = 0 self._number_of_produced_sources = 0 self._number_of_produced_warnings = 0 path_spec_generator = self._path_spec_extractor.ExtractPathSpecs( source_path_specs, find_specs=filter_find_specs, recurse_file_system=False, resolver_context=self._resolver_context) for path_spec in path_spec_generator: if self._abort: break event_source = event_sources.FileEntryEventSource(path_spec=path_spec) storage_writer.AddEventSource(event_source) self._number_of_produced_sources = storage_writer.number_of_event_sources self._UpdateForemanProcessStatus() if self._status_update_callback: self._status_update_callback(self._processing_status) self._ScheduleTasks(storage_writer) if self._abort: self._status = definitions.STATUS_INDICATOR_ABORTED else: self._status = definitions.STATUS_INDICATOR_COMPLETED self._number_of_produced_events = storage_writer.number_of_events self._number_of_produced_sources = storage_writer.number_of_event_sources self._number_of_produced_warnings = storage_writer.number_of_warnings if self._processing_profiler: self._processing_profiler.StopTiming('process_sources') self._UpdateForemanProcessStatus() tasks_status = self._task_manager.GetStatusInformation() if self._task_queue_profiler: self._task_queue_profiler.Sample(tasks_status) self._processing_status.UpdateTasksStatus(tasks_status) if self._status_update_callback: self._status_update_callback(self._processing_status)
Processes the sources. Args: source_path_specs (list[dfvfs.PathSpec]): path specifications of the sources to process. storage_writer (StorageWriter): storage writer for a session storage. filter_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications used in path specification extraction. If set, path specifications that match the find specification will be processed.
juraj-google-style
def from_json(cls, data): if 'month' not in data: data['month'] = 1 if 'day' not in data: data['day'] = 1 if 'hour' not in data: data['hour'] = 0 if 'minute' not in data: data['minute'] = 0 if 'year' not in data: data['year'] = 2017 leap_year = True if int(data['year']) == 2016 else False return cls(data['month'], data['day'], data['hour'], data['minute'], leap_year)
Create a datetime from a dictionary. Args: data: { 'month': A value for month between 1-12. (Default: 1) 'day': A value for day between 1-31. (Default: 1) 'hour': A value for hour between 0-23. (Default: 0) 'minute': A value for minute between 0-59. (Default: 0) }
juraj-google-style
def CheckFile(self, path): print('Checking: {0:s}'.format(path)) definitions_registry = registry.DataTypeDefinitionsRegistry() definitions_reader = reader.YAMLDataTypeDefinitionsFileReader() result = False try: definitions_reader.ReadFile(definitions_registry, path) result = True except KeyError as exception: logging.warning('Unable to register data type definition in file: {0:s} with error: {1:s}'.format(path, exception)) except errors.FormatError as exception: logging.warning('Unable to validate file: {0:s} with error: {1:s}'.format(path, exception)) return result
Validates the definition in a file. Args: path (str): path of the definition file. Returns: bool: True if the file contains valid definitions.
codesearchnet
def id_pools_ipv4_subnets(self): if (not self.__id_pools_ipv4_subnets): self.__id_pools_ipv4_subnets = IdPoolsIpv4Subnets(self.__connection) return self.__id_pools_ipv4_subnets
Gets the IdPoolsIpv4Subnets API client. Returns: IdPoolsIpv4Subnets:
codesearchnet
def _identity_matrix(num_columns: types.IntTensor, num_digits: types.IntTensor, dtype: tf.DType=None) -> types.IntTensor: dtype = dtype or tf.int32 shifts = tf.range(num_digits - 1, num_digits - 1 - num_columns, delta=-1) return tf.bitwise.left_shift(tf.ones(shape=(1, num_columns), dtype=dtype), tf.cast(shifts, dtype))
Returns the identity matrix. Args: num_columns: Positive scalar `Tensor` with rank 0 representing the number of columns of the returned matrix. num_digits: Positive scalar `Tensor` with rank 0 representing the base-2 precision of the samples. dtype: Optional `dtype`. The `dtype` of the output `Tensor` (either a signed or unsigned integer `dtype`). Default value: `None` which maps to `int32`. Returns: A scalar `Tensor` with shape `(1, num_columns)`.
github-repos
def _find_human_readable_labels(synsets, synset_to_human): humans = [] for s in synsets: assert s in synset_to_human, ('Failed to find: %s' % s) humans.append(synset_to_human[s]) return humans
Build a list of human-readable labels. Args: synsets: list of strings; each string is a unique WordNet ID. synset_to_human: dict of synset to human labels, e.g., 'n02119022' --> 'red fox, Vulpes vulpes' Returns: List of human-readable strings corresponding to each synset.
juraj-google-style
def super(cls, method): method_cls = type(method.__self__) for supercls in method_cls.__mro__: if '__mixin_overloads__' in supercls.__dict__ and supercls.__mixin_overloads__.get(method.__name__) is cls: method_cls = supercls break return getattr(super(method_cls, method.__self__), method.__name__)
Imitate super() in a mix-in. This method is a substitute for super(MixinClass, self).overloaded_method(arg), which we can't use because mix-ins appear at the end of the MRO. It should be called as MixinClass.super(self.overloaded_method)(arg) . It works by finding the class on which MixinMeta.__init__ set MixinClass.overloaded_method and calling super() on that class. Args: method: The method in the mix-in. Returns: The method overloaded by 'method'.
github-repos
def _ws_on_message(self, ws: websocket.WebSocketApp, raw: Union[(str, bytes)]):
    if isinstance(raw, bytes):
        decoded = zlib.decompress(raw, 15, 10490000).decode('utf-8')
    else:
        decoded = raw
    data = json.loads(decoded)
    if (data.get('s') is not None):
        global last_sequence
        last_sequence = str(data['s'])
        self.logger.debug(('Set last_sequence to ' + last_sequence))
    event = WebSocketEvent.parse(data['op'])
    self.logger.debug('Received event {} (op {})'.format(event, data['op']))
    if (event == WebSocketEvent.HELLO):
        interval = (float(data['d']['heartbeat_interval']) / 1000)
        self.logger.debug(f'Starting heartbeat thread at {interval} seconds')
        self._ws_keep_alive = WebSocketKeepAlive(self.logger, ws, interval)
        self._ws_keep_alive.start()
    elif (event == WebSocketEvent.DISPATCH):
        self.logger.debug(('Got dispatch ' + data['t']))
        if (data['t'] == PycordCallback.MESSAGE.value):
            message_content = data['d']['content']
            if (message_content.startswith(self.command_prefix) and self._commands):
                cmd_str = message_content[1:].split(' ')[0].lower()
                self.logger.debug(f'Got new message, checking for callback for command "{cmd_str}"')
                for command_obj in self._commands:
                    if (command_obj[0].lower() == cmd_str):
                        self.logger.debug(f'Found matching command "{command_obj[0]}", invoking callback')
                        command_obj[1](data)
        for key in self.callbacks:
            if (key.value == data['t']):
                self.callbacks[key](data)
Callback for receiving messages from the websocket connection

This method receives ALL events from the websocket connection: some are used
for the initial authentication flow, some for maintaining the connection,
some for notifying this client of user states, etc.

Only a few of the events are really worth listening to by "downstream"
clients, mostly chat events (``WebSocketEvent.DISPATCH`` with element ``t``
== 'MESSAGE_CREATE'), and those can be accessed by clients using this
library via the command registration, which is handled by this method.

Args:
    ws: websocket connection
    raw: message received from the connection; either string or bytes, the
        latter being a zlib-compressed string. Either way, the end result of
        formatting is JSON
codesearchnet
def connect_output(self, node):
    if len(self.outputs) == self.max_outputs:
        raise TooManyOutputsError("Attempted to connect too many nodes to the output of a node", max_outputs=self.max_outputs, stream=self.stream)
    self.outputs.append(node)
Connect another node to our output.

This downstream node will automatically be triggered when we update our
output.

Args:
    node (SGNode): The node that should receive our output
juraj-google-style
def push(self, filename, data):
    self._queue.put(Chunk(filename, data))
Push a chunk of a file to the streaming endpoint.

Args:
    filename: Name of the file that this is a chunk of.
    data: File data for this chunk.
codesearchnet
def get_string(self, offset, length):
    return struct.unpack(str(length) + "s", self.data[offset:offset + length])[0]
Returns string (length bytes)

Args:
    offset (int): string offset in byte array
    length (int): string length
juraj-google-style
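The same unpacking shown standalone, on a hypothetical byte buffer:
import struct

data = b"hello world"
offset, length = 6, 5
# Unpack a fixed-length byte string starting at the given offset.
print(struct.unpack(str(length) + "s", data[offset:offset + length])[0])  # b'world'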
def universal_transformer_with_lstm_as_transition_function(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
    (state, unused_inputs, memory) = tf.unstack(layer_inputs, num=None, axis=0, name='unstack')
    assert (not hparams.add_step_timing_signal)
    mh_attention_input = step_preprocess(state, step, hparams)
    transition_function_input = attention_unit(mh_attention_input)
    if hparams.add_ffn_unit_to_the_transition_function:
        transition_function_input = ffn_unit(transition_function_input)
    transition_function_input = common_layers.layer_preprocess(transition_function_input, hparams)
    with tf.variable_scope('lstm'):
        transition_function_input_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='input', bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)
        tf.contrib.summary.scalar('lstm_input_gate', tf.reduce_mean(transition_function_input_gate))
        transition_function_forget_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='forget', bias_initializer=tf.zeros_initializer(), activation=None, pad_remover=pad_remover, preprocess=False, postprocess=False)
        forget_bias_tensor = tf.constant(hparams.lstm_forget_bias)
        transition_function_forget_gate = tf.sigmoid((transition_function_forget_gate + forget_bias_tensor))
        tf.contrib.summary.scalar('lstm_forget_gate', tf.reduce_mean(transition_function_forget_gate))
        transition_function_output_gate = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='output', bias_initializer=tf.zeros_initializer(), activation=tf.sigmoid, pad_remover=pad_remover, preprocess=False, postprocess=False)
        tf.contrib.summary.scalar('lstm_output_gate', tf.reduce_mean(transition_function_output_gate))
        transition_function_input_modulation = _ffn_layer_multi_inputs([transition_function_input, state], hparams, name='input_modulation', bias_initializer=tf.zeros_initializer(), activation=tf.tanh, pad_remover=pad_remover, preprocess=False, postprocess=False)
        transition_function_memory = ((memory * transition_function_forget_gate) + (transition_function_input_gate * transition_function_input_modulation))
        transition_function_output = (tf.tanh(transition_function_memory) * transition_function_output_gate)
    transition_function_output = common_layers.layer_preprocess(transition_function_output, hparams)
    return (transition_function_output, unused_inputs, transition_function_memory)
Universal Transformer which uses an lstm as transition function.

It's kind of like having an lstm, flipped vertically next to the Universal
Transformer, that controls the flow of the information in depth, over
different steps of the Universal Transformer.

Args:
    layer_inputs:
        - state: state
        - inputs: the original embedded inputs (= inputs to the first step)
        - memory: memory used in lstm.
    step: indicates number of steps taken so far
    hparams: model hyper-parameters.
    ffn_unit: feed-forward unit
    attention_unit: multi-head attention unit
    pad_remover: to mask out padding in convolutional layers (efficiency).

Returns:
    layer_output:
        new_state: new state
        inputs: the original embedded inputs (= inputs to the first step)
        memory: contains information of state from all the previous steps.
codesearchnet
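The gating above is the standard LSTM update; a minimal NumPy sketch of the same recurrence, with illustrative shapes and names rather than the tensor2tensor API:
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_transition(x, state, memory, W, forget_bias=1.0):
    # Each gate is a learned projection of [x, state]; W is a dict of
    # per-gate weight matrices acting on the concatenated input.
    z = np.concatenate([x, state], axis=-1)
    i = sigmoid(z @ W['input'])
    f = sigmoid(z @ W['forget'] + forget_bias)
    o = sigmoid(z @ W['output'])
    g = np.tanh(z @ W['modulation'])
    new_memory = memory * f + i * g
    new_state = np.tanh(new_memory) * o
    return new_state, new_memory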
def prepare_iter_request(url: Union[(methods, str)], data: MutableMapping, *, iterkey: Optional[str]=None, itermode: Optional[str]=None, limit: int=200, itervalue: Optional[Union[(str, int)]]=None) -> Tuple[(MutableMapping, str, str)]:
    (itermode, iterkey) = find_iteration(url, itermode, iterkey)
    if (itermode == 'cursor'):
        data['limit'] = limit
        if itervalue:
            data['cursor'] = itervalue
    elif (itermode == 'page'):
        data['count'] = limit
        if itervalue:
            data['page'] = itervalue
    elif (itermode == 'timeline'):
        data['count'] = limit
        if itervalue:
            data['latest'] = itervalue
    return (data, iterkey, itermode)
Prepare outgoing iteration request

Args:
    url: :class:`slack.methods` item or string of url
    data: Outgoing data
    limit: Maximum number of results to return per call.
    iterkey: Key in response data to iterate over (required for url string).
    itermode: Iteration mode (required for url string)
        (one of `cursor`, `page` or `timeline`)
    itervalue: Value for current iteration (cursor hash, page or timestamp
        depending on the itermode)

Returns:
    :py:class:`tuple` (data, iterkey, itermode)
codesearchnet
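A hedged usage sketch; the specific slack.methods member and cursor value are illustrative, and cursor mode is assumed for that endpoint:
data, iterkey, itermode = prepare_iter_request(
    methods.CONVERSATIONS_LIST, {}, limit=100, itervalue='dXNlcjpVMDYxTkZUVDI=')
# data is now {'limit': 100, 'cursor': 'dXNlcjpVMDYxTkZUVDI='} and itermode == 'cursor'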
def RemoveKeywordsForName(self, name, keywords):
    data_store.DB.IndexRemoveKeywordsForName(self.urn, name, keywords)
Removes keywords for a name.

Args:
    name: A name which should not be associated with some keywords anymore.
    keywords: A collection of keywords.
juraj-google-style
def get_variant_slice(self, package_name, range_):
    variant_list = self.variant_lists.get(package_name)
    if variant_list is None:
        variant_list = _PackageVariantList(package_name, self.solver)
        self.variant_lists[package_name] = variant_list
    entries = variant_list.get_intersection(range_)
    if not entries:
        return None
    slice_ = _PackageVariantSlice(package_name, entries=entries, solver=self.solver)
    return slice_
Get a list of variants from the cache.

Args:
    package_name (str): Name of package.
    range_ (`VersionRange`): Package version range.

Returns:
    `_PackageVariantSlice` object.
juraj-google-style
def get_endpoints(self, start=0, count=(- 1), filter='', sort=''):
    uri = '{}/endpoints/'.format(self.data['uri'])
    return self._helper.get_all(start, count, filter=filter, sort=sort, uri=uri)
Gets a list of endpoints in a SAN.

Args:
    start: The first item to return, using 0-based indexing. If not specified,
        the default is 0 - start with the first available item.
    count: The number of resources to return. A count of -1 requests all items.
        The actual number of items in the response might differ from the
        requested count if the sum of start and count exceeds the total number
        of items.
    filter (list or str): A general filter/query string to narrow the list of
        items returned. The default is no filter; all resources are returned.
    sort: The sort order of the returned data set. By default, the sort order
        is based on create time with the oldest entry first.

Returns:
    list: A list of endpoints.
codesearchnet
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None):
    all_responses = {}
    if self._cache:
        all_responses = self._cache.bulk_lookup(cache_api_name, url_params)
        url_params = [key for key in url_params if key not in all_responses.keys()]
    if len(url_params):
        urls = self._to_urls(fmt_url_path, url_params)
        responses = self._requests.multi_get(urls, query_params)
        for url_param, response in zip(url_params, responses):
            if self._cache:
                self._cache.cache_value(cache_api_name, url_param, response)
            all_responses[url_param] = response
    return all_responses
Makes multiple GETs to an OpenDNS endpoint.

Args:
    cache_api_name: string api_name for caching
    fmt_url_path: format string for building URL paths
    url_params: An enumerable of strings used in building URLs
    query_params: None / dict / list of dicts containing query params

Returns:
    A dict of {url_param: api_result}
juraj-google-style
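The cache-then-fetch pattern in isolation, as a minimal generic sketch (names are illustrative, not the library's API):
def cached_bulk_get(keys, cache, fetch_many):
    # Serve what we can from the cache, then fetch and cache the rest.
    results = {k: cache[k] for k in keys if k in cache}
    missing = [k for k in keys if k not in results]
    if missing:
        for key, value in zip(missing, fetch_many(missing)):
            cache[key] = value
            results[key] = value
    return results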
def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs):
    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
    vision_feature_select_strategy = vision_feature_select_strategy if vision_feature_select_strategy is not None else self.config.vision_feature_select_strategy
    downsample_ratio = self.config.downsample_ratio
    if vision_feature_layer == -1:
        vision_features = self.vision_tower(pixel_values=pixel_values).last_hidden_state
    else:
        vision_features = self.vision_model(pixel_values=pixel_values).hidden_states[vision_feature_layer]
    if vision_feature_select_strategy == 'default':
        vision_features = vision_features[:, 1:, :]
    channels = vision_features.shape[1]
    feature_size = int(channels ** 0.5)
    batch_size = vision_features.shape[0]
    vision_features = vision_features.reshape(batch_size, feature_size, feature_size, -1)
    vision_features = self.pixel_shuffle(vision_features, scale_factor=downsample_ratio)
    vision_features = vision_features.reshape(batch_size, -1, vision_features.shape[-1])
    vision_features = self.multi_modal_projector(vision_features)
    return vision_features
Obtains image last hidden states from the vision tower and applies the multimodal projection.

Args:
    pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
        The tensors corresponding to the input images.
    vision_feature_layer (`int` or `List[int]`):
        Layer index or list of layer indices to extract features from.

Returns:
    vision_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
github-repos
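A rough shape walkthrough of the reshaping above; the numbers are illustrative and depend on the vision backbone and config:
# Suppose the tower emits 1025 tokens of width 1024 per image and the CLS
# token is dropped by the 'default' strategy, leaving 1024 patch tokens.
batch_size, seq_len, hidden = 2, 1024, 1024
feature_size = int(seq_len ** 0.5)       # 32: tokens laid back on a 32x32 grid
# Pixel shuffle with downsample_ratio=0.5 halves each spatial side and folds
# each 2x2 neighbourhood into the channel dimension before projection:
downsampled_tokens = (feature_size // 2) ** 2   # 256 tokens
widened_channels = hidden * 4                   # 4096 channels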
def run_suite(test_classes, argv=None):
    args = _parse_cli_args(argv)
    for test_class in test_classes:
        if not issubclass(test_class, base_test.BaseTestClass):
            logging.error('Test class %s does not extend mobly.base_test.BaseTestClass', test_class)
            sys.exit(1)
    if args.list_tests:
        _print_test_names(test_classes)
        sys.exit(0)
    test_configs = config_parser.load_test_config_file(args.config, args.test_bed)
    selected_tests = compute_selected_tests(test_classes, args.tests)
    console_level = logging.DEBUG if args.verbose else logging.INFO
    ok = True
    for config in test_configs:
        runner = test_runner.TestRunner(config.log_path, config.testbed_name)
        with runner.mobly_logger(console_level=console_level):
            for test_class, tests in selected_tests.items():
                runner.add_test_class(config, test_class, tests)
            try:
                runner.run()
                ok = runner.results.is_all_pass and ok
            except signals.TestAbortAll:
                pass
            except Exception:
                logging.exception('Exception when executing %s.', config.testbed_name)
                ok = False
    if not ok:
        sys.exit(1)
Executes multiple test classes as a suite.

This is the default entry point for running a test suite script file directly.

Args:
    test_classes: List of python classes containing Mobly tests.
    argv: A list that is then parsed as cli args. If None, defaults to cli input.
github-repos
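Typical entry-point usage, assuming this function lives in Mobly's suite_runner module; the test class names are illustrative:
from mobly import suite_runner

# A suite script just hands its test classes to run_suite; CLI flags such as
# -c <config> and --tests are parsed from sys.argv.
if __name__ == '__main__':
    suite_runner.run_suite([HelloWorldTest, GoodbyeTest])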
def CaffeBilinearUpSample(x, shape):
    inp_shape = x.shape.as_list()
    ch = inp_shape[1]
    assert ch == 1, "This layer only works for channel=1"
    shape = int(shape)
    filter_shape = 2 * shape

    def bilinear_conv_filler(s):
        f = np.ceil(float(s) / 2)
        c = float(2 * f - 1 - f % 2) / (2 * f)
        ret = np.zeros((s, s), dtype='float32')
        for x in range(s):
            for y in range(s):
                ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        return ret

    w = bilinear_conv_filler(filter_shape)
    w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch))
    weight_var = tf.constant(w, tf.float32, shape=(filter_shape, filter_shape, ch, ch), name='bilinear_upsample_filter')
    x = tf.pad(x, [[0, 0], [0, 0], [shape - 1, shape - 1], [shape - 1, shape - 1]], mode='SYMMETRIC')
    out_shape = tf.shape(x) * tf.constant([1, 1, shape, shape], tf.int32)
    deconv = tf.nn.conv2d_transpose(x, weight_var, out_shape, [1, 1, shape, shape], 'SAME', data_format='NCHW')
    edge = shape * (shape - 1)
    deconv = deconv[:, :, edge:-edge, edge:-edge]
    if inp_shape[2]:
        inp_shape[2] *= shape
    if inp_shape[3]:
        inp_shape[3] *= shape
    deconv.set_shape(inp_shape)
    return deconv
Deterministically bilinearly-upsample the input images.
It is implemented by deconvolution with "BilinearFiller" in Caffe.
It aims to mimic Caffe behavior.

Args:
    x (tf.Tensor): a NCHW tensor
    shape (int): the upsample factor

Returns:
    tf.Tensor: a NCHW tensor.
juraj-google-style
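For intuition, the filler weights for an upsample factor of 2 (filter size 4), computed with the same formula as the inner helper above:
import numpy as np

def bilinear_conv_filler(s):
    f = np.ceil(float(s) / 2)
    c = float(2 * f - 1 - f % 2) / (2 * f)
    ret = np.zeros((s, s), dtype='float32')
    for x in range(s):
        for y in range(s):
            ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
    return ret

# The 4x4 filter is separable with 1-D profile [0.25, 0.75, 0.75, 0.25],
# so e.g. the corner weight is 0.0625 and the centre weights are 0.5625.
print(bilinear_conv_filler(4))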
def restore_original_dimensions(obs, obs_space, tensorlib=tf):
    if hasattr(obs_space, 'original_space'):
        return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib)
    else:
        return obs
Unpacks Dict and Tuple space observations into their original form.

This is needed since we flatten Dict and Tuple observations in transit.
Before sending them to the model though, we should unflatten them into
Dicts or Tuples of tensors.

Arguments:
    obs: The flattened observation tensor.
    obs_space: The flattened obs space. If this has the `original_space`
        attribute, we will unflatten the tensor to that shape.
    tensorlib: The library used to unflatten (reshape) the array/tensor.

Returns:
    single tensor or dict / tuple of tensors matching the original
    observation space.
codesearchnet
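A conceptual sketch of the flatten/unflatten round trip; the space layout and slicing are illustrative, not the library's internal packing:
import numpy as np

# A Dict({'pos': Box(2,), 'vel': Box(3,)}) observation travels as one flat
# vector of length 5...
flat_obs = np.array([0.1, 0.2, 1.0, 2.0, 3.0])
# ...and unpacking restores the original structure before the model sees it.
unpacked = {'pos': flat_obs[:2], 'vel': flat_obs[2:]}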
def trigger_streamer(*inputs, **kwargs):
    streamer_marker = kwargs['mark_streamer']
    try:
        reading = inputs[1].pop()
    except StreamEmptyError:
        return []
    finally:
        for input_x in inputs:
            input_x.skip_all()
    try:
        streamer_marker(reading.value)
    except ArgumentError:
        return []
    return [IOTileReading(0, 0, 0)]
Trigger a streamer based on the index read from input b.

Returns:
    list(IOTileReading)
codesearchnet
def __init__(self, located_items=None, unique_identifiers=None):
    super(LocateResponsePayload, self).__init__(enums.Tags.RESPONSE_PAYLOAD)
    self._located_items = None
    self._unique_identifiers = None
    self.located_items = located_items
    self.unique_identifiers = unique_identifiers
Construct a Locate response payload structure.

Args:
    located_items (int): An integer specifying the number of matching
        objects found by the server. Note that this may not equal the
        number of object identifiers returned in this payload.
        Optional, defaults to None.
    unique_identifiers (list): A list of strings specifying the object
        identifiers for matching objects. Optional, defaults to None.
juraj-google-style
def calculate_bbh(blast_results_1, blast_results_2, r_name=None, g_name=None, outdir=''):
    cols = ['gene', 'subject', 'PID', 'alnLength', 'mismatchCount', 'gapOpenCount', 'queryStart', 'queryEnd', 'subjectStart', 'subjectEnd', 'eVal', 'bitScore']
    if not r_name and not g_name:
        r_name = op.basename(blast_results_1).split('_vs_')[0]
        g_name = op.basename(blast_results_1).split('_vs_')[1].replace('_blast.out', '')
        r_name2 = op.basename(blast_results_2).split('_vs_')[1].replace('_blast.out', '')
        if r_name != r_name2:
            log.warning('{} != {}'.format(r_name, r_name2))
    outfile = op.join(outdir, '{}_vs_{}_bbh.csv'.format(r_name, g_name))
    if op.exists(outfile) and os.stat(outfile).st_size != 0:
        log.debug('{} vs {} BLAST BBHs already found at {}'.format(r_name, g_name, outfile))
        return outfile
    bbh1 = pd.read_csv(blast_results_1, sep='\t', names=cols)
    bbh2 = pd.read_csv(blast_results_2, sep='\t', names=cols)
    out = pd.DataFrame()
    log.debug('Finding BBHs for {} vs. {}'.format(r_name, g_name))
    for g in bbh1[pd.notnull(bbh1.gene)].gene.unique():
        res = bbh1[bbh1.gene == g]
        if len(res) == 0:
            continue
        best_hit = res.ix[res.PID.idxmax()].copy()
        best_gene = best_hit.subject
        res2 = bbh2[bbh2.gene == best_gene]
        if len(res2) == 0:
            continue
        best_hit2 = res2.ix[res2.PID.idxmax()]
        best_gene2 = best_hit2.subject
        if g == best_gene2:
            best_hit['BBH'] = '<=>'
        else:
            best_hit['BBH'] = '->'
        out = pd.concat([out, pd.DataFrame(best_hit).transpose()])
    out.to_csv(outfile)
    log.debug('{} vs {} BLAST BBHs saved at {}'.format(r_name, g_name, outfile))
    return outfile
Calculate the best bidirectional BLAST hits (BBH) and save a dataframe of results.

Args:
    blast_results_1 (str): BLAST results for reference vs. other genome
    blast_results_2 (str): BLAST results for other vs. reference genome
    r_name: Name of reference genome
    g_name: Name of other genome
    outdir: Directory where BLAST results are stored.

Returns:
    str: Path to the saved CSV of BBH results.
juraj-google-style
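The reciprocal-best-hit logic in isolation, as a minimal sketch over plain dicts (illustrative, not the pandas implementation above):
def reciprocal_best_hits(best_a_to_b, best_b_to_a):
    # best_a_to_b maps each gene in genome A to its single best hit in B,
    # and vice versa; a pair is a BBH only if the relation is reciprocal.
    bbh = {}
    for gene_a, gene_b in best_a_to_b.items():
        if best_b_to_a.get(gene_b) == gene_a:
            bbh[gene_a] = gene_b
    return bbh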
def sphere_selector_using_residues(self, radius, force_rerun=False):
    log.debug('{}: running sphere selector...'.format(self.id))
    if ((not self.sphgen_path) or (not self.bindingsite_path)):
        raise ValueError('Please run sphgen and binding_site_mol2')
    selsph = op.join(self.dock_dir, '{}_selsph_binding.sph'.format(self.id))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph):
        cmd = 'sphere_selector {} {} {}'.format(self.sphgen_path, self.bindingsite_path, radius)
        rename = 'mv selected_spheres.sph {}'.format(selsph)
        os.system(cmd)
        os.system(rename)
    if ssbio.utils.is_non_zero_file(selsph):
        self.sphsel_path = selsph
        log.debug('{}: successful sphere selection'.format(self.sphsel_path))
    else:
        log.critical('{}: sphere_selector_using_residues failed to run on sph file'.format(self.sphgen_path))
Select spheres based on binding site residues

Args:
    radius (int, float): Radius around binding residues to dock to
    force_rerun (bool): If method should be rerun even if output file exists
codesearchnet
def _get_image_patches(self, image: 'torch.Tensor', grid_pinpoints, size: tuple, patch_size: int, interpolation: 'F.InterpolationMode') -> List['torch.Tensor']:
    if not isinstance(grid_pinpoints, list):
        raise TypeError('grid_pinpoints must be a list of possible resolutions.')
    possible_resolutions = grid_pinpoints
    image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST)
    best_resolution = select_best_resolution(image_size, possible_resolutions)
    resized_image = self._resize_for_patching(image, best_resolution, interpolation=interpolation, input_data_format=ChannelDimension.FIRST)
    padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=ChannelDimension.FIRST)
    patches = divide_to_patches(padded_image, patch_size=patch_size)
    resized_original_image = F.resize(image, size=size, interpolation=interpolation)
    image_patches = [resized_original_image] + patches
    return image_patches
Process an image with variable resolutions by dividing it into patches.

Args:
    image ("torch.Tensor"):
        The input image to be processed.
    grid_pinpoints (List):
        A list of possible resolutions to choose from.
    size (`tuple`):
        Size to resize the original image to.
    patch_size (`int`):
        Size of the patches to divide the image into.
    interpolation (`"InterpolationMode"`):
        Resampling filter to use if resizing the image.

Returns:
    List["torch.Tensor"]: A list of tensors containing the processed image patches.
github-repos
def get(self, key, default=None):
    if key.count('.') == 0:
        return super(DotDict, self).get(key, default)
    value = default
    first, remainder = key.split('.', 1)
    if first in self:
        value = super(DotDict, self).get(first, default)
        if isinstance(value, (dict, DotDict)):
            return DotDict(value).get(remainder, default)
    return value
Get a value from the `DotDict`.

The `key` parameter can either be a regular string key, e.g. "foo", or it
can be a string key with dot notation, e.g. "foo.bar.baz", to signify a
nested lookup.

The default value is returned if any level of the key's components are
not found.

Args:
    key (str): The key to get the value for.
    default: The return value should the given key not exist in the `DotDict`.
juraj-google-style
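Assuming DotDict otherwise behaves like a plain dict, usage looks like:
config = DotDict({'db': {'host': 'localhost', 'port': 5432}})
print(config.get('db.host'))            # 'localhost'
print(config.get('db.user', 'admin'))   # 'admin' (missing leaf -> default)
print(config.get('cache.ttl', 30))      # 30 (missing branch -> default)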
def make_sql(table_name, max_rows=None, for_eval=False):
    if for_eval:
        where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) = 0 AND pickup_latitude is not null AND pickup_longitude is not null AND dropoff_latitude is not null AND dropoff_longitude is not null'
    else:
        where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) > 0 AND pickup_latitude is not null AND pickup_longitude is not null AND dropoff_latitude is not null AND dropoff_longitude is not null'
    limit_clause = ''
    if max_rows:
        limit_clause = 'LIMIT {max_rows}'.format(max_rows=max_rows)
    return '\n    SELECT\n        CAST(pickup_community_area AS string) AS pickup_community_area,\n        CAST(dropoff_community_area AS string) AS dropoff_community_area,\n        CAST(pickup_census_tract AS string) AS pickup_census_tract,\n        CAST(dropoff_census_tract AS string) AS dropoff_census_tract,\n        fare,\n        EXTRACT(MONTH FROM trip_start_timestamp) AS trip_start_month,\n        EXTRACT(HOUR FROM trip_start_timestamp) AS trip_start_hour,\n        EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS trip_start_day,\n        UNIX_SECONDS(trip_start_timestamp) AS trip_start_timestamp,\n        pickup_latitude,\n        pickup_longitude,\n        dropoff_latitude,\n        dropoff_longitude,\n        trip_miles,\n        payment_type,\n        company,\n        trip_seconds,\n        tips\n    FROM `{table_name}`\n    {where_clause}\n    {limit_clause}\n'.format(table_name=table_name, where_clause=where_clause, limit_clause=limit_clause)
Creates the sql command for pulling data from BigQuery.

Args:
    table_name: BigQuery table name
    max_rows: if set, limits the number of rows pulled from BigQuery
    for_eval: True if this is for evaluation, false otherwise

Returns:
    sql command as string
github-repos
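A quick usage sketch; the table name is illustrative:
query = make_sql('bigquery-public-data.chicago_taxi_trips.taxi_trips',
                 max_rows=1000, for_eval=True)
# The returned string SELECTs the feature columns FROM the given table,
# keeps the eval split via MOD(FARM_FINGERPRINT(unique_key), 3) = 0, and
# ends with LIMIT 1000.
print(query)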
def city_nums():
    city_nums = {}
    first_row = 1
    num = 0
    fname = pkg_resources.resource_filename(__name__, 'resources/Distance_Matrix.csv')
    with open(fname, 'rU') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            if (first_row == 1):
                first_row = 0
            else:
                city_nums[row[0]] = num
                num = (num + 1)
    return city_nums
Get a dictionary of Backpage city names mapped to their 'legend' value.

Returns:
    dictionary of Backpage city names mapped to their numeric value
codesearchnet
def wait(self, timeout_ms=None):
    closed = timeouts.loop_until_timeout_or_true(timeouts.PolledTimeout.from_millis(timeout_ms), self.stream.is_closed, .1)
    if closed:
        if hasattr(self.stdout, 'getvalue'):
            return self.stdout.getvalue()
        return True
    return None
Block until this command has completed.

Args:
    timeout_ms: Timeout, in milliseconds, to wait.

Returns:
    Output of the command if it completed and self.stdout is a StringIO
    object or was passed in as None. Returns True if the command completed
    but stdout was provided (and was not a StringIO object). Returns None
    if the timeout expired before the command completed. Be careful to
    check the return value explicitly for None, as the output may be ''.
juraj-google-style
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ['X']

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], 'UNK')

    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = 'ATOM'
            name = atom_name if len(atom_name) == 4 else f' {atom_name}'
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.0
            element = atom_name[0]
            charge = ''
            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            atom_line = f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}{res_name_3:>3} {chain_tag:>1}{residue_index[i]:>4}{insertion_code:>1}   {pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}{occupancy:>6.2f}{b_factor:>6.2f}          {element:>2}{charge:>2}'
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            chain_end = 'TER'
            chain_termination_line = f'{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}'
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append('END')
    pdb_lines.append('')
    return '\n'.join(pdb_lines)
Converts a `Protein` instance to a PDB string.

Args:
    prot: The protein to convert to PDB.

Returns:
    PDB string.
github-repos