Dataset columns:
  code — string, lengths 20 to 4.93k characters
  docstring — string, lengths 33 to 1.27k characters
  source — string, 3 classes
def _check_rules(browser, rules_js, config):
    if config.rules_to_run is None:
        msg = 'No accessibility rules were specified to check.'
        log.warning(msg)
        return None

    rules = config.rules_to_run
    if rules:
        rules_config = u'auditConfig.auditRulesToRun = {rules};'.format(rules=rules)
    else:
        rules_config = ''

    ignored_rules = config.rules_to_ignore
    if ignored_rules:
        rules_config += u'\nauditConfig.auditRulesToIgnore = {rules};'.format(rules=ignored_rules)

    script = dedent(u'\n {rules_js}\n var auditConfig = new axs.AuditConfiguration();\n {rules_config}\n auditConfig.scope = {scope};\n var run_results = axs.Audit.run(auditConfig);\n var audit_results = axs.Audit.auditResults(run_results)\n return audit_results;\n '.format(
        rules_js=rules_js, rules_config=rules_config, scope=config.scope))

    result = browser.execute_script(script)
    audit_results = AuditResults(
        errors=result.get('errors_'),
        warnings=result.get('warnings_'))
    return audit_results
Check the page for violations of the configured rules. By default, all rules in the ruleset will be checked. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A namedtuple with 'errors' and 'warnings' fields whose values are the errors and warnings returned from the audit. None if config has rules_to_run set to None. __Caution__: You probably don't really want to call this method directly! It will be used by `A11yAudit.do_audit` if using this ruleset.
codesearchnet
def name_changed(self, changed_item):
    name = str(changed_item.text())
    if name != '':
        if name != self.selected_element_name:
            self.elements_from_file[name] = self.elements_from_file[self.selected_element_name]
            del self.elements_from_file[self.selected_element_name]
            self.selected_element_name = name
checks if name has been changed and ignores the name change if the changed_item is an existing script Args: changed_item:
juraj-google-style
def discrete_bottleneck(self, x):
    x_reshaped = self.slice_hidden(x)
    x_means_hot = []
    x_means = 0
    loss = 0
    x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup(x_reshaped, self.means)
    if self.hparams.ema:
        tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta))
        updated_ema_count = moving_averages.assign_moving_average(
            self.ema_count,
            tf.reduce_sum(
                tf.reshape(
                    x_means_hot,
                    shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]),
                axis=0),
            self.hparams.decay,
            zero_debias=False)
        dw = tf.matmul(
            tf.transpose(x_means_hot, perm=[1, 2, 0]),
            tf.transpose(x_reshaped, perm=[1, 0, 2]))
        updated_ema_means = moving_averages.assign_moving_average(
            self.ema_means, dw, self.hparams.decay, zero_debias=False)
        n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
        updated_ema_count = ((updated_ema_count + self.hparams.epsilon) /
                             (n + 2**self.hparams.z_size * self.hparams.epsilon) * n)
        updated_ema_means = updated_ema_means / tf.expand_dims(updated_ema_count, axis=-1)
        with tf.control_dependencies([e_loss]):
            update_means = tf.assign(self.means, updated_ema_means)
            with tf.control_dependencies([update_means]):
                loss += self.hparams.beta * e_loss
    else:
        loss += q_loss + self.hparams.beta * e_loss
    x_means_idx = tf.argmax(x_means_hot, axis=-1)
    num_bits = int(self.hparams.z_size // self.hparams.num_blocks)  # assumed: z_size bits split evenly across num_blocks
    x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2)
    x_discrete = self.bit_to_int(
        tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2)
    shape_x = common_layers.shape_list(x)
    shape_discrete = shape_x[:-1]
    x_discrete = tf.reshape(x_discrete, shape_discrete)
    x_means = tf.reshape(x_means, shape=shape_x)
    h1 = x + tf.stop_gradient(x_means - x)
    h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
    res = tf.layers.dense(tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin")
    embed_fn = partial(self.embed)
    return {
        "dense": res,
        "discrete": x_discrete,
        "loss": loss,
        "embed": embed_fn
    }
Discretization bottleneck for latent variables. Args: x: Input to the discretization bottleneck. Returns: Embedding to pass to the decoder, discrete latent, loss, and the embedding function. Raises: ValueError: If projection_tensors is None for reshape_method project, or ema_count or ema_means is None if we are using ema, or unknown args.
juraj-google-style
def on_run_end(self, request): self._is_run_start = False if request.performed_action == framework.OnRunStartAction.DEBUG_RUN: partition_graphs = None if request.run_metadata and request.run_metadata.partition_graphs: partition_graphs = request.run_metadata.partition_graphs elif request.client_graph_def: partition_graphs = [request.client_graph_def] if request.tf_error and (not os.path.isdir(self._dump_root)): raise request.tf_error debug_dump = debug_data.DebugDumpDir(self._dump_root, partition_graphs=partition_graphs) debug_dump.set_python_graph(self._sess.graph) passed_filter = None passed_filter_exclude_node_names = None if self._active_tensor_filter: if not debug_dump.find(self._tensor_filters[self._active_tensor_filter], first_n=1, exclude_node_names=self._active_filter_exclude_node_names): self._remove_dump_root() return framework.OnRunEndResponse() else: passed_filter = self._active_tensor_filter passed_filter_exclude_node_names = self._active_filter_exclude_node_names self._active_tensor_filter = None self._active_filter_exclude_node_names = None self._prep_debug_cli_for_run_end(debug_dump, request.tf_error, passed_filter, passed_filter_exclude_node_names) self._run_start_response = self._launch_cli() self._remove_dump_root() elif request.performed_action == framework.OnRunStartAction.PROFILE_RUN: self._prep_profile_cli_for_run_end(self._sess.graph, request.run_metadata) self._run_start_response = self._launch_cli() else: self._run_start_response = None return framework.OnRunEndResponse()
Overrides on-run-end callback. Actions taken: 1) Load the debug dump. 2) Bring up the Analyzer CLI. Args: request: An instance of OnRunEndRequest. Returns: An instance of OnRunEndResponse.
github-repos
def extractDates(self, inp):
    def merge(param):
        (day, time) = param
        if not (day or time):
            return None
        if not day:
            return time
        if not time:
            return day
        return datetime.datetime(day.year, day.month, day.day, time.hour, time.minute)

    days = self.extractDays(inp)
    times = self.extractTimes(inp)
    return map(merge, zip_longest(days, times, fillvalue=None))
Extract semantic date information from an input string. In effect, runs both extractDays and extractTimes on the input string and merges the results to produce a comprehensive datetime object. Args: inp (str): Input string to be parsed. Returns: A list of datetime objects containing the extracted dates from the input snippet, or an empty list if not found.
codesearchnet
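For illustration, the merge step above can be reproduced standalone: zip_longest pairs each extracted day with the matching time (padding the shorter list with None), and the merge helper combines a date with a clock time into one datetime. The sketch below is not part of the library and takes the pair as two arguments instead of a tuple.

import datetime
from itertools import zip_longest

def merge(day, time):
    # Combine a date-only and a time-only value into a single datetime.
    if not (day or time):
        return None
    if not day:
        return time
    if not time:
        return day
    return datetime.datetime(day.year, day.month, day.day, time.hour, time.minute)

days = [datetime.date(2024, 5, 1)]
times = [datetime.datetime(1900, 1, 1, 15, 30), datetime.datetime(1900, 1, 1, 9, 0)]
merged = [merge(d, t) for d, t in zip_longest(days, times, fillvalue=None)]
# -> [datetime.datetime(2024, 5, 1, 15, 30), datetime.datetime(1900, 1, 1, 9, 0)]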
def wp_decode(self, sequences):
    decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
    return decode_strs
Convert a list of lists of word piece token ids into a list of strings by calling word piece tokenizer. Args: sequences (`torch.Tensor`): List of tokenized input ids. Returns: `List[str]`: The list of wp decoded sentences.
github-repos
def _get_function(self, name): return self._functions.get(compat.as_str(name), None)
Returns the function definition for 'name'. Args: name: string function name. Returns: The function def proto.
github-repos
def playback_trajectory(env, ep_dir): xml_path = os.path.join(ep_dir, 'model.xml') with open(xml_path, 'r') as f: env.reset_from_xml_string(f.read()) state_paths = os.path.join(ep_dir, 'state_*.npz') t = 0 for state_file in sorted(glob(state_paths)): print(state_file) dic = np.load(state_file) states = dic['states'] for state in states: env.sim.set_state_from_flattened(state) env.sim.forward() env.render() t += 1 if ((t % 100) == 0): print(t)
Playback data from an episode. Args: env: The environment instance in which to play back the episode. ep_dir: The path to the directory containing data for an episode.
codesearchnet
def update_tag(self, tag_name, description=None, custom_properties=None, **kwargs):
    data = {'description': description or '',
            'customProperties': custom_properties or {}}
    resp = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), data=data, **kwargs)
    resp.raise_for_status()
    return resp.json()
update a tag by name Args: tag_name (string): name of tag to update description (optional[string]): a description custom_properties (optional[dict]): dictionary of custom properties
juraj-google-style
def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
    artist = self.artists_decoder.get(artists_index)
    genres = [self.genres_decoder.get(genre) for genre in genres_index]
    lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
    return (artist, genres, lyrics)
Converts an index (integer) in a token (str) using the vocab. Args: artists_index (`int`): Index of the artist in its corresponding dictionary. genres_index (`Union[List[int], int]`): Index of the genre in its corresponding dictionary. lyric_index (`List[int]`): List of character indices, which each correspond to a character.
github-repos
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if token_ids_1 is not None:
        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
    return [1] + [0] * len(token_ids_0) + [1]
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
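A small self-contained check of the mask logic above; it only assumes the common "special, sequence, special, sequence, special" layout that the two return branches encode.

def special_tokens_mask(ids_0, ids_1=None):
    # Mirrors the branches for inputs without special tokens already added.
    if ids_1 is not None:
        return [1] + [0] * len(ids_0) + [1] + [0] * len(ids_1) + [1]
    return [1] + [0] * len(ids_0) + [1]

print(special_tokens_mask([5, 6, 7]))           # [1, 0, 0, 0, 1]
print(special_tokens_mask([5, 6], [8, 9, 10]))  # [1, 0, 0, 1, 0, 0, 0, 1]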
def is_link(path):
    if sys.getwindowsversion().major < 6:
        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')
    try:
        return salt.utils.path.islink(path)
    except Exception as exc:
        raise CommandExecutionError(exc)
Check if the path is a symlink. This is only supported on Windows Vista or later. In line with Unix behavior, this function will raise an error if the path is not a symlink, however, the error raised will be a SaltInvocationError, not an OSError. Args: path (str): The path to a file or directory Returns: bool: True if path is a symlink, otherwise False CLI Example: .. code-block:: bash salt '*' file.is_link /path/to/link
codesearchnet
def to_dict(self, fields=None): data = {} def _add(field): return (fields is None or field in fields) if _add("resolved_packages"): resolved_packages = [] for pkg in (self._resolved_packages or []): resolved_packages.append(pkg.handle.to_dict()) data["resolved_packages"] = resolved_packages if _add("serialize_version"): data["serialize_version"] = \ '.'.join(map(str, ResolvedContext.serialize_version)) if _add("patch_locks"): data["patch_locks"] = dict((k, v.name) for k, v in self.patch_locks) if _add("package_orderers"): package_orderers = [package_order.to_pod(x) for x in (self.package_orderers or [])] data["package_orderers"] = package_orderers or None if _add("package_filter"): data["package_filter"] = self.package_filter.to_pod() if _add("graph"): if self.graph_string and self.graph_string.startswith('{'): graph_str = self.graph_string else: g = self.graph() graph_str = write_compacted(g) data["graph"] = graph_str data.update(dict( timestamp=self.timestamp, requested_timestamp=self.requested_timestamp, building=self.building, caching=self.caching, implicit_packages=map(str, self.implicit_packages), package_requests=map(str, self._package_requests), package_paths=self.package_paths, default_patch_lock=self.default_patch_lock.name, rez_version=self.rez_version, rez_path=self.rez_path, user=self.user, host=self.host, platform=self.platform, arch=self.arch, os=self.os, created=self.created, parent_suite_path=self.parent_suite_path, suite_context_name=self.suite_context_name, status=self.status_.name, failure_description=self.failure_description, from_cache=self.from_cache, solve_time=self.solve_time, load_time=self.load_time, num_loaded_packages=self.num_loaded_packages )) if fields: data = dict((k, v) for k, v in data.iteritems() if k in fields) return data
Convert context to dict containing only builtin types. Args: fields (list of str): If present, only write these fields into the dict. This can be used to avoid constructing expensive fields (such as 'graph') for some cases. Returns: dict: Dictified context.
juraj-google-style
def labels(self, main_type, sub_type, unique_id, owner=None, filters=None, params=None): params = params or {} if owner: params['owner'] = owner if filters and filters.filters: params['filters'] = filters.filters_string if not sub_type: url = '/v2/{}/{}/securityLabels'.format(main_type, unique_id) else: url = '/v2/{}/{}/{}/securityLabels'.format(main_type, sub_type, unique_id) for l in self._iterate(url, params, 'securityLabel'): yield l
Args: main_type: sub_type: unique_id: owner: filters: params: Return:
juraj-google-style
def graph_def(self): return self._graph.as_graph_def(add_shapes=self._add_shapes)
A serializable version of the underlying TensorFlow graph. Returns: A graph_pb2.GraphDef proto containing nodes for all of the Operations in the underlying TensorFlow graph.
github-repos
def transformer_prepare_encoder(inputs, target_space, hparams, features=None): ishape_static = inputs.shape.as_list() encoder_input = inputs if features and "inputs_segmentation" in features: inputs_segmentation = features["inputs_segmentation"] inputs_position = features["inputs_position"] targets_segmentation = features["targets_segmentation"] if (hasattr(hparams, "unidirectional_encoder") and hparams.unidirectional_encoder): tf.logging.info("Using unidirectional encoder") encoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle( common_layers.shape_list(inputs)[1])) else: encoder_self_attention_bias = ( common_attention.attention_bias_same_segment( inputs_segmentation, inputs_segmentation)) encoder_decoder_attention_bias = ( common_attention.attention_bias_same_segment(targets_segmentation, inputs_segmentation)) else: encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding) if (hasattr(hparams, "unidirectional_encoder") and hparams.unidirectional_encoder): tf.logging.info("Using unidirectional encoder") encoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle( common_layers.shape_list(inputs)[1])) else: encoder_self_attention_bias = ignore_padding encoder_decoder_attention_bias = ignore_padding inputs_position = None if hparams.proximity_bias: encoder_self_attention_bias += common_attention.attention_bias_proximal( common_layers.shape_list(inputs)[1]) if target_space is not None and hparams.get("use_target_space_embedding", True): emb_target_space = common_layers.embedding( target_space, 32, ishape_static[-1], name="target_space_embedding", dtype=hparams.get("activation_dtype", "float32")) emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) encoder_input += emb_target_space if hparams.pos == "timing": if inputs_position is not None: encoder_input = common_attention.add_timing_signal_1d_given_position( encoder_input, inputs_position) else: encoder_input = common_attention.add_timing_signal_1d(encoder_input) elif hparams.pos == "emb": encoder_input = common_attention.add_positional_embedding( encoder_input, hparams.max_length, "inputs_positional_embedding", inputs_position) encoder_self_attention_bias = common_layers.cast_like( encoder_self_attention_bias, encoder_input) encoder_decoder_attention_bias = common_layers.cast_like( encoder_decoder_attention_bias, encoder_input) return (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias)
Prepare one shard of the model for the encoder. Args: inputs: a Tensor. target_space: a Tensor. hparams: run hyperparameters features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: encoder_input: a Tensor, bottom of encoder stack encoder_self_attention_bias: a bias tensor for use in encoder self-attention encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder attention
juraj-google-style
def _get_index_points(self, index_points=None):
    if self._index_points is None and index_points is None:
        raise ValueError(
            'This GaussianProcess instance was not instantiated with a value for '
            'index_points. One must therefore be provided when calling sample, '
            'log_prob, and other such methods. In particular, one can\'t compute '
            'KL divergences to/from an instance of `GaussianProccess` with '
            'unspecified `index_points` directly. Instead, use the '
            '`get_marginal_distribution` function, which takes `index_points` as '
            'an argument and returns a `Normal` or '
            '`MultivariateNormalLinearOperator` instance, whose KL can be '
            'computed.')
    return index_points if index_points is not None else self._index_points
Return `index_points` if not None, else `self._index_points`. Args: index_points: if given, this is what is returned; else, `self._index_points` Returns: index_points: the given arg, if not None, else the class member `self._index_points`. Raises: ValueError: if `index_points` and `self._index_points` are both `None`.
juraj-google-style
def transformer_encoder_ffn_unit(x, hparams, nonpadding_mask=None, pad_remover=None): with tf.variable_scope('ffn'): if (hparams.transformer_ffn_type == 'fc'): y = transformer.transformer_ffn_layer(common_layers.layer_preprocess(x, hparams), hparams, pad_remover, conv_padding='SAME', nonpadding_mask=nonpadding_mask) if (hparams.transformer_ffn_type == 'sepconv'): assert (nonpadding_mask is not None), 'The nonpadding_mask should be provided, otherwise the model uses the leaked padding information to estimate the length!' y = common_layers.sepconv_relu_sepconv(common_layers.layer_preprocess(x, hparams), filter_size=hparams.filter_size, output_size=hparams.hidden_size, first_kernel_size=(3, 1), second_kernel_size=(5, 1), padding='SAME', nonpadding_mask=nonpadding_mask, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return x
Applies a feed-forward function which is parametrised for encoding. Args: x: input hparams: model hyper-parameters nonpadding_mask: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. pad_remover: to mask out padding in convolutional layers (efficiency). Returns: the output tensor
codesearchnet
def _parse_price(html_chunk):
    price = get_first_content(html_chunk.find('div', {'class': 'prices'}))
    if not price:
        return None
    price = dhtmlparser.removeTags(price)
    price = price.split('\n')[-1]
    return price
Parse price of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Price as string with currency or None if not found.
codesearchnet
def __init__(self, config, auth: str) -> None:
    self.config = config
    self.auth = auth
    self.columns = SA_FIELDS
    self.reportId = None
Construct a report factory, providing project and authentication data. This class will track the reportID internally if the request call is used. Args: config, required - see: starthinker/util/configuration.py auth, required - either "user" or "service" used to create and/or read the report. Returns: None
github-repos
def from_config(cls, config, custom_objects=None, columns_by_name=None): return cls._from_config(config, custom_objects, columns_by_name)
Creates a FeatureColumn from its config. This method should be the reverse of `get_config`, capable of instantiating the same FeatureColumn from the config dictionary. See `get_config` for an example of common (de)serialization practices followed in this file. TODO(b/118939620): This is a private method until consensus is reached on supporting object deserialization deduping within Keras. Args: config: A Dict config acquired with `get_config`. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. columns_by_name: A Dict[String, FeatureColumn] of existing columns in order to avoid duplication. Should be passed to any calls to deserialize_feature_column(). Returns: A FeatureColumn for the input config.
github-repos
def sample_uniform(domain, rng):
    if isinstance(domain, hp.IntInterval):
        return rng.randint(domain.min_value, domain.max_value)
    elif isinstance(domain, hp.RealInterval):
        return rng.uniform(domain.min_value, domain.max_value)
    elif isinstance(domain, hp.Discrete):
        return rng.choice(domain.values)
    else:
        raise TypeError('unknown domain type: %r' % (domain,))
Sample a value uniformly from a domain. Args: domain: An `IntInterval`, `RealInterval`, or `Discrete` domain. rng: A `random.Random` object; defaults to the `random` module. Raises: TypeError: If `domain` is not a known kind of domain. IndexError: If the domain is empty.
codesearchnet
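A minimal sketch of the same dispatch with stand-in domain classes; hp.IntInterval, hp.RealInterval and hp.Discrete are replaced here by hypothetical dataclasses, and only the branching logic is illustrated.

import random
from dataclasses import dataclass

@dataclass
class IntInterval:
    min_value: int
    max_value: int

@dataclass
class Discrete:
    values: list

def sample_uniform_sketch(domain, rng):
    # Pick a value uniformly at random according to the domain kind.
    if isinstance(domain, IntInterval):
        return rng.randint(domain.min_value, domain.max_value)
    if isinstance(domain, Discrete):
        return rng.choice(domain.values)
    raise TypeError('unknown domain type: %r' % (domain,))

rng = random.Random(0)
print(sample_uniform_sketch(IntInterval(1, 10), rng))
print(sample_uniform_sketch(Discrete(['adam', 'sgd']), rng))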
def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100): out_logits, out_bbox = (outputs.logits, outputs.pred_boxes) if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') prob = out_logits.sigmoid() prob = prob.view(out_logits.shape[0], -1) k_value = min(top_k, prob.size(1)) topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor') labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({'scores': score, 'labels': label, 'boxes': box}) return results
Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. top_k (`int`, *optional*, defaults to 100): Keep only top k bounding boxes before filtering by thresholding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model.
github-repos
def notify_progress(self, conn_string, operation, finished, total, wait=True):
    if operation not in self.PROGRESS_OPERATIONS:
        raise ArgumentError('Invalid operation for progress event: {}'.format(operation))
    event = dict(operation=operation, finished=finished, total=total)
    if wait:
        return self.notify_event(conn_string, 'progress', event)
    self.notify_event_nowait(conn_string, 'progress', event)
    return None
Send a progress event. Progress events can be sent for ``debug`` and ``script`` operations and notify the caller about the progress of these potentially long-running operations. They have two integer properties that specify what fraction of the operation has been completed. Args: conn_string (str): The device that is sending the event. operation (str): The operation that is in progress: debug or script finished (int): The number of "steps" that have finished. total (int): The total number of steps to perform. wait (bool): Whether to return an awaitable that we can use to block until the notification has made it to all callbacks. Returns: awaitable or None: An awaitable if wait=True. If wait is False, the notification is run in the background with no way to check its progress and None is returned.
codesearchnet
def condition_indices(df):
    eigvals = eigenvalues(df)
    cond_idx = np.sqrt(eigvals.max() / eigvals)
    return pd.Series(cond_idx, df.columns, name='Condition index')
Returns a pandas Series with condition indices of the df columns. Args: df: pandas DataFrame with columns to run diagnostics on
juraj-google-style
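The condition indices are just sqrt(largest eigenvalue / each eigenvalue). A NumPy-only sketch follows, assuming (this is an assumption, the eigenvalues() helper is not shown) that the eigenvalues come from the correlation matrix of the columns; large indices flag near-collinear predictors.

import numpy as np
import pandas as pd

df = pd.DataFrame({'x1': [1.0, 2.0, 3.0, 4.0],
                   'x2': [2.1, 3.9, 6.2, 8.1],   # nearly 2 * x1 -> collinearity
                   'x3': [0.5, 0.4, 0.6, 0.5]})

# Eigenvalues of the correlation matrix (assumed source of eigenvalues()).
eigvals = np.linalg.eigvalsh(df.corr().values)
cond_idx = np.sqrt(eigvals.max() / eigvals)
print(pd.Series(cond_idx, df.columns, name='Condition index'))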
def get_user(self, user_id=None, user_name=None): if user_id: endpoint = '/api/user_id/{0}'.format(user_id) elif user_name: endpoint = '/api/user_name/{0}'.format(user_name) else: endpoint = '/api/user' data = self._make_request(verb='GET', endpoint=endpoint) try: return User.NewFromJSON(data) except: return data
Get a user object from the API. If no ``user_id`` or ``user_name`` is specified, it will return the User object for the currently authenticated user. Args: user_id (int): User ID of the user for whom you want to get information. [Optional] user_name(str): Username for the user for whom you want to get information. [Optional] Returns: A User object.
codesearchnet
def to_json(self, variables=None): variables_to_resolve = [] if variables: for key, value in variables.items(): variables_to_resolve.append(Variable(key, value)) for k in self.get_parameter_definitions(): if not variables or k not in variables: variables_to_resolve.append(Variable(k, 'unused_value')) self.resolve_variables(variables_to_resolve) return self.render_template()[1]
Render the blueprint and return the template in json form. Args: variables (dict): Optional dictionary providing/overriding variable values. Returns: str: the rendered CFN JSON template
juraj-google-style
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):
    beginning_of_answer = self.tokenizer.convert_tokens_to_ids(BEGINNING_OF_ANSWER_STRING)
    unpadded_output_sequences = [
        seq[(seq == beginning_of_answer).nonzero(as_tuple=True)[0] + 1:]
        for seq in generated_outputs
    ]
    max_len = max(len(seq) for seq in unpadded_output_sequences)
    padded_output_sequences = torch.full((len(unpadded_output_sequences), max_len), self.pad_token_id)
    for i, seq in enumerate(unpadded_output_sequences):
        padded_output_sequences[i, :len(seq)] = torch.tensor(seq)
    return self.batch_decode(padded_output_sequences, skip_special_tokens=skip_special_tokens, **kwargs)
Post-processes the output of `FuyuForConditionalGeneration` to only return the text output. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model. The output is expected to be a tensor of shape `(batch_size, sequence_length)` containing the token ids of the generated sequences. skip_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. **kwargs: Additional arguments to be passed to the tokenizer's `batch_decode method`. Returns: `List[str]`: The decoded text output.
github-repos
def strip_number(self):
    if self.type != EventType.TABLET_PAD_STRIP:
        raise AttributeError(_wrong_prop.format(self.type))
    return self._libinput.libinput_event_tablet_pad_get_strip_number(self._handle)
The number of the strip that has changed state, with 0 being the first strip. On tablets with only one strip, this method always returns 0. For events not of type :attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property raises :exc:`AttributeError`. Returns: int: The index of the strip that changed state. Raises: AttributeError
codesearchnet
def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False): to_exclude = ['coach_bsites', 'coach_ec', 'coach_go_mf', 'coach_go_bp', 'coach_go_cc'] if (not exclude_attributes): excluder = to_exclude else: excluder = ssbio.utils.force_list(exclude_attributes) excluder.extend(to_exclude) summary_dict = StructProp.get_dict(self, only_attributes=only_attributes, exclude_attributes=excluder, df_format=df_format) if self.coach_bsites: tmp = {('top_bsite_' + k): v for (k, v) in self.coach_bsites[0].items()} summary_dict.update(tmp) if self.coach_ec: tmp = {('top_ec_' + k): v for (k, v) in self.coach_ec[0].items()} summary_dict.update(tmp) if self.coach_go_mf: tmp = {('top_go_mf_' + k): v for (k, v) in self.coach_go_mf[0].items()} summary_dict.update(tmp) if self.coach_go_bp: tmp = {('top_go_bp_' + k): v for (k, v) in self.coach_go_bp[0].items()} summary_dict.update(tmp) if self.coach_go_cc: tmp = {('top_go_cc_' + k): v for (k, v) in self.coach_go_cc[0].items()} summary_dict.update(tmp) return summary_dict
Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH Args: only_attributes (str, list): Attributes that should be returned. If not provided, all are returned. exclude_attributes (str, list): Attributes that should be excluded. df_format (bool): If dictionary values should be formatted for a dataframe (everything possible is transformed into strings, int, or float - if something can't be transformed it is excluded) Returns: dict: Dictionary of attributes
codesearchnet
def on_test_end(self, logs=None):
Called at the end of evaluation or validation. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to `on_test_batch_end()` is passed to this argument for this method but that may change in the future.
github-repos
def _matmul_3d_with_map_fn(a, b, **kwargs): if isinstance(b, ragged_tensor.RaggedTensor) and (b.ragged_rank == 2 or kwargs.get('transpose_b') or kwargs.get('adjoint_b')): output_ragged_rank = 2 else: output_ragged_rank = 1 def single_batch_matmul(x): out = _matmul_2d(x[0], x[1], **kwargs) if output_ragged_rank == 2: out = ragged_tensor.RaggedTensor.from_tensor(out) return out fn_out_shape = None row_splits_dtype = a.row_splits.dtype if isinstance(a, ragged_tensor.RaggedTensor) else b.row_splits.dtype output_type = kwargs['output_type'] if output_type is None: output_type = a.dtype spec = ragged_tensor.RaggedTensorSpec(shape=fn_out_shape, dtype=output_type, ragged_rank=output_ragged_rank - 1, row_splits_dtype=row_splits_dtype) result = map_fn.map_fn(single_batch_matmul, elems=(a, b), fn_output_signature=spec) if kwargs.get('transpose_a') or kwargs.get('adjoint_a'): result._set_shape(a.shape[:-2] + a.shape[-1:] + [None]) else: result._set_shape(a.shape[:-2] + a.shape[-2:-1] + [None]) if kwargs.get('transpose_b') or kwargs.get('adjoint_b'): result._set_shape(b.shape[:-2] + [None] + b.shape[-2:-1]) else: result._set_shape(b.shape[:-2] + [None] + b.shape[-1:]) return result
Multiplies batches of 2D matrices using map_fn. `output[n, i, k]` = sum_j (a[n, i, j] * b[n, j, k])` (for all `n`, `i`, `k`). Requires that `a[n, i].nrows()` == `b[n].nrows()` (for all `n` and `i`). Args: a: A 3D Tensor or RaggedTensor with `shape=[B, I, J]`, where dimensions `I` and `J` may be ragged. b: A 3D Tensor or RaggedTensor with `shape=[B, J, K]`, where dimensions `J` and `K` may be ragged. **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a). Returns: A 3D RaggedTensor with `shape=[B, (I), (K)]`.
github-repos
def _sim_timestamps(self, max_rate, bg_rate, emission, i_start, rs, ip_start=0, scale=10, sort=True): counts_chunk = sim_timetrace_bg(emission, max_rate, bg_rate, self.t_step, rs=rs) nrows = emission.shape[0] if (bg_rate is not None): nrows += 1 assert (counts_chunk.shape == (nrows, emission.shape[1])) max_counts = counts_chunk.max() if (max_counts == 0): return (np.array([], dtype=np.int64), np.array([], dtype=np.int64)) time_start = (i_start * scale) time_stop = (time_start + (counts_chunk.shape[1] * scale)) ts_range = np.arange(time_start, time_stop, scale, dtype='int64') times_chunk_p = [] par_index_chunk_p = [] for (ip, counts_chunk_ip) in enumerate(counts_chunk): times_c_ip = [] for v in range(1, (max_counts + 1)): times_c_ip.append(ts_range[(counts_chunk_ip >= v)]) t = np.hstack(times_c_ip) times_chunk_p.append(t) par_index_chunk_p.append(np.full(t.size, (ip + ip_start), dtype='u1')) times_chunk = np.hstack(times_chunk_p) par_index_chunk = np.hstack(par_index_chunk_p) if sort: index_sort = times_chunk.argsort(kind='mergesort') times_chunk = times_chunk[index_sort] par_index_chunk = par_index_chunk[index_sort] return (times_chunk, par_index_chunk)
Simulate timestamps from emission trajectories. Uses attributes: `.t_step`. Returns: A tuple of two arrays: timestamps and particles.
codesearchnet
async def get_person(self, id_):
    data = await self._get_person_json(
        id_, OrderedDict(append_to_response='movie_credits'))
    return Person.from_json(data, self.config['data'].get('images'))
Retrieve person data by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. Returns: :py:class:`~.Person`: The requested person.
juraj-google-style
def _add_deprecation_notice_to_docstring(docstring, message):
    if docstring:
        return f'{docstring}\n\n.. deprecated:: {message}'
    else:
        return f'.. deprecated:: {message}'
Adds a deprecation notice to a docstring. Args: docstring: The original docstring (can be None or empty). message: The deprecation message to add. Returns: The modified docstring.
github-repos
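Usage is straightforward; the resulting string carries a Sphinx ``.. deprecated::`` directive (the version strings below are made-up values).

doc = 'Compute the thing.'
print(_add_deprecation_notice_to_docstring(doc, '4.50.0, use `compute` instead'))
# Compute the thing.
#
# .. deprecated:: 4.50.0, use `compute` instead
print(_add_deprecation_notice_to_docstring(None, '4.50.0'))
# .. deprecated:: 4.50.0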
def convert_code(in_file, out_file, in_alg='taudem', out_alg='arcgis', datatype=None): FileClass.check_file_exists(in_file) in_alg = in_alg.lower() out_alg = out_alg.lower() if in_alg not in FlowModelConst.d8_dirs or out_alg not in FlowModelConst.d8_dirs: raise RuntimeError('The input algorithm name should one of %s' % ', '.join(list(FlowModelConst.d8_dirs.keys()))) convert_dict = dict() in_code = FlowModelConst.d8_dirs.get(in_alg) out_code = FlowModelConst.d8_dirs.get(out_alg) assert len(in_code) == len(out_code) for i, tmp_in_code in enumerate(in_code): convert_dict[tmp_in_code] = out_code[i] if datatype is not None and datatype in GDALDataType: RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file, datatype) else: RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file)
convert D8 flow direction code from one algorithm to another. Args: in_file: input raster file path out_file: output raster file path in_alg: available algorithms are in FlowModelConst.d8_dirs. "taudem" is the default out_alg: same as in_alg. "arcgis" is the default datatype: default is None and use the datatype of the in_file
juraj-google-style
def parameterized_send(self, request, parameter_list):
    response_queues = OrderedDict()
    for parameter in parameter_list:
        response_queues[parameter] = self.send(request % parameter)
    return response_queues
Send batched requests for a list of parameters Args: request (str): Request to send, like "%s.*?\n" parameter_list (list): parameters to format with, like ["TTLIN", "TTLOUT"] Returns: dict: {parameter: response_queue}
codesearchnet
def ParseFromHumanReadable(self, string):
    if not string:
        return None
    match = self.REGEX.match(string.strip().lower())
    if not match:
        raise DecodeError('Unknown specification for ByteSize %s' % string)
    multiplier = self.DIVIDERS.get(match.group(2))
    if not multiplier:
        raise DecodeError('Invalid multiplier %s' % match.group(2))
    value = match.group(1)
    if '.' in value:
        value = float(value)
    else:
        value = int(value)
    self._value = int(value * multiplier)
Parse a human readable string of a byte string. Args: string: The string to parse. Raises: DecodeError: If the string can not be parsed.
codesearchnet
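The class attributes REGEX and DIVIDERS are not shown in this snippet; the self-contained sketch below assumes a regex and multiplier table that roughly match the "number plus optional unit" format the method expects, and only illustrates the parse.

import re

REGEX = re.compile(r'^([0-9.]+)\s*([kmgt]i?b?|b)?$')  # assumed pattern
DIVIDERS = {None: 1, 'b': 1,
            'kb': 10**3, 'mb': 10**6, 'gb': 10**9,
            'kib': 2**10, 'mib': 2**20, 'gib': 2**30}  # assumed table

def parse_byte_size(string):
    match = REGEX.match(string.strip().lower())
    if not match:
        raise ValueError('Unknown specification for ByteSize %s' % string)
    multiplier = DIVIDERS.get(match.group(2))
    if multiplier is None:
        raise ValueError('Invalid multiplier %s' % match.group(2))
    value = float(match.group(1)) if '.' in match.group(1) else int(match.group(1))
    return int(value * multiplier)

print(parse_byte_size('1.5 GiB'))  # 1610612736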
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False): self._logger.info('Loading configuration from file: %s', yamlfile) try: parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read()) except self._modules['yaml'].YAMLError: self._logger.exception('Problem parsing YAML') raise self.ConfigurationInvalidError( 'Failed to load from %s as YAML' % yamlfile) if not isinstance(parsed_yaml, dict): raise self.ConfigurationInvalidError( 'YAML parsed, but wrong type, should be dict', parsed_yaml) self._logger.debug('Configuration loaded from file: %s', parsed_yaml) self.load_from_dict( parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared)
Loads the configuration from a file. Parsed contents must be a single dict mapping config key to value. Args: yamlfile: The opened file object to load configuration from. See load_from_dict() for other args' descriptions. Raises: ConfigurationInvalidError: If configuration file can't be read, or can't be parsed as either YAML (or JSON, which is a subset of YAML).
juraj-google-style
def merge_translations(localization_bundle_path): logging.info('Merging translations') for lang_dir in os.listdir(localization_bundle_path): if (lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME): continue for translated_path in glob.glob(os.path.join(localization_bundle_path, lang_dir, ('*' + TRANSLATED_SUFFIX))): strings_path = translated_path[:((- 1) * len(TRANSLATED_SUFFIX))] localizable_path = os.path.join(localization_bundle_path, DEFAULT_LANGUAGE_DIRECTORY_NAME, os.path.basename(strings_path)) localization_merge_back(localizable_path, strings_path, translated_path, strings_path)
Merges the new translation with the old one. The translated files are saved as '.translated' file, and are merged with old translated file. Args: localization_bundle_path (str): The path to the localization bundle.
codesearchnet
def get_build_tool_version(self): with open(('%s/%s/build.gradle' % (self.path, self.src_folder))) as f: for line in f.readlines(): if ('buildToolsVersion' in line): matches = re.findall('buildToolsVersion \\"(.+?)\\"', line) if (len(matches) == 1): return matches[0] return config.build_tool_version
Gets the build tool version to be used by zipalign from build.gradle file. Returns: A string containing the build tool version, default is 23.0.2.
codesearchnet
def oem(self): buf = (ctypes.c_char * self.MAX_BUF_SIZE)() res = self._dll.JLINKARM_GetOEMString(ctypes.byref(buf)) if res != 0: raise errors.JLinkException('Failed to grab OEM string.') oem = ctypes.string_at(buf).decode() if len(oem) == 0: return None return oem
Retrieves and returns the OEM string of the connected J-Link. Args: self (JLink): the ``JLink`` instance Returns: The string of the OEM. If this is an original SEGGER product, then ``None`` is returned instead. Raises: JLinkException: on hardware error.
juraj-google-style
def grid_destroy_from_name(job_name):
    jobs = grid_reload_from_name(job_name)
    for job in jobs:
        job.delete()
        logger.info("Killing the job (%s, %s)" % (job.site, job.uid))
Destroy all the jobs with a given name. Args: job_name (str): the job name
juraj-google-style
def _get_endpoint(self, sub_domain):
    storage_parameters = self._storage_parameters or dict()
    account_name = storage_parameters.get('account_name')
    if not account_name:
        raise ValueError('"account_name" is required for Azure storage')
    suffix = storage_parameters.get('endpoint_suffix', 'core.windows.net')
    # Endpoint format assumed: http(s)://<account_name>.<sub_domain>.<suffix>
    self._endpoint = 'http%s://%s.%s.%s' % (
        '' if self._unsecure else 's', account_name, sub_domain, suffix)
    return account_name, suffix.replace('.', r'\.')
Get endpoint information from storage parameters. Update system with endpoint information and return information required to define roots. Args: self (pycosio._core.io_system.SystemBase subclass): System. sub_domain (str): Azure storage sub-domain. Returns: tuple of str: account_name, endpoint_suffix
juraj-google-style
def query_api_version(self):
    version_resp = self._session.get('/api/version', logon_required=False)
    self._api_version = version_resp
    return self._api_version
The Query API Version operation returns information about the level of Web Services API supported by the HMC. This operation does not require authentication. Returns: :term:`json object`: A JSON object with members ``api-major-version``, ``api-minor-version``, ``hmc-version`` and ``hmc-name``. For details about these properties, see section 'Response body contents' in section 'Query API Version' in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.ConnectionError`
codesearchnet
def GetFileSystemTypeIndicators(cls, path_spec, resolver_context=None): if ((cls._file_system_remainder_list is None) or (cls._file_system_store is None)): (specification_store, remainder_list) = cls._GetSpecificationStore(definitions.FORMAT_CATEGORY_FILE_SYSTEM) cls._file_system_remainder_list = remainder_list cls._file_system_store = specification_store if (cls._file_system_scanner is None): cls._file_system_scanner = cls._GetSignatureScanner(cls._file_system_store) return cls._GetTypeIndicators(cls._file_system_scanner, cls._file_system_store, cls._file_system_remainder_list, path_spec, resolver_context=resolver_context)
Determines if a file contains a supported file system types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators.
codesearchnet
def Dump(obj, sort_keys=False, encoder=None):
    text = json.dumps(
        obj,
        indent=2,
        sort_keys=sort_keys,
        ensure_ascii=False,
        cls=encoder,
        separators=_SEPARATORS)
    if compatibility.PY2 and isinstance(text, bytes):
        text = text.decode("utf-8")
    return text
Stringifies a Python object into its JSON representation. Args: obj: A Python object to convert to JSON. sort_keys: If True, output dictionaries keys in sorted (ascending) order. encoder: An (optional) encoder class to use. Returns: A JSON representation of the given object.
juraj-google-style
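The call boils down to json.dumps with 2-space indentation, optional key sorting, and explicit separators; for example (the separator tuple here is an assumption for what _SEPARATORS holds):

import json

_SEPARATORS = (',', ': ')  # assumed value of the module-level constant
text = json.dumps({'b': 1, 'a': [1, 2]}, indent=2, sort_keys=True,
                  ensure_ascii=False, separators=_SEPARATORS)
print(text)
# {
#   "a": [
#     1,
#     2
#   ],
#   "b": 1
# }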
def _segment_reduce(values, index, segment_reduce_fn, name):
    flat_index = flatten(index)
    vector_shape = values.size()[len(index.indices.size()):]
    flattened_shape = torch.cat(
        [torch.as_tensor([-1], dtype=torch.long),
         torch.as_tensor(vector_shape, dtype=torch.long)], dim=0)
    flat_values = values.reshape(flattened_shape.tolist())
    out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device)
    segment_means = out.scatter_reduce(
        dim=0, index=flat_index.indices.long(), src=flat_values.float(),
        reduce=segment_reduce_fn, include_self=False)
    device = index.num_segments.device
    new_shape = torch.cat(
        [torch.as_tensor(index.batch_shape(), dtype=torch.long, device=device),
         torch.as_tensor([index.num_segments], dtype=torch.long, device=device),
         torch.as_tensor(vector_shape, dtype=torch.long, device=device)], dim=0)
    output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype)
    output_index = range_index_map(index.batch_shape(), index.num_segments)
    return (output_values, output_index)
Applies a segment reduction segment-wise. Args: values (`torch.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
github-repos
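The core idea, reducing a flat value tensor into per-segment buckets given by an index, can be shown with a small PyTorch example using the same scatter_reduce primitive (requires a recent PyTorch; segment ids and values below are made up):

import torch

values = torch.tensor([1.0, 2.0, 3.0, 10.0, 20.0])
segment_ids = torch.tensor([0, 0, 0, 1, 1])
num_segments = 2

out = torch.zeros(num_segments)
# Mean of each segment's values; include_self=False ignores the zeros in `out`.
means = out.scatter_reduce(dim=0, index=segment_ids, src=values,
                           reduce='mean', include_self=False)
print(means)  # tensor([ 2., 15.])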
def __init__(self, index, port = 8081): self.index = index self.server = None self.port = port if port else find_free_port() self.settings = index.columns self.docs = index.docs self._create_settings() self.html_path = get_cur_path()+'/data/table/' self.cleanup_flag = False
Table Constructor todo::make sure this is memory efficient Args: Index (Index): An Index object with a valid .query method and a .columns attribute. Returns: A table object Usage example >>> Table(ind)
juraj-google-style
def get_by_provider_display_name(self, provider_display_name):
    san_managers = self._client.get_all()
    result = [x for x in san_managers if x['providerDisplayName'] == provider_display_name]
    return result[0] if result else None
Gets a SAN Manager by provider display name. Args: provider_display_name: Name of the Provider Display Name Returns: dict: SAN Manager.
juraj-google-style
def __init__(self, config: JetMoeConfig, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx self.is_causal = True if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.top_k = config.num_experts_per_tok self.attention_dropout = config.attention_dropout self.kv_projection_size = config.kv_channels * config.num_key_value_heads self.num_key_value_heads = config.num_key_value_heads self.num_heads = config.num_attention_heads self.head_dim = config.kv_channels self.experts = JetMoeMoA(config) self.kv_proj = torch.nn.Linear(config.hidden_size, self.kv_projection_size * 2, bias=False) self.rotary_emb = JetMoeRotaryEmbedding(config)
Initialize the JetMoeAttention module. Args: config: Configuration object with model hyperparameters. layer_idx: Index of the layer in the model.
github-repos
def scale_geom_opt_threshold(self, gradient=0.1, displacement=0.1, energy=0.1):
    if (gradient < 1.0 / (300 - 1) or displacement < 1.0 / (1200 - 1)
            or energy < 1.0 / (100 - 1)):
        raise ValueError('The geometry optimization convergence criteria is too tight')
    self.params['rem']['geom_opt_tol_gradient'] = int(gradient * 300)
    self.params['rem']['geom_opt_tol_displacement'] = int(displacement * 1200)
    self.params['rem']['geom_opt_tol_energy'] = int(energy * 100)
Adjust the convergence criteria of geometry optimization. Args: gradient: the scale factor for gradient criteria. If less than 1.0, you are tightening the threshold. The base value is 300 × 10E−6 displacement: the scale factor for atomic displacement. If less than 1.0, you are tightening the threshold. The base value is 1200 × 10E−6 energy: the scale factor for energy change between successive iterations. If less than 1.0, you are tightening the threshold. The base value is 100 × 10E−8.
codesearchnet
def _verify_time_range(payload_dict):
    now = int(time.time())
    issued_at = payload_dict.get('iat')
    if issued_at is None:
        raise AppIdentityError('No iat field in token: {0}'.format(payload_dict))
    expiration = payload_dict.get('exp')
    if expiration is None:
        raise AppIdentityError('No exp field in token: {0}'.format(payload_dict))
    if expiration >= now + MAX_TOKEN_LIFETIME_SECS:
        raise AppIdentityError('exp field too far in future: {0}'.format(payload_dict))
    earliest = issued_at - CLOCK_SKEW_SECS
    if now < earliest:
        raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(now, earliest, payload_dict))
    latest = expiration + CLOCK_SKEW_SECS
    if now > latest:
        raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(now, latest, payload_dict))
Verifies the issued at and expiration from a JWT payload. Makes sure the current time (in UTC) falls between the issued at and expiration for the JWT (with some skew allowed for via ``CLOCK_SKEW_SECS``). Args: payload_dict: dict, A dictionary containing a JWT payload. Raises: AppIdentityError: If there is no ``'iat'`` field in the payload dictionary. AppIdentityError: If there is no ``'exp'`` field in the payload dictionary. AppIdentityError: If the JWT expiration is too far in the future (i.e. if the expiration would imply a token lifetime longer than what is allowed.) AppIdentityError: If the token appears to have been issued in the future (up to clock skew). AppIdentityError: If the token appears to have expired in the past (up to clock skew).
codesearchnet
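A worked example of the same window check with concrete numbers; the two constants are assumptions standing in for the library's module-level values.

import time

CLOCK_SKEW_SECS = 300            # assumed: 5 minutes of allowed clock skew
MAX_TOKEN_LIFETIME_SECS = 86400  # assumed: 1 day maximum lifetime

now = int(time.time())
payload = {'iat': now - 60, 'exp': now + 3600}

ok = (payload['exp'] < now + MAX_TOKEN_LIFETIME_SECS     # not too far in the future
      and now >= payload['iat'] - CLOCK_SKEW_SECS        # not used too early
      and now <= payload['exp'] + CLOCK_SKEW_SECS)       # not used too late
print(ok)  # True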
def _AddFileDescriptor(self, file_desc):
    if not isinstance(file_desc, descriptor.FileDescriptor):
        raise TypeError('Expected instance of descriptor.FileDescriptor.')
    self._file_descriptors[file_desc.name] = file_desc
Adds a FileDescriptor to the pool, non-recursively. If the FileDescriptor contains messages or enums, the caller must explicitly register them. Args: file_desc: A FileDescriptor.
juraj-google-style
def display(port=None, height=None): _display(port=port, height=height, print_message=True, display_handle=None)
Display a TensorBoard instance already running on this machine. Args: port: The port on which the TensorBoard server is listening, as an `int`, or `None` to automatically select the most recently launched TensorBoard. height: The height of the frame into which to render the TensorBoard UI, as an `int` number of pixels, or `None` to use a default value (currently 800).
codesearchnet
def get_vulnerability_chains(current_node, sink, def_use, chain=[]):
    for use in def_use[current_node]:
        if use == sink:
            yield chain
        else:
            vuln_chain = list(chain)
            vuln_chain.append(use)
            yield from get_vulnerability_chains(use, sink, def_use, vuln_chain)
Traverses the def-use graph to find all paths from source to sink that cause a vulnerability. Args: current_node() sink() def_use(dict): chain(list(Node)): A path of nodes between source and sink.
codesearchnet
def ParseRecord(self, parser_mediator, key, structure): if key not in ('header', 'header_signature', 'logline'): raise errors.ParseError( 'Unable to parse record, unknown structure: {0:s}'.format(key)) if key == 'logline': self._ParseLogLine(parser_mediator, structure) elif key == 'header': self._ParseHeader(parser_mediator, structure) elif key == 'header_signature': logger.warning('Unknown locale header.') self._xchat_year = 0
Parses a log record structure and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): identifier of the structure of tokens. structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Raises: ParseError: when the structure type is unknown.
juraj-google-style
def resize_bytes(fobj, old_size, new_size, offset):
    if new_size < old_size:
        delete_size = old_size - new_size
        delete_at = offset + new_size
        delete_bytes(fobj, delete_size, delete_at)
    elif new_size > old_size:
        insert_size = new_size - old_size
        insert_at = offset + old_size
        insert_bytes(fobj, insert_size, insert_at)
Resize an area in a file adding and deleting at the end of it. Does nothing if no resizing is needed. Args: fobj (fileobj) old_size (int): The area starting at offset new_size (int): The new size of the area offset (int): The start of the area Raises: IOError
juraj-google-style
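A quick illustration of the arithmetic: growing a region inserts new bytes at its end, shrinking deletes from its end. The insert/delete helpers below are simplified stand-ins for mutagen's insert_bytes/delete_bytes, which rewrite the file in place.

import io

def insert_bytes(fobj, size, offset):
    # Simplified: read the tail, then write zero padding followed by the tail.
    fobj.seek(offset)
    tail = fobj.read()
    fobj.seek(offset)
    fobj.write(b'\x00' * size + tail)

def delete_bytes(fobj, size, offset):
    fobj.seek(offset + size)
    tail = fobj.read()
    fobj.seek(offset)
    fobj.write(tail)
    fobj.truncate()

f = io.BytesIO(b'HEADERBODYTRAILER')
# Grow the 4-byte area 'BODY' at offset 6 to 6 bytes (mirrors the grow branch above).
old_size, new_size, offset = 4, 6, 6
insert_bytes(f, new_size - old_size, offset + old_size)
print(f.getvalue())  # b'HEADERBODY\x00\x00TRAILER'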
def derive_value(self, value): return IonEvent( self.event_type, self.ion_type, value, self.field_name, self.annotations, self.depth )
Derives a new event from this one setting the ``value`` attribute. Args: value: (any): The value associated with the derived event. Returns: IonEvent: The newly generated non-thunk event.
juraj-google-style
def for_default_graph(*args, **kwargs): graph = tf.get_default_graph() collection = graph.get_collection(_BOOKKEEPER) if collection: if (args or kwargs): raise ValueError(('Requesting construction of a BookKeeper that already exists: %s %s' % (args, kwargs))) return collection[0] else: books = BOOKKEEPER_FACTORY(*args, g=graph, **kwargs) graph.add_to_collection(_BOOKKEEPER, books) return books
Creates a bookkeeper for the default graph. Args: *args: Arguments to pass into Bookkeeper's constructor. **kwargs: Arguments to pass into Bookkeeper's constructor. Returns: A new Bookkeeper. Raises: ValueError: If args or kwargs are provided and the Bookkeeper already exists.
codesearchnet
def restart(self, container, timeout=10):
    params = {'t': timeout}
    url = self._url("/containers/{0}/restart", container)
    conn_timeout = self.timeout
    if conn_timeout is not None:
        conn_timeout += timeout
    res = self._post(url, params=params, timeout=conn_timeout)
    self._raise_for_status(res)
Restart a container. Similar to the ``docker restart`` command. Args: container (str or dict): The container to restart. If a dict, the ``Id`` key is used. timeout (int): Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def get_enabled_features(self, user_id, attributes=None): enabled_features = [] if not self.is_valid: self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_enabled_features')) return enabled_features if not isinstance(user_id, string_types): self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id')) return enabled_features if not self._validate_user_inputs(attributes): return enabled_features for feature in self.config.feature_key_map.values(): if self.is_feature_enabled(feature.key, user_id, attributes): enabled_features.append(feature.key) return enabled_features
Returns the list of features that are enabled for the user. Args: user_id: ID for user. attributes: Dict representing user attributes. Returns: A list of the keys of the features that are enabled for the user.
juraj-google-style
def nack(self, items):
    self.modify_ack_deadline(
        [requests.ModAckRequest(ack_id=item.ack_id, seconds=0) for item in items])
    self.drop([requests.DropRequest(*item) for item in items])
Explicitly deny receipt of messages. Args: items(Sequence[NackRequest]): The items to deny.
codesearchnet
def _validate_observation_data(kernel, observation_index_points, observations): ndims = kernel.feature_ndims if (tensorshape_util.is_fully_defined(observation_index_points.shape[:(- ndims)]) and tensorshape_util.is_fully_defined(observations.shape)): index_point_count = observation_index_points.shape[:(- ndims)] observation_count = observations.shape try: tf.broadcast_static_shape(index_point_count, observation_count) except ValueError: raise ValueError('Observation index point and observation counts are not broadcastable: {} and {}, respectively.'.format(index_point_count, observation_count))
Ensure that observation data and locations have consistent shapes. This basically means that the batch shapes are broadcastable. We can only ensure this when those shapes are fully statically defined. Args: kernel: The GP kernel. observation_index_points: the observation data locations in the index set. observations: the observation data. Raises: ValueError: if the observations' batch shapes are not broadcastable.
codesearchnet
def _ParseCommentRecord(self, structure):
    comment = structure[1]
    if comment.startswith('Version'):
        _, _, self._version = comment.partition(':')
    elif comment.startswith('Software'):
        _, _, self._software = comment.partition(':')
    elif comment.startswith('Time'):
        _, _, time_format = comment.partition(':')
        if 'local' in time_format.lower():
            self._use_local_timezone = True
Parse a comment and store appropriate attributes. Args: structure (pyparsing.ParseResults): parsed log line.
juraj-google-style
def pre_release_work(patch: bool=False): default_version = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!") if default_version.is_devrelease: default_version = default_version.base_version elif patch: default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: default_version = f'{default_version.major}.{default_version.minor + 1}.0' version = input(f'Which version are you releasing? [{default_version}]') if len(version) == 0: version = default_version print(f'Updating version to {version}.') global_version_update(version, patch=patch) print('Deleting conversion scripts.') remove_conversion_scripts()
Do all the necessary pre-release steps: - figure out the next minor release version and ask confirmation - update the version everywhere - clean-up the model list in the main README Args: patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
github-repos
def _process_new(self, feed_item): return {'assetIdentifier': {'name': feed_item.get(FieldMap.CREATIVE_ASSET_FILE_NAME, None), 'type': feed_item.get(FieldMap.CREATIVE_TYPE, None)}}
Creates a new creative asset DCM object from a feed item representing a creative asset from the Bulkdozer feed. This function simply creates the object to be inserted later by the BaseDAO object. Args: feed_item: Feed item representing the creative asset from the Bulkdozer feed. Returns: A creative asset object ready to be inserted in DCM through the API.
github-repos
def matvec(self, x, adjoint=False, name='matvec'):
    with self._name_scope(name):
        block_dimensions = self._block_range_dimensions() if adjoint else self._block_domain_dimensions()
        if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):
            for i, block in enumerate(x):
                if not isinstance(block, linear_operator.LinearOperator):
                    block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)
                    self._check_input_dtype(block)
                    block_dimensions[i].assert_is_compatible_with(block.shape[-1])
                    x[i] = block
            x_mat = [block[..., array_ops.newaxis] for block in x]
            y_mat = self.matmul(x_mat, adjoint=adjoint)
            return [array_ops.squeeze(y, axis=-1) for y in y_mat]
        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')
        self._check_input_dtype(x)
        op_dimension = self.range_dimension if adjoint else self.domain_dimension
        op_dimension.assert_is_compatible_with(x.shape[-1])
        x_mat = x[..., array_ops.newaxis]
        y_mat = self.matmul(x_mat, adjoint=adjoint)
        return array_ops.squeeze(y_mat, axis=-1)
Transform [batch] vector `x` with left multiplication: `x --> Ax`. ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) X = ... # shape [..., N], batch vector Y = operator.matvec(X) Y.shape ==> [..., M] Y[..., :] = sum_j A[..., :, j] X[..., j] ``` Args: x: `Tensor` with compatible shape and same `dtype` as `self`, or an iterable of `Tensor`s. `Tensor`s are treated a [batch] vectors, meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. name: A name for this `Op`. Returns: A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
github-repos
def inference(self, observed_arr):
    self.__pred_arr = self.__lstm_model.inference(observed_arr)
    return self.__pred_arr
Draws samples from the `true` distribution. Args: observed_arr: `np.ndarray` of observed data points. Returns: `np.ndarray` of inferred data points.
juraj-google-style
def sort_ordered_objects(items, getter=(lambda x: x)): return sorted(items, key=(lambda x: getattr(getter(x), OrderedBase.CREATION_COUNTER_FIELD, (- 1))))
Sort an iterable of OrderedBase instances. Args: items (iterable): the objects to sort getter (callable or None): a function to extract the OrderedBase instance from an object. Examples: >>> sort_ordered_objects([x, y, z]) >>> sort_ordered_objects(v.items(), getter=lambda e: e[1])
codesearchnet
def get_min_max_value(statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions) -> tuple[float, float]:
    calib_method = calib_opts.calibration_method
    if calib_method not in _REGISTRY:
        raise ValueError(f'Unsupported calibration method: {calib_method}')
    calibration_algorithm = _REGISTRY[calib_method](statistics, calib_opts)
    return calibration_algorithm.get_min_max_value()
Calculates min and max from statistics using calibration options. Args: statistics: Collected calibration statistics. calib_opts: Calibration options used for calculating min and max. Returns: (min_value, max_value): Min and max calculated using calib_opts. Raises: ValueError: Unsupported calibration method is given.
github-repos
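For illustration only, a generic sketch of the registry-dispatch pattern used above; the class name, method key, and statistics format are invented stand-ins, not the real `_REGISTRY` contents:

```python
# Hypothetical calibration algorithm keyed by a method identifier.
class AverageMinMax:
    def __init__(self, statistics, options=None):
        self._stats = statistics

    def get_min_max_value(self):
        return (min(self._stats), max(self._stats))

_REGISTRY = {'AVERAGE_MIN_MAX': AverageMinMax}

def get_min_max_value(statistics, method):
    if method not in _REGISTRY:
        raise ValueError(f'Unsupported calibration method: {method}')
    return _REGISTRY[method](statistics).get_min_max_value()

print(get_min_max_value([0.1, -0.3, 2.5], 'AVERAGE_MIN_MAX'))  # (-0.3, 2.5)
```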
def get_service_state_object_id(subsystem: str, name: str, version: str) -> str: return '{}:{}:{}'.format(subsystem, name, version)
Return service state data object key. Args: subsystem (str): Subsystem the service belongs to name (str): Name of the Service version (str): Version of the Service Returns: str, Key used to store the service state data object
juraj-google-style
def _batch_accumulator(cls, primals, tangents):
    acc = super(ForwardAccumulator, cls).__new__(cls, primals, tangents)
    acc._recording = False
    acc._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(True)
    primal_ids = set()
    for primal, tangent in zip(nest.flatten(primals), nest.flatten(tangents)):
        tangent.shape.assert_is_compatible_with(tensor_shape.TensorShape([None]) + primal.shape)
        if id(primal) in primal_ids:
            raise ValueError('Tensor {} was specified as a primal multiple times. This may indicate an error. If it was intended, please sum the corresponding tangents.'.format(primal))
        primal_ids.add(id(primal))
    acc._watch(primals, tangents)
    return acc
Factory constructor to test accumulator on batches of tangents. Args: primals: A tensor or nested structure of tensors to watch. tangents: A tensor or nested structure of tensors, with the same nesting structure as `primals`, with each element being a vector with compatible shape `[None] + primal.shape` of the corresponding primal element. Returns: A batch accumulator object.
github-repos
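`_batch_accumulator` is a private test helper; for context, a usage sketch of the public single-tangent API it extends (assumes TensorFlow 2.x; not part of the original entry):

```python
import tensorflow as tf

x = tf.constant(3.0)
v = tf.constant(1.0)  # tangent (direction) for the Jacobian-vector product

with tf.autodiff.ForwardAccumulator(primals=x, tangents=v) as acc:
    y = x * x

print(acc.jvp(y))  # 6.0, since dy/dx = 2x evaluated at x = 3
```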
def get_configuration_file(configuration_files: list[str]) -> str:
    configuration_files_map = {}
    for file_name in configuration_files:
        if file_name.startswith('config.') and file_name.endswith('.json') and (file_name != 'config.json'):
            v = file_name.removeprefix('config.').removesuffix('.json')
            configuration_files_map[v] = file_name
    available_versions = sorted(configuration_files_map.keys())
    configuration_file = CONFIG_NAME
    transformers_version = version.parse(__version__)
    for v in available_versions:
        if version.parse(v) <= transformers_version:
            configuration_file = configuration_files_map[v]
        else:
            break
    return configuration_file
Get the configuration file to use for this version of transformers. Args: configuration_files (`List[str]`): The list of available configuration files. Returns: `str`: The configuration file to use.
github-repos
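A self-contained sketch of the same selection rule (the helper name, file list, and version string below are invented for the example; the real function reads the installed `transformers` version):

```python
from packaging import version

def pick_config_file(configuration_files, current_version, default='config.json'):
    # Choose the newest 'config.<version>.json' whose version does not exceed
    # the running library version; otherwise fall back to the default file.
    versioned = {}
    for name in configuration_files:
        if name.startswith('config.') and name.endswith('.json') and name != default:
            versioned[name[len('config.'):-len('.json')]] = name
    chosen = default
    for v in sorted(versioned, key=version.parse):
        if version.parse(v) <= version.parse(current_version):
            chosen = versioned[v]
        else:
            break
    return chosen

files = ['config.json', 'config.4.27.0.json', 'config.4.35.0.json']
print(pick_config_file(files, '4.30.0'))  # config.4.27.0.json
```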
class ZoeDepthReassembleStage(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.readout_type = config.readout_type
        self.layers = nn.ModuleList()
        for neck_hidden_size, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):
            self.layers.append(ZoeDepthReassembleLayer(config, channels=neck_hidden_size, factor=factor))
        if config.readout_type == 'project':
            self.readout_projects = nn.ModuleList()
            hidden_size = config.backbone_hidden_size
            for _ in config.neck_hidden_sizes:
                self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))

    def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:
        batch_size = hidden_states[0].shape[0]
        hidden_states = torch.cat(hidden_states, dim=0)
        cls_token, hidden_states = (hidden_states[:, 0], hidden_states[:, 1:])
        total_batch_size, sequence_length, num_channels = hidden_states.shape
        hidden_states = hidden_states.reshape(total_batch_size, patch_height, patch_width, num_channels)
        hidden_states = hidden_states.permute(0, 3, 1, 2).contiguous()
        if self.readout_type == 'project':
            hidden_states = hidden_states.flatten(2).permute((0, 2, 1))
            readout = cls_token.unsqueeze(dim=1).expand_as(hidden_states)
            hidden_states = torch.cat((hidden_states, readout), -1)
        elif self.readout_type == 'add':
            hidden_states = hidden_states + cls_token.unsqueeze(-1)
        out = []
        for stage_idx, hidden_state in enumerate(hidden_states.split(batch_size, dim=0)):
            if self.readout_type == 'project':
                hidden_state = self.readout_projects[stage_idx](hidden_state)
            hidden_state = hidden_state.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width)
            hidden_state = self.layers[stage_idx](hidden_state)
            out.append(hidden_state)
        return out
This class reassembles the hidden states of the backbone into image-like feature representations at various resolutions. This happens in 3 stages: 1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to `config.readout_type`. 2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`. 3. Resizing the spatial dimensions (height, width). Args: config (`[ZoeDepthConfig]`): Model configuration class defining the model architecture.
github-repos
def get(self, uid: int) -> Optional[CachedMessage]: return self._cache.get(uid)
Return the given cached message. Args: uid: The message UID.
codesearchnet
def GetCampaignFeeds(client, feed, placeholder_type): campaign_feed_service = client.GetService('CampaignFeedService', 'v201809') campaign_feeds = [] more_pages = True selector = { 'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'], 'predicates': [ { 'field': 'Status', 'operator': 'EQUALS', 'values': ['ENABLED'] }, { 'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']] }, { 'field': 'PlaceholderTypes', 'operator': 'CONTAINS_ANY', 'values': [placeholder_type] } ], 'paging': { 'startIndex': 0, 'numberResults': PAGE_SIZE } } while more_pages: page = campaign_feed_service.get(selector) if 'entries' in page: campaign_feeds.extend(page['entries']) selector['paging']['startIndex'] += PAGE_SIZE more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries']) return campaign_feeds
Get the Campaign Feeds that reference the given Feed for a placeholder type. Args: client: an AdWordsClient instance. feed: the Feed whose Campaign Feeds are retrieved. placeholder_type: the Placeholder Type. Returns: A list of Campaign Feeds.
juraj-google-style
def NHWCToNCHW(input_tensor):
    if isinstance(input_tensor, tensor.Tensor):
        return array_ops.transpose(input_tensor, [0, 3, 1, 2])
    else:
        return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
Convert the input from NHWC format to NCHW. Args: input_tensor: a 4-D tensor, or a 4-element array representing the same. Returns: the converted tensor or a shape array
github-repos
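Quick usage sketch (illustrative only; assumes TensorFlow is installed) showing both branches, the tensor transpose and the plain shape-list reordering:

```python
import tensorflow as tf

images = tf.zeros([8, 224, 224, 3])              # NHWC tensor
print(tf.transpose(images, [0, 3, 1, 2]).shape)  # (8, 3, 224, 224)

shape = [8, 224, 224, 3]                         # NHWC as a plain shape list
print([shape[0], shape[3], shape[1], shape[2]])  # [8, 3, 224, 224]
```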
def direct_transformers_import(path: str, file='__init__.py') -> ModuleType:
    name = 'transformers'
    location = os.path.join(path, file)
    spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path])
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    module = sys.modules[name]
    return module
Imports transformers directly Args: path (`str`): The path to the source file file (`str`, *optional*): The file to join with the path. Defaults to "__init__.py". Returns: `ModuleType`: The resulting imported module
github-repos
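For illustration, a generic `importlib` recipe for loading a package from an explicit path, in the spirit of the function above (the package name and path are placeholders; registering the module in `sys.modules` before executing it is the documented approach for packages that perform their own imports):

```python
import importlib.util
import os
import sys

def import_package_from_path(name, path, init_file='__init__.py'):
    # Load the package rooted at `path` without requiring it to be on sys.path.
    spec = importlib.util.spec_from_file_location(
        name, os.path.join(path, init_file), submodule_search_locations=[path])
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module  # register first so the package's own imports resolve
    spec.loader.exec_module(module)
    return sys.modules[name]

# Hypothetical usage:
# pkg = import_package_from_path('transformers', '/path/to/transformers/src/transformers')
```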
def AddRow(self, values): if self._number_of_columns and len(values) != self._number_of_columns: raise ValueError('Number of values is out of bounds.') self._rows.append(values) if not self._number_of_columns: self._number_of_columns = len(values)
Adds a row of values. Args: values (list[object]): values. Raises: ValueError: if the number of values is out of bounds.
juraj-google-style
def shift(x, offset, dim, wrap, name=None): return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]
Shift operation. Shift x right by +offset in dimension dim. Args: x: a Tensor offset: an integer. If negative, shift left instead of right. dim: a Dimension of x wrap: a boolean - whether to wrap (True) or pad with zeros (False). name: an optional string Returns: a Tensor with the same shape and dtype as x
juraj-google-style
def go_from(self, vertex):
    if self.vertex_out:
        self.vertex_out.edges_out.remove(self)
    self.vertex_out = vertex
    vertex.edges_out.add(self)
Tell the edge to go out from this vertex. Args: vertex (Vertex): vertex to go from.
codesearchnet
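Minimal stand-in classes (invented for illustration, not the library's own) showing the bookkeeping `go_from` performs when an edge is re-homed:

```python
class Vertex:
    def __init__(self, name):
        self.name = name
        self.edges_out = set()

class Edge:
    def __init__(self):
        self.vertex_out = None

    def go_from(self, vertex):
        # Detach from the previous origin vertex, then attach to the new one.
        if self.vertex_out:
            self.vertex_out.edges_out.remove(self)
        self.vertex_out = vertex
        vertex.edges_out.add(self)

a, b = Vertex('a'), Vertex('b')
edge = Edge()
edge.go_from(a)
edge.go_from(b)  # re-homing removes the edge from a.edges_out first
print(len(a.edges_out), len(b.edges_out))  # 0 1
```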
def get_member_information(self, query_params=None): return self.fetch_json(uri_path=self.base_uri, query_params=(query_params or {}))
Get information for a member. Args: query_params: Optional dict of query parameters to include in the request. Returns: dict: A dictionary of member values.
codesearchnet
def create(self, vrf_name, rd=None):
    commands = [('vrf definition %s' % vrf_name)]
    if rd:
        commands.append(('rd %s' % rd))
    return self.configure(commands)
Creates a new VRF resource Note: A valid RD has the following format admin_ID:local_assignment. The admin_ID can be an AS number or globally assigned IPv4 address. The local_assignment can be an integer between 0-65,535 if the admin_ID is an IPv4 address and can be between 0-4,294,967,295 if the admin_ID is an AS number. If the admin_ID is an AS number the local_assignment could also be in the form of an IPv4 address. Args: vrf_name (str): The VRF name to create rd (str): The value to configure the vrf rd Returns: True if create was successful otherwise False
codesearchnet
def _compute_causal_mask(self, query, value=None):
    q_seq_length = ops.shape(query)[1]
    v_seq_length = q_seq_length if value is None else ops.shape(value)[1]
    ones_mask = ops.ones((1, q_seq_length, v_seq_length), dtype='int32')
    row_index = ops.cumsum(ones_mask, axis=-2)
    col_index = ops.cumsum(ones_mask, axis=-1)
    return ops.greater_equal(row_index, col_index)
Computes a causal mask (e.g., for masked self-attention layers). For example, if query and value both contain sequences of length 4, this function returns a boolean tensor equal to: ``` [[[True, False, False, False], [True, True, False, False], [True, True, True, False], [True, True, True, True]]] ``` Args: query: query tensor of shape `(B, T, ...)`. value: value tensor of shape `(B, S, ...)` (optional, defaults to query). Returns: mask: a boolean tensor of shape `(1, T, S)` containing a lower triangular matrix of shape `(T, S)`.
github-repos
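An equivalent standalone NumPy sketch (illustrative; the original uses the Keras `ops` backend) that reproduces the lower-triangular mask from the docstring:

```python
import numpy as np

def causal_mask(q_len, v_len):
    # Position i in the query may attend to positions j <= i in the value.
    ones = np.ones((1, q_len, v_len), dtype='int32')
    row_index = np.cumsum(ones, axis=-2)
    col_index = np.cumsum(ones, axis=-1)
    return row_index >= col_index

print(causal_mask(4, 4)[0].astype(int))
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]
```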
def compose(*funcs):
    if not funcs:
        return lambda *args: args[0] if args else None
    if len(funcs) == 1:
        return funcs[0]
    last = funcs[-1]
    rest = funcs[0:-1]
    return lambda *args: reduce(lambda ax, func: func(ax), reversed(rest), last(*args))
Chained function composition wrapper. Creates a function f where f(x) = arg0(arg1(arg2(...argN(x)))). If *funcs is empty, an identity function is returned. Args: *funcs: list of functions to chain Returns: a new function composed of chained calls to *funcs
juraj-google-style
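Usage sketch (assumes `compose` and `functools.reduce` are in scope, as in the snippet above):

```python
double = lambda x: x * 2
increment = lambda x: x + 1

f = compose(double, increment)  # f(x) = double(increment(x))
print(f(3))          # 8
print(compose()(7))  # 7 -- the empty composition acts as identity
```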
def GetAttributeContainerByIndex(self, index): if index < 0: raise IndexError( 'Unsupported negative index value: {0:d}.'.format(index)) if index < len(self._list): return self._list[index] return None
Retrieves a specific serialized attribute container from the list. Args: index (int): attribute container index. Returns: bytes: serialized attribute container data or None if not available. Raises: IndexError: if the index is less than zero.
juraj-google-style
def Parse(self, raw_data):
    self.results = raw_data
    for f in self.filters:
        self.results = f.Parse(self.results)
    return self.results
Take the results and yield results that passed through the filters. The output of each filter is used as the input for successive filters. Args: raw_data: An iterable series of rdf values. Returns: A list of rdf values that matched all filters.
codesearchnet
def launch_external_file(filename: str, raise_if_fails: bool = False) -> None: log.info("Launching external file: {!r}", filename) try: if sys.platform.startswith('linux'): cmdargs = ["xdg-open", filename] subprocess.call(cmdargs) else: os.startfile(filename) except Exception as e: log.critical("Error launching {!r}: error was {}.\n\n{}", filename, str(e), traceback.format_exc()) if raise_if_fails: raise
Launches a file using the operating system's standard launcher. Args: filename: file to launch raise_if_fails: raise any exceptions from ``subprocess.call(["xdg-open", filename])`` (Linux) or ``os.startfile(filename)`` (otherwise)? If not, exceptions are suppressed.
juraj-google-style
def update_unexpected_keys(self, model, unexpected_keys: List[str], prefix: str) -> List[str]: return unexpected_keys
Override this method if you want to adjust the `unexpected_keys`. Args: unexpected_keys (`List[str]`, *optional*): The list of unexpected keys in the checkpoint compared to the state dict of the model
github-repos
def list_media_endpoint_keys(access_token, subscription_id, rgname, msname):
    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id,
                        '/resourceGroups/', rgname,
                        '/providers/microsoft.media/', '/mediaservices/', msname,
                        '/listKeys?api-version=', MEDIA_API])
    return do_get(endpoint, access_token)
list the media endpoint keys in a media service Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. msname (str): Media service name. Returns: HTTP response. JSON body.
codesearchnet
def get_load_balancer(self, id): return LoadBalancer.get_object(api_token=self.token, id=id)
Returns a Load Balancer object by its ID. Args: id (str): Load Balancer ID
juraj-google-style
def AdManagerDateTimePacker(cls, value, version): if isinstance(value, datetime.datetime): if value.tzinfo is None: raise googleads.errors.GoogleAdsValueError( 'Datetime %s is not timezone aware.' % value ) return { 'date': cls.AdManagerDateTimePacker(value.date(), version), 'hour': value.hour, 'minute': value.minute, 'second': value.second, 'timeZoneId' if version >= 'v201811' else 'timeZoneID': value.tzinfo.zone, } elif isinstance(value, datetime.date): return {'year': value.year, 'month': value.month, 'day': value.day}
Returns dicts formatted for Ad Manager SOAP based on date/datetime. Args: value: A date or datetime object to be converted. version: the version of the current API, e.g. 'v201811' Returns: The value object correctly represented for Ad Manager SOAP.
juraj-google-style
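Illustrative setup for the datetime case (assumes `pytz` is installed; the expected packed dict is shown as a comment rather than by calling the packer directly):

```python
import datetime
import pytz

# The packer requires a timezone-aware datetime; naive values raise GoogleAdsValueError.
dt = pytz.timezone('America/New_York').localize(datetime.datetime(2024, 5, 1, 9, 30))
print(dt.tzinfo.zone)  # 'America/New_York' -- the value stored under 'timeZoneId'

# Expected packed form for v201811 and later:
# {'date': {'year': 2024, 'month': 5, 'day': 1},
#  'hour': 9, 'minute': 30, 'second': 0,
#  'timeZoneId': 'America/New_York'}
```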
def optimal_partitions(sizes, counts, num_part):
    if (num_part < 2):
        return [(sizes[0], sizes[(- 1)])]
    if (num_part >= len(sizes)):
        partitions = [(x, x) for x in sizes]
        return partitions
    nfps = _compute_nfps_real(counts, sizes)
    (partitions, _, _) = _compute_best_partitions(num_part, sizes, nfps)
    return partitions
Compute the optimal partitions given a distribution of set sizes. Args: sizes (numpy.array): The complete domain of set sizes in ascending order. counts (numpy.array): The frequencies of all set sizes in the same order as `sizes`. num_part (int): The number of partitions to create. Returns: list: A list of partitions in the form of `(lower, upper)` tuples, where `lower` and `upper` are lower and upper bound (inclusive) set sizes of each partition.
codesearchnet
def get(self, block=True, timeout=None):
    if (not block):
        (success, item) = ray.get(self.actor.get.remote())
        if (not success):
            raise Empty
    elif (timeout is None):
        (success, item) = ray.get(self.actor.get.remote())
        while (not success):
            (success, item) = ray.get(self.actor.get.remote())
    elif (timeout < 0):
        raise ValueError("'timeout' must be a non-negative number")
    else:
        endtime = (time.time() + timeout)
        success = False
        while ((not success) and (time.time() < endtime)):
            (success, item) = ray.get(self.actor.get.remote())
        if (not success):
            raise Empty
    return item
Gets an item from the queue. Uses polling if block=True, so there is no guarantee of order if multiple consumers get from the same empty queue. Returns: The next item in the queue. Raises: Empty if the queue is empty and blocking is False.
codesearchnet
def _restructure_if_volume_follows_journal(left, right):

    def _get_volume_keyword_op_and_remaining_subtree(right_subtree):
        if (isinstance(right_subtree, NotOp) and isinstance(right_subtree.op, KeywordOp) and (right_subtree.op.left == Keyword('volume'))):
            return (None, None)
        elif (isinstance(right_subtree, AndOp) and isinstance(right_subtree.left, NotOp) and isinstance(right_subtree.left.op, KeywordOp) and (right_subtree.left.op.left == Keyword('volume'))):
            return (None, right_subtree.right)
        elif (isinstance(right_subtree, KeywordOp) and (right_subtree.left == Keyword('volume'))):
            return (right_subtree, None)
        elif (isinstance(right_subtree, AndOp) and (right_subtree.left.left == Keyword('volume'))):
            return (right_subtree.left, right_subtree.right)

    journal_value = left.right.value
    volume_and_remaining_subtree = _get_volume_keyword_op_and_remaining_subtree(right)
    if (not volume_and_remaining_subtree):
        return
    (volume_node, remaining_subtree) = volume_and_remaining_subtree
    if volume_node:
        left.right.value = ','.join([journal_value, volume_node.right.value])
        return (AndOp(left, remaining_subtree) if remaining_subtree else left)
Remove volume node if it follows a journal logically in the tree hierarchy. Args: left (ast.ASTElement): The journal KeywordOp node. right (ast.ASTElement): The rest of the tree to be restructured. Return: (ast.ASTElement): The restructured tree, with the volume node removed. Notes: This happens to support queries like "journal Phys.Rev. and vol d85". Appends the value of KeywordOp with Keyword 'volume' and discards 'volume' KeywordOp node from the tree.
codesearchnet
def _UpdateStatus( self, status, display_name, number_of_consumed_sources, storage_writer, force=False): current_timestamp = time.time() if not force and current_timestamp < ( self._last_status_update_timestamp + self._STATUS_UPDATE_INTERVAL): return if status == definitions.STATUS_INDICATOR_IDLE: status = definitions.STATUS_INDICATOR_RUNNING used_memory = self._process_information.GetUsedMemory() or 0 self._processing_status.UpdateForemanStatus( self._name, status, self._pid, used_memory, display_name, number_of_consumed_sources, storage_writer.number_of_event_sources, 0, storage_writer.number_of_events, 0, 0, 0, 0, 0, storage_writer.number_of_warnings) if self._status_update_callback: self._status_update_callback(self._processing_status) self._last_status_update_timestamp = current_timestamp
Updates the processing status. Args: status (str): human readable status of the processing, e.g. 'Idle'. display_name (str): human readable name of the file entry currently being processed. number_of_consumed_sources (int): number of consumed sources. storage_writer (StorageWriter): storage writer for a session storage. force (Optional[bool]): True if the update should be forced ignoring the last status update time.
juraj-google-style
def diet_expert(x, hidden_size, params):

    @fn_with_diet_vars(params)
    def diet_expert_internal(x):
        dim = x.get_shape().as_list()[(- 1)]
        h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)
        y = tf.layers.dense(h, dim, use_bias=False)
        y *= tf.rsqrt(tf.to_float((dim * hidden_size)))
        return y

    return diet_expert_internal(x)
A two-layer feed-forward network with relu activation on hidden layer. Uses diet variables. Recomputes hidden layer on backprop to save activation memory. Args: x: a Tensor with shape [batch, io_size] hidden_size: an integer params: a diet variable HParams object. Returns: a Tensor with shape [batch, io_size]
codesearchnet
def unload(self): unloaded = False if self._lib is not None: if self._winlib is not None: ctypes.windll.kernel32.FreeLibrary.argtypes = ( ctypes.c_void_p, ) ctypes.windll.kernel32.FreeLibrary(self._lib._handle) ctypes.windll.kernel32.FreeLibrary(self._winlib._handle) self._lib = None self._winlib = None unloaded = True else: del self._lib self._lib = None unloaded = True if self._temp is not None: os.remove(self._temp.name) self._temp = None return unloaded
Unloads the library's DLL if it has been loaded. This additionally cleans up the temporary DLL file that was created when the library was loaded. Args: self (Library): the ``Library`` instance Returns: ``True`` if the DLL was unloaded, otherwise ``False``.
juraj-google-style
def render(self, fname=''):
    import qnet.visualization.circuit_pyx as circuit_visualization
    from tempfile import gettempdir
    from time import time, sleep
    if (not fname):
        tmp_dir = gettempdir()
        fname = os.path.join(tmp_dir, 'tmp_{}.png'.format(hash(time())))
    if circuit_visualization.draw_circuit(self, fname):
        done = False
        for k in range(20):
            if os.path.exists(fname):
                done = True
                break
            else:
                sleep(0.5)
        if done:
            return fname
    raise CannotVisualize()
Render the circuit expression and store the result in a file Args: fname (str): Path to an image file to store the result in. Returns: str: The path to the image file
codesearchnet