Dataset columns: code (string, lengths 20 – 4.93k), docstring (string, lengths 33 – 1.27k), source (string, 3 classes)
def noisy_identity_kernel_initializer(base_num_channels, stddev=1e-08):
    def _noisy_identity_kernel_initializer(shape, dtype=tf.float32, partition_info=None):
        """Constructs a noisy identity kernel.

        Args:
          shape: List of integers. Represents shape of result.
          dtype: data type for values in result.
          partition_info: Partition information for initializer functions. Ignored.

        Returns:
          Tensor of desired shape and dtype such that applying it as a convolution
          kernel results in a noisy near-identity operation.

        Raises:
          ValueError: If shape does not define a valid kernel.
                      If filter width and height differ.
                      If filter width and height are not odd numbers.
                      If number of input and output channels are not multiples of
                      base_num_channels.
        """
        if len(shape) != 4:
            raise ValueError('Convolution kernels must be rank 4.')
        filter_height, filter_width, in_channels, out_channels = shape
        if filter_width != filter_height:
            raise ValueError('Noisy identity initializer only works for square filters.')
        if filter_width % 2 != 1:
            raise ValueError('Noisy identity initializer requires filters have odd height and width.')
        if in_channels % base_num_channels != 0 or out_channels % base_num_channels != 0:
            raise ValueError('in_channels and out_channels must both be multiples of base_num_channels.')
        # Reconstructed: the '// 2' floor division was truncated in the source.
        middle_pixel = filter_height // 2
        is_middle_pixel = tf.logical_and(
            tf.equal(_range_along_dimension(0, shape), middle_pixel),
            tf.equal(_range_along_dimension(1, shape), middle_pixel))
        is_same_channel_multiple = tf.equal(
            tf.floordiv(_range_along_dimension(2, shape) * base_num_channels, in_channels),
            tf.floordiv(_range_along_dimension(3, shape) * base_num_channels, out_channels))
        noise = tf.truncated_normal(shape, stddev=stddev, dtype=dtype)
        return tf.where(
            tf.logical_and(is_same_channel_multiple, is_middle_pixel),
            tf.ones(shape, dtype=dtype) * (base_num_channels / out_channels),
            noise)
    return _noisy_identity_kernel_initializer
Build an initializer for constructing near-identity convolution kernels. Construct a convolution kernel where in_channels and out_channels are multiples of base_num_channels, but need not be equal. This initializer is essentially the same as identity_kernel_initializer, except that magnitude is "spread out" across multiple copies of the input. Args: base_num_channels: int. Number that divides both in_channels and out_channels. stddev: float. Standard deviation of truncated normal noise added to off-entries to break ties. Returns: Initializer function for building a noisy identity kernel.
codesearchnet
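A minimal NumPy sketch of the idea behind this initializer, for the simple case where in_channels == out_channels (so no spreading across channel copies); the helper name and values below are illustrative, not part of the library:

import numpy as np

def toy_noisy_identity_kernel(filter_size, channels, stddev=1e-8, seed=0):
    # Start from tiny noise everywhere to break ties between otherwise-equal weights.
    rng = np.random.default_rng(seed)
    kernel = rng.normal(0.0, stddev, size=(filter_size, filter_size, channels, channels))
    middle = filter_size // 2
    # Put a 1 at the centre tap connecting each channel to itself, so convolving
    # with this kernel is (approximately) the identity map.
    for c in range(channels):
        kernel[middle, middle, c, c] = 1.0
    return kernel

k = toy_noisy_identity_kernel(filter_size=3, channels=4)
print(k.shape)               # (3, 3, 4, 4)
print(np.round(k[1, 1], 3))  # roughly the 4x4 identity matrix across channels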
def _CheckIsLink(self, file_entry):
    if definitions.FILE_ENTRY_TYPE_LINK not in self._file_entry_types:
        return False
    return file_entry.IsLink()
Checks the is_link find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not.
codesearchnet
def _other_wrapper(self, name, writing):
    io_attr = getattr(self._io, name)

    def other_wrapper(*args, **kwargs):
        write_seek = self._io.tell()
        ret_value = io_attr(*args, **kwargs)
        if write_seek != self._io.tell():
            self._read_seek = self._io.tell()
            self._read_whence = 0
        if not writing or not IS_PY2:
            return ret_value

    return other_wrapper
Wrap a stream attribute in an other_wrapper. Args: name: the name of the stream attribute to wrap. Returns: other_wrapper which is described below.
juraj-google-style
def get_pkg_module_names(package_path):
    module_names = set()
    for fobj, modname, _ in pkgutil.iter_modules(path=[package_path]):
        filename = os.path.join(fobj.path, '%s.py' % modname)
        if os.path.exists(filename):
            module_names.add(os.path.abspath(filename))
    return module_names
Returns module filenames from package. Args: package_path: Path to Python package. Returns: A set of module filenames.
codesearchnet
def _process_contains_filter_directive(filter_operation_info, location, context, parameters): filtered_field_type = filter_operation_info.field_type filtered_field_name = filter_operation_info.field_name base_field_type = strip_non_null_from_type(filtered_field_type) if (not isinstance(base_field_type, GraphQLList)): raise GraphQLCompilationError(u'Cannot apply "contains" to non-list type {}'.format(filtered_field_type)) argument_inferred_type = strip_non_null_from_type(base_field_type.of_type) (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type) filter_predicate = expressions.BinaryComposition(u'contains', expressions.LocalField(filtered_field_name), argument_expression) if (non_existence_expression is not None): filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate) return blocks.Filter(filter_predicate)
Return a Filter basic block that checks if the directive arg is contained in the field. Args: filter_operation_info: FilterOperationInfo object, containing the directive and field info of the field where the filter is to be applied. location: Location where this filter is used. context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! parameters: list of 1 element, specifying the collection in which the value must exist; if the collection is optional and missing, the check will return True Returns: a Filter basic block that performs the contains check
codesearchnet
def _FormatIPCPermToken(self, token_data):
    return {
        'user_id': token_data.user_identifier,
        'group_id': token_data.group_identifier,
        'creator_user_id': token_data.creator_user_identifier,
        'creator_group_id': token_data.creator_group_identifier,
        'access': token_data.access_mode}
Formats an IPC permissions token as a dictionary of values. Args: token_data (bsm_token_data_ipc_perm): AUT_IPC_PERM token data. Returns: dict[str, str]: token values.
codesearchnet
def __init__(self, parent):
    super(ModuleFrame, self).__init__(parent)
    logger.debug("Initialising module tabs")
    style = ttk.Style()
    style.configure("Module.TFrame", background="white")
    self.module_buttons = {}
    self.current_button = None
    self.module_list = ttk.Frame(self, width=150, style="Module.TFrame")
    self.module_list.grid(column=0, row=0, padx=0, pady=0, sticky="W E N S")
    self.module_list.columnconfigure(0, weight=1)
    self.module_list.rowconfigure(0, weight=0)
    self.module_list.rowconfigure(1, weight=1)
    # The foreground colour value was truncated in the source (it began with '#');
    # "#000000" below is a placeholder, not the original value.
    header = tk.Label(self.module_list, text="Modules", bg="white", fg="#000000")
    header.grid(column=0, row=0, padx=0, pady=0, sticky="W E N")
    self.module_selection = ttk.Frame(self.module_list, style="Module.TFrame")
    self.module_selection.grid(column=0, row=1, padx=0, pady=0, sticky="W E N S")
    self.module_selection.columnconfigure(0, weight=1)
    self.module_ui = ttk.Frame(self)
    self.module_ui.grid(column=1, row=0, padx=0, pady=0, sticky="W E N S")
    self.module_ui.columnconfigure(0, weight=1)
    self.module_ui.rowconfigure(0, weight=1)
    self.clear_modules()
    self.columnconfigure(0, minsize=150)
    self.columnconfigure(1, weight=1)
    self.rowconfigure(0, weight=1)
Create a new module frame and add it to the given parent. Args: parent: A tk or ttk object
juraj-google-style
def call(self, inputs):
    del inputs
    with tf.compat.v1.name_scope(self._name):
        return tfd.MultivariateNormalDiag(self.loc, self.scale_diag)
Runs the model to generate multivariate normal distribution. Args: inputs: Unused. Returns: A MultivariateNormalDiag distribution with event shape [dimensions], batch shape [], and sample shape [sample_shape, dimensions].
juraj-google-style
def __init__(self, rot_mats: Optional[torch.Tensor]=None, quats: Optional[torch.Tensor]=None, normalize_quats: bool=True):
    if rot_mats is None and quats is None or (rot_mats is not None and quats is not None):
        raise ValueError('Exactly one input argument must be specified')
    if rot_mats is not None and rot_mats.shape[-2:] != (3, 3) or (quats is not None and quats.shape[-1] != 4):
        raise ValueError('Incorrectly shaped rotation matrix or quaternion')
    if quats is not None:
        quats = quats.to(dtype=torch.float32)
    if rot_mats is not None:
        rot_mats = rot_mats.to(dtype=torch.float32)
    if quats is not None and normalize_quats:
        quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)
    self._rot_mats = rot_mats
    self._quats = quats
Args: rot_mats: A [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats quats: A [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit quaternion normalize_quats: If quats is specified, whether to normalize quats
github-repos
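A small standalone sketch of the normalization this constructor applies when normalize_quats=True, assuming PyTorch is available; the class itself is not instantiated here since the rest of its module is not shown:

import torch

# A [*, 4] batch of unnormalized quaternions.
quats = torch.tensor([[2.0, 0.0, 0.0, 0.0],
                      [0.0, 3.0, 0.0, 0.0]])

# The same step the constructor performs: divide each quaternion by its L2 norm.
unit_quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)
print(unit_quats)                             # rows now have unit length
print(torch.linalg.norm(unit_quats, dim=-1))  # tensor([1., 1.])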
def reaction_charge(reaction, compound_charge):
    charge_sum = 0.0
    for compound, value in reaction.compounds:
        charge = compound_charge.get(compound.name, float('nan'))
        charge_sum += charge * float(value)
    return charge_sum
Calculate the overall charge for the specified reaction. Args: reaction: :class:`psamm.reaction.Reaction`. compound_charge: a map from each compound to charge values.
juraj-google-style
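A standalone illustration of the charge accumulation performed by reaction_charge, using hypothetical stand-ins for psamm's Reaction and Compound objects:

from collections import namedtuple

# Hypothetical stand-ins, just to show the arithmetic: charge * stoichiometry, summed.
Compound = namedtuple('Compound', 'name')
Reaction = namedtuple('Reaction', 'compounds')

rxn = Reaction(compounds=[(Compound('atp'), -1), (Compound('h2o'), -1),
                          (Compound('adp'), 1), (Compound('pi'), 1)])
charges = {'atp': -4, 'h2o': 0, 'adp': -3, 'pi': -2}

balance = sum(charges.get(c.name, float('nan')) * float(v) for c, v in rxn.compounds)
print(balance)  # -1.0: products carry one extra negative charge (an H+ is missing)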
def get_repository(self, path):
    parts = path.split('@', 1)
    if len(parts) == 1:
        parts = ("filesystem", parts[0])
    repo_type, location = parts
    if repo_type == "filesystem":
        location = os.path.abspath(location)
    normalised_path = "%s@%s" % (repo_type, location)
    return self._get_repository(normalised_path)
Get a package repository. Args: path (str): Entry from the 'packages_path' config setting. This may simply be a path (which is managed by the 'filesystem' package repository plugin), or a string in the form "type@location", where 'type' identifies the repository plugin type to use. Returns: `PackageRepository` instance.
juraj-google-style
def on_modified(self, event):
    self._logger.debug('Detected modify event on watched path: %s', event.src_path)
    self._process_event(event)
Function called every time a watched file is modified. Args: event: Event to process.
juraj-google-style
def _unschedule_sending_init_updates(self):
    LOG.debug('Un-scheduling sending of initial Non-RTC UPDATEs (init. UPDATEs already sent: %s)',
              self._sent_init_non_rtc_update)
    if self._rtc_eor_timer:
        self._rtc_eor_timer.stop()
        self._rtc_eor_timer = None
        return True
    return False
Un-schedules sending of initial updates Stops the timer if set for sending initial updates. Returns: - True if timer was stopped - False if timer was already stopped and nothing was done
codesearchnet
def CheckForBadCharacters(filename, lines, error):
    for linenum, line in enumerate(lines):
        if u'�' in line:
            error(filename, linenum, 'readability/utf8', 5,
                  'Line contains invalid UTF-8 (or Unicode replacement character).')
        if '\x00' in line:
            error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found.
codesearchnet
def _set_weights(instance, symbolic_weights, weight_values, name, skip_mismatch=False): for i, weight_value in enumerate(weight_values): expected_shape = symbolic_weights[i].shape received_shape = weight_value.shape if expected_shape != received_shape: if skip_mismatch: warnings.warn(f'Skipping loading weights for {name}due to mismatch in shape for weight {symbolic_weights[i].path}. Weight expects shape {expected_shape}. Received saved weight with shape {received_shape}', stacklevel=2) continue raise ValueError(f'Shape mismatch in {name}for weight {symbolic_weights[i].path}. Weight expects shape {expected_shape}. Received saved weight with shape {received_shape}') symbolic_weights[i].assign(weight_value) if hasattr(instance, 'finalize_state') and symbolic_weights: instance.finalize_state()
Safely set weights into a model or a layer. Args: instance: Model or layer instance, symbolic_weights: symbolic tensors representing the weights of the variables to load, weight_values: values of the weights to load, skip_mismatch: Boolean, whether to skip loading of weights where there is a mismatch in the shape of the weights, name: name used to identify the group. Raises: ValueError: in case of mismatch between provided model/layer and weights.
github-repos
def create_store(reducer, initial_state=None, enhancer=None):
    if enhancer is not None:
        if not hasattr(enhancer, '__call__'):
            raise TypeError('Expected the enhancer to be a function.')
        return enhancer(create_store)(reducer, initial_state)
    if not hasattr(reducer, '__call__'):
        raise TypeError('Expected the reducer to be a function.')
    current_reducer = [reducer]
    current_state = [initial_state]
    current_listeners = [[]]
    next_listeners = [current_listeners[0]]
    is_dispatching = [False]

    def ensure_can_mutate_next_listeners():
        if next_listeners[0] == current_listeners[0]:
            next_listeners[0] = current_listeners[0][:]

    def get_state():
        return current_state[0]

    def subscribe(listener):
        if not hasattr(listener, '__call__'):
            raise TypeError('Expected listener to be a function.')
        is_subscribed = [True]
        ensure_can_mutate_next_listeners()
        next_listeners[0].append(listener)

        def unsubcribe():
            if not is_subscribed[0]:
                return
            is_subscribed[0] = False
            ensure_can_mutate_next_listeners()
            index = next_listeners[0].index(listener)
            next_listeners[0].pop(index)

        return unsubcribe

    def dispatch(action):
        if not isinstance(action, dict):
            raise TypeError('Actions must be a dict. '
                            'Use custom middleware for async actions.')
        if action.get('type') is None:
            raise ValueError('Actions must have a non-None "type" property. '
                             'Have you misspelled a constant?')
        if is_dispatching[0]:
            raise Exception('Reducers may not dispatch actions.')
        try:
            is_dispatching[0] = True
            current_state[0] = current_reducer[0](current_state[0], action)
        finally:
            is_dispatching[0] = False
        listeners = current_listeners[0] = next_listeners[0]
        for listener in listeners:
            listener()
        return action

    def replace_reducer(next_reducer):
        if not hasattr(next_reducer, '__call__'):
            raise TypeError('Expected next_reducer to be a function')
        current_reducer[0] = next_reducer
        dispatch({'type': ActionTypes.INIT})

    dispatch({'type': ActionTypes.INIT})
    return StoreDict(
        dispatch=dispatch,
        subscribe=subscribe,
        get_state=get_state,
        replace_reducer=replace_reducer,
    )
redux in a nutshell. observable has been omitted. Args: reducer: root reducer function for the state tree initial_state: optional initial state data enhancer: optional enhancer function for middleware etc. Returns: a Pydux store
juraj-google-style
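A usage sketch for the store returned above, assuming create_store is importable and that the returned StoreDict exposes dispatch/subscribe/get_state as attributes (as pydux does); the counter reducer is illustrative:

def counter(state, action):
    state = state or 0
    if action.get('type') == 'INCREMENT':
        return state + 1
    if action.get('type') == 'DECREMENT':
        return state - 1
    return state

store = create_store(counter)
unsubscribe = store.subscribe(lambda: print('state is now', store.get_state()))
store.dispatch({'type': 'INCREMENT'})  # state is now 1
store.dispatch({'type': 'INCREMENT'})  # state is now 2
unsubscribe()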
def shape4d(a, data_format='NHWC'):
    s2d = shape2d(a)
    if get_data_format(data_format, False) == 'NHWC':
        return [1] + s2d + [1]
    else:
        return [1, 1] + s2d
Ensure a 4D shape, to use with 4D symbolic functions. Args: a: an int or tuple/list of length 2 Returns: list: of length 4. If ``a`` is an int, return ``[1, a, a, 1]`` or ``[1, 1, a, a]`` depending on data_format.
juraj-google-style
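A standalone toy version of the same shape expansion, to show the expected outputs; it is not tensorpack's helper and omits the shape2d/get_data_format validation:

def toy_shape4d(a, data_format='NHWC'):
    # Expand a scalar or 2-element spatial shape into a 4D NHWC/NCHW shape.
    s2d = [a, a] if isinstance(a, int) else list(a)
    return [1] + s2d + [1] if data_format == 'NHWC' else [1, 1] + s2d

print(toy_shape4d(3))                           # [1, 3, 3, 1]
print(toy_shape4d((3, 5), data_format='NCHW'))  # [1, 1, 3, 5]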
def __init__(self, event_type, event_data):
    self.type = event_type
    self.data = event_data
Creates a new event. Args: event_type (int): the type of the event, see :class:`~bigchaindb.events.EventTypes` event_data (obj): the data of the event.
juraj-google-style
def set(self, name, valu):
    byts = s_msgpack.en(valu)
    lkey = self.pref + name.encode('utf8')
    self.slab.put(lkey, byts, db=self.db)
    self.info[name] = valu
Set a name in the SlabDict. Args: name (str): The key name. valu (obj): A msgpack compatible value. Returns: None
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids('.')] if self.padding_side == 'right': return cls + token_ids_0 + question_suffix + sep + token_ids_1 + sep else: return cls + token_ids_0 + sep + token_ids_1 + question_suffix + sep
Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special tokens. A Splinter sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences for question answering: `[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]` Args: token_ids_0 (`List[int]`): The question token IDs if pad_on_right, else context tokens IDs token_ids_1 (`List[int]`, *optional*): The context token IDs if pad_on_right, else question token IDs Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
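A plain-Python illustration of the token layout described above, using made-up token ids rather than the real Splinter vocabulary:

# Illustrative ids only: cls=101, sep=102, question_token=104, '.'=119.
cls, sep, question, dot = [101], [102], [104], [119]
question_tokens = [2054, 2003]       # hypothetical question ids
context_tokens = [3056, 6251, 2015]  # hypothetical context ids

pair = cls + question_tokens + question + dot + sep + context_tokens + sep
single = cls + question_tokens + sep
print(pair)    # [CLS] question [QUESTION] . [SEP] context [SEP]
print(single)  # [CLS] X [SEP]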
def pick_env_and_run_and_report(self, env: env_tools.PreparedEnv, env_py2: Optional[env_tools.PreparedEnv], verbose: bool, previous_failures: Set['Check']) -> CheckResult: env.report_status_to_github('pending', 'Running...', self.context()) chosen_env = cast(env_tools.PreparedEnv, (env_py2 if self.needs_python2_env() else env)) os.chdir(cast(str, chosen_env.destination_directory)) result = self.run(chosen_env, verbose, previous_failures) if (result.unexpected_error is not None): env.report_status_to_github('error', 'Unexpected error.', self.context()) else: env.report_status_to_github(('success' if result.success else 'failure'), result.message, self.context()) return result
Evaluates this check in python 3 or 2.7, and reports to github. If the prepared environments are not linked to a github repository, with a known access token, reporting to github is skipped. Args: env: A prepared python 3 environment. env_py2: A prepared python 2.7 environment. verbose: When set, more progress output is produced. previous_failures: Checks that have already run and failed. Returns: A CheckResult instance.
codesearchnet
def finish_connection(self, conn_or_internal_id, successful, failure_reason=None): data = { 'id': conn_or_internal_id, 'success': successful, 'failure_reason': failure_reason } action = ConnectionAction('finish_connection', data, sync=False) self._actions.put(action)
Finish a connection attempt Args: conn_or_internal_id (string, int): Either an integer connection id or a string internal_id successful (bool): Whether this connection attempt was successful failure_reason (string): If this connection attempt failed, an optional reason for the failure.
juraj-google-style
def DisplayTree(node, children, level=0): value = '' node_type = '' if 'caseValue' in node: case_value = node['caseValue'] node_type = case_value['ProductDimension.Type'] if node_type == 'ProductCanonicalCondition': value = (case_value['condition'] if 'condition' in case_value else 'OTHER') elif node_type == 'ProductBiddingCategory': value = '%s(%s)' % (case_value['type'], case_value['value'] if 'value' in case_value else 'OTHER') else: value = (case_value['value'] if 'value' in case_value else 'OTHER') print ('%sid: %s, node_type: %s, value: %s\n' % (' ' * level, node['id'], node_type, value)) for child_node in children[node['id']]: DisplayTree(child_node, children, level + 1)
Recursively display a node and each of its children. Args: node: The node we're displaying the children of. children: Children of the parent node. level: How deep in the tree we are.
juraj-google-style
def xfrange(start, stop, step=1, maxSize=(-1)):
    if start <= stop:
        stop, step = stop + 1, abs(step)
    else:
        stop, step = stop - 1, -abs(step)
    if maxSize >= 0:
        size = lenRange(start, stop, step)
        if size > maxSize:
            raise exceptions.MaxSizeException(
                'Size %d > %s (MAX_FRAME_SIZE)' % (size, maxSize))
    return (f for f in xrange(start, stop, step))
Returns a generator that yields the frames from start to stop, inclusive. In other words it adds or subtracts a frame, as necessary, to return the stop value as well, if the stepped range would touch that value. Args: start (int): stop (int): step (int): Note that the sign will be ignored maxSize (int): Returns: generator: Raises: :class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
codesearchnet
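A standalone sketch of the inclusive stepped-range behaviour described in the docstring, without fileseq's maxSize guard:

def inclusive_range(start, stop, step=1):
    # Normalize the step direction, then extend stop by one so it can be yielded.
    step = abs(step) if start <= stop else -abs(step)
    stop = stop + 1 if step > 0 else stop - 1
    return range(start, stop, step)

print(list(inclusive_range(1, 10, 3)))  # [1, 4, 7, 10]
print(list(inclusive_range(10, 1, 3)))  # [10, 7, 4, 1]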
def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = latents latents = self.input_latents_norm(latents) context = self.input_context_norm(context) latents, self_attn_weights, present_key_value = self.self_attn(latents=latents, context=context, attention_mask=attention_mask) latents = residual + latents residual = latents latents = self.post_attention_layernorm(latents) latents = self.mlp(latents) latents = residual + latents outputs = (latents,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
Args: latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
github-repos
def gemini_query(self, query_id):
    logger.debug("Looking for query with id {0}".format(query_id))
    return self.query(GeminiQuery).filter_by(id=query_id).first()
Return a gemini query. Args: query_id (str)
juraj-google-style
def _PromptUserForAPFSVolumeIdentifiers(self, volume_system, volume_identifiers): print_header = True while True: if print_header: self._PrintAPFSVolumeIdentifiersOverview(volume_system, volume_identifiers) print_header = False lines = self._textwrapper.wrap(self._USER_PROMPT_APFS) self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\nVolume identifiers: ') try: selected_volumes = self._ReadSelectedVolumes(volume_system, prefix='apfs') if ((not selected_volumes) or (not set(selected_volumes).difference(volume_identifiers))): break except ValueError: pass self._output_writer.Write('\n') lines = self._textwrapper.wrap('Unsupported volume identifier(s), please try again or abort with Ctrl^C.') self._output_writer.Write('\n'.join(lines)) self._output_writer.Write('\n\n') return selected_volumes
Prompts the user to provide APFS volume identifiers. Args: volume_system (dfvfs.APFSVolumeSystem): volume system. volume_identifiers (list[str]): volume identifiers including prefix. Returns: list[str]: selected volume identifiers including prefix or None.
codesearchnet
def variant(self, document_id, gene_panels=None, case_id=None): query = {} if case_id: query['case_id'] = case_id query['variant_id'] = document_id else: query['_id'] = document_id variant_obj = self.variant_collection.find_one(query) if variant_obj: variant_obj = self.add_gene_info(variant_obj, gene_panels) if (variant_obj['chromosome'] in ['X', 'Y']): variant_obj['is_par'] = is_par(variant_obj['chromosome'], variant_obj['position']) return variant_obj
Returns the specified variant. Arguments: document_id : An md5 key that represents the variant or "variant_id" gene_panels(List[GenePanel]) case_id (str): case id (will search with "variant_id") Returns: variant_object(Variant): An ODM variant object
codesearchnet
def get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses): ret = [] pool = {int(k): v for k, v in pool.items()} seeded = {'seeded'+str(index): answer for index, answer in enumerate(seeded_answers)} merged_pool = seeded.keys() for key in pool: merged_pool += pool[key].keys() random.shuffle(merged_pool) student_id = get_student_item_dict()['student_id'] for student in merged_pool: if len(ret) >= num_responses: break elif student == student_id: continue if student.startswith('seeded'): option = seeded[student]['answer'] rationale = seeded[student]['rationale'] else: student_item = get_student_item_dict(student) submission = sas_api.get_answers_for_student(student_item) rationale = submission.get_rationale(0) option = submission.get_vote(0) ret.append({'option': option, 'rationale': rationale}) return {"answers": ret}
Get answers from others with random algorithm, which randomly selects answers from the pool. Student may get three answers for option 1 or one answer for option 1 and two answers for option 2. Args: see `get_other_answers` num_responses (int): the number of responses to be returned. This value may not be respected if there are not enough answers to return Returns: dict: answers based on the selection algorithm
juraj-google-style
def distinct(self, selector=identity): if self.closed(): raise ValueError('Attempt to call distinct() on a closed Queryable.') if (not is_callable(selector)): raise TypeError('distinct() parameter selector={0} is not callable'.format(repr(selector))) return self._create(self._generate_distinct_result(selector))
Eliminate duplicate elements from a sequence. Note: This method uses deferred execution. Args: selector: An optional single argument function the result of which is the value compared for uniqueness against elements already consumed. If omitted, the element value itself is compared for uniqueness. Returns: Unique elements of the source sequence as determined by the selector function. Note that it is unprojected elements that are returned, even if a selector was provided. Raises: ValueError: If the Queryable is closed. TypeError: If the selector is not callable.
codesearchnet
def SetUsername(self, username):
    self._username = username
    logger.debug('Elasticsearch username: {0!s}'.format(username))
Sets the username. Args: username (str): username to authenticate with.
codesearchnet
def transform_to_mods_multimono(marc_xml, uuid, url):
    marc_xml = _read_content_or_path(marc_xml)
    transformed = xslt_transformation(
        marc_xml,
        _absolute_template_path('MARC21toMultiMonographTitle.xsl'))
    return _apply_postprocessing(
        marc_xml=marc_xml,
        xml=transformed,
        func=mods_postprocessor.postprocess_multi_mono,
        uuid=uuid,
        url=url)
Convert `marc_xml` to multimonograph MODS data format. Args: marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. uuid (str): UUID string giving the package ID. url (str): URL of the publication (public or not). Returns: list: Collection of transformed xml strings.
codesearchnet
def get_link(self, task_id):
    links = [x for x in self.links if x.task_id == task_id]
    if len(links) != 1:
        raise CoTError('No single Link matches task_id {}!\n{}'.format(task_id, self.dependent_task_ids()))
    return links[0]
Get a ``LinkOfTrust`` by task id. Args: task_id (str): the task id to find. Returns: LinkOfTrust: the link matching the task id. Raises: CoTError: if no ``LinkOfTrust`` matches.
codesearchnet
def listen(self):
    logger.info('Listening on port ' + str(self.listener.listen_port))
    self.listener.listen()
Starts the client listener to listen for server responses. Args: None Returns: None
codesearchnet
def CompileReport(self, mediator):
    report_text = [
        'Sessionize plugin identified {0:d} sessions and '
        'applied {1:d} tags.'.format(
            len(self._events_per_session), self._number_of_event_tags)]
    for session, event_count in enumerate(self._events_per_session):
        report_text.append('\tSession {0:d}: {1:d} events'.format(
            session, event_count))
    report_text = '\n'.join(report_text)
    return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: analysis report.
juraj-google-style
def __setattr__(self, name: str, value: Any) -> None:
    if name.startswith('_'):
        super().__setattr__(name, value)
    else:
        self[name] = value
Set attribute of this Dict. NOTE(daiyip): When setting attributes, public attributes (not started with '_') are set as dict fields, while private attributes (started with '_') are set on the object instance. Args: name: Name of attribute. value: Value of attribute.
github-repos
def get_height_rect(self, x: int, y: int, width: int, height: int, string: str) -> int:
    string_ = string.encode('utf-8')
    return int(lib.get_height_rect(self.console_c, x, y, width, height, string_, len(string_)))
Return the height of this text word-wrapped into this rectangle. Args: x (int): The x coordinate from the left. y (int): The y coordinate from the top. width (int): Maximum width to render the text. height (int): Maximum lines to render the text. string (str): A Unicode string. Returns: int: The number of lines of text once word-wrapped.
codesearchnet
def lock(self, key, client):
    self.key = key
    self.client = client
Set the key that will be used to ensure messages come from one party Args: key (string): The key used to validate future messages client (string): A string that will be returned to indicate who locked this device.
codesearchnet
def parse_from_string(string, version_type):
    if not re.search('[0-9]+\\.[0-9]+\\.[a-zA-Z0-9]+', string):
        raise RuntimeError('Invalid version string: %s' % string)
    major, minor, extension = string.split('.', 2)
    extension_split = extension.split('-', 1)
    patch = extension_split[0]
    if len(extension_split) == 2:
        identifier_string = '-' + extension_split[1]
    else:
        identifier_string = ''
    return Version(major, minor, patch, identifier_string, version_type)
Returns version object from Semver string. Args: string: version string version_type: version parameter Raises: RuntimeError: If the version string is not valid.
github-repos
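A standalone illustration of the string splitting performed above (no Version object is constructed):

string = '2.11.0-rc1'
major, minor, extension = string.split('.', 2)
patch, _, identifier = extension.partition('-')
print(major, minor, patch, '-' + identifier if identifier else '')  # 2 11 0 -rc1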
def get_input_shapes_map(input_tensors):
    input_arrays = [tensor[0] for tensor in input_tensors]
    input_shapes_list = []
    for _, shape, _ in input_tensors:
        dims = None
        if shape:
            dims = [dim.value for dim in shape.dims]
        input_shapes_list.append(dims)
    input_shapes = {name: shape for name, shape in zip(input_arrays, input_shapes_list) if shape}
    return input_shapes
Gets a map of input names to shapes. Args: input_tensors: List of input tensor tuples `(name, shape, type)`. Returns: {string : list of integers}.
github-repos
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): local_stream = utils.BytearrayStream() if self._unique_identifier is not None: self._unique_identifier.write( local_stream, kmip_version=kmip_version ) if self._offset is not None: self._offset.write(local_stream, kmip_version=kmip_version) if self._template_attribute is not None: self._template_attribute.write( local_stream, kmip_version=kmip_version ) self.length = local_stream.length() super(RekeyRequestPayload, self).write( output_stream, kmip_version=kmip_version ) output_stream.write(local_stream.buffer)
Write the data encoding the Rekey request payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def load_identity_signer(key_dir, key_name): key_path = os.path.join(key_dir, '{}.priv'.format(key_name)) if not os.path.exists(key_path): raise LocalConfigurationError( "No such signing key file: {}".format(key_path)) if not os.access(key_path, os.R_OK): raise LocalConfigurationError( "Key file is not readable: {}".format(key_path)) LOGGER.info('Loading signing key: %s', key_path) try: with open(key_path, 'r') as key_file: private_key_str = key_file.read().strip() except IOError as e: raise LocalConfigurationError( "Could not load key file: {}".format(str(e))) try: private_key = Secp256k1PrivateKey.from_hex(private_key_str) except signing.ParseError as e: raise LocalConfigurationError( "Invalid key in file {}: {}".format(key_path, str(e))) context = signing.create_context('secp256k1') crypto_factory = CryptoFactory(context) return crypto_factory.new_signer(private_key)
Loads a private key from the key directory, based on a validator's identity. Args: key_dir (str): The path to the key directory. key_name (str): The name of the key to load. Returns: Signer: the cryptographic signer for the key
juraj-google-style
def add_gripper(self, arm_name, gripper):
    if arm_name in self.grippers:
        raise ValueError("Attempts to add multiple grippers to one body")
    # The XPath literal was truncated at '//' in the source; the expression below is a
    # reconstruction (look up the arm body by name) and may differ from the original.
    arm_subtree = self.worldbody.find(".//body[@name='{}']".format(arm_name))
    for actuator in gripper.actuator:
        if actuator.get("name") is None:
            raise XMLError("Actuator has no name")
        if not actuator.get("name").startswith("gripper"):
            raise XMLError(
                "Actuator name {} does not have prefix 'gripper'".format(
                    actuator.get("name")
                )
            )
    for body in gripper.worldbody:
        arm_subtree.append(body)
    self.merge(gripper, merge_body=False)
    self.grippers[arm_name] = gripper
Mounts gripper to arm. Throws error if robot already has a gripper or gripper type is incorrect. Args: arm_name (str): name of arm mount gripper (MujocoGripper instance): gripper MJCF model
juraj-google-style
def clean_all(G, settings): quiet = settings["quiet"] recon = settings["recon"] sprint = settings["sprint"] error = settings["error"] all_outputs = [] for node in G.nodes(data=True): if "output" in node[1]: for item in get_all_outputs(node[1]): all_outputs.append(item) all_outputs.append(".shastore") retcode = 0 for item in sorted(all_outputs): if os.path.isfile(item): if recon: sprint("Would remove file: {}".format(item)) continue sprint("Attempting to remove file '{}'", level="verbose") try: os.remove(item) sprint("Removed file", level="verbose") except: errmes = "Error: file '{}' failed to be removed" error(errmes.format(item)) retcode = 1 if not retcode and not recon: sprint("All clean", color=True) return retcode
Removes all the output files from all targets. Args: G: The networkx graph object. settings: The settings dictionary. Returns: 0 if successful, 1 if removing even one file failed
juraj-google-style
def DEFINE_integer(flag_name, default_value, docstring, required=False):
    _define_helper(flag_name, default_value, docstring, int, required)
Defines a flag of type 'int'. Args: flag_name: The name of the flag as a string. default_value: The default value the flag should take as an int. docstring: A helpful message explaining the use of the flag.
juraj-google-style
def umount(self, forced=True):
    if self.is_mounted():
        if is_osx():
            cmd = ['/usr/sbin/diskutil', 'unmount', self.connection['mount_point']]
            if forced:
                cmd.insert(2, 'force')
            subprocess.check_call(cmd)
        else:
            cmd = ['umount', self.connection['mount_point']]
            if forced:
                cmd.insert(1, '-f')
            subprocess.check_call(cmd)
Try to unmount our mount point. Defaults to using forced method. If OS is Linux, it will not delete the mount point. Args: forced: Bool whether to force the unmount. Default is True.
codesearchnet
def encode_bqm_as_qp(solver, linear, quadratic): active = active_qubits(linear, quadratic) nan = float('nan') lin = [uniform_get(linear, qubit, (0 if (qubit in active) else nan)) for qubit in solver._encoding_qubits] lin = base64.b64encode(struct.pack(('<' + ('d' * len(lin))), *lin)) quad = [(quadratic.get((q1, q2), 0) + quadratic.get((q2, q1), 0)) for (q1, q2) in solver._encoding_couplers if ((q1 in active) and (q2 in active))] quad = base64.b64encode(struct.pack(('<' + ('d' * len(quad))), *quad)) return {'format': 'qp', 'lin': lin.decode('utf-8'), 'quad': quad.decode('utf-8')}
Encode the binary quadratic problem for submission to a given solver, using the `qp` format for data. Args: solver (:class:`dwave.cloud.solver.Solver`): The solver used. linear (dict[variable, bias]/list[variable, bias]): Linear terms of the model. quadratic (dict[(variable, variable), bias]): Quadratic terms of the model. Returns: encoded submission dictionary
codesearchnet
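A minimal illustration of the 'qp' packing used above: biases are packed as little-endian doubles and base64-encoded, with NaN marking inactive qubits; the values are made up:

import base64
import struct

biases = [0.5, -1.0, float('nan')]  # NaN marks an unused/inactive qubit
packed = struct.pack('<' + 'd' * len(biases), *biases)
print(base64.b64encode(packed).decode('utf-8'))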
def downsample(data, percent): n_genes = data.shape[0] n_cells = data.shape[1] new_data = data.copy() total_count = float(data.sum()) to_remove = total_count*percent cell_sums = data.sum(0).astype(float) cell_gene_probs = data/cell_sums cell_probs = np.array(cell_sums/total_count).flatten() cells_selected = np.random.multinomial(to_remove, pvals=cell_probs) for i, num_selected in enumerate(cells_selected): cell_gene = np.array(cell_gene_probs[:,i]).flatten() genes_selected = np.random.multinomial(num_selected, pvals=cell_gene) if sparse.issparse(data): genes_selected = sparse.csc_matrix(genes_selected).T new_data[:,i] -= genes_selected new_data[new_data < 0] = 0 return new_data
downsample the data by removing a given percentage of the reads. Args: data: genes x cells array or sparse matrix percent: float between 0 and 1
juraj-google-style
def _write_entries(self, stream, entries, converter, properties=None): def iter_entries(): for c in entries: entry = converter(c) if entry is None: continue if properties is not None: entry = OrderedDict( (key, value) for key, value in iteritems(entry) if key == 'id' or key in properties) yield entry self._dump(stream, list(iter_entries()))
Write iterable of entries as YAML object to stream. Args: stream: File-like object. entries: Iterable of entries. converter: Conversion function from entry to YAML object. properties: Set of compartment properties to output (or None to output all).
juraj-google-style
def _get_rules_from_aws(self): list_of_rules = list() if self.profile: boto3.setup_default_session(profile_name=self.profile) if self.region: ec2 = boto3.client('ec2', region_name=self.region) else: ec2 = boto3.client('ec2') security_groups = ec2.describe_security_groups(Filters=self.filters) for group in security_groups['SecurityGroups']: group_dict = dict() group_dict['id'] = group['GroupId'] group_dict['name'] = group['GroupName'] group_dict['description'] = group.get('Description', None) if (group.get('IpPermissions', None) or group.get('IpPermissionsEgress', None)): group_dict['rules'] = list() for rule in group.get('IpPermissions', None): rule_dict = self._build_rule(rule) rule_dict['direction'] = 'INGRESS' group_dict['rules'].append(rule_dict) for rule in group.get('IpPermissionsEgress', None): rule_dict = self._build_rule(rule) rule_dict['direction'] = 'EGRESS' group_dict['rules'].append(rule_dict) list_of_rules.append(group_dict) return list_of_rules
Load the EC2 security rules off AWS into a list of dict. Returns: list
codesearchnet
def __getitem__(self, item: Union[Timestamp, slice]):
    if isinstance(item, slice):
        if item.step:
            raise ValueError('Step not supported.')
        start = cast(Timestamp, item.start)
        stop = cast(Timestamp, item.stop)
        return self.query(time=start, duration=stop - start)
    return self.query(time=item, include_query_end_time=True)
Finds operations overlapping a given time or time slice. Args: item: Either a Timestamp or a slice containing start and stop Timestamps. Returns: The scheduled operations that occurs during the given time.
juraj-google-style
def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]]=None) -> DataLoader: if eval_dataset is None and self.eval_dataset is None: raise ValueError('Trainer: evaluation requires an eval_dataset.') dataloader_key = eval_dataset if isinstance(eval_dataset, str) else 'eval' if hasattr(self, '_eval_dataloaders') and dataloader_key in self._eval_dataloaders and self.args.dataloader_persistent_workers: return self.accelerator.prepare(self._eval_dataloaders[dataloader_key]) eval_dataset = self.eval_dataset[eval_dataset] if isinstance(eval_dataset, str) else eval_dataset if eval_dataset is not None else self.eval_dataset return self._get_dataloader(dataset=eval_dataset, description='Evaluation', batch_size=self.args.eval_batch_size, sampler_fn=self._get_eval_sampler, dataloader_key=dataloader_key)
Returns the evaluation [`~torch.utils.data.DataLoader`]. Subclass and override this method if you want to inject some custom behavior. Args: eval_dataset (`str` or `torch.utils.data.Dataset`, *optional*): If a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`, will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed.
github-repos
def process(self, element):
    return re.findall("[\\w\\']+", element, re.UNICODE)
Returns an iterator over the words of this element. The element is a line of text. If the line is blank, note that, too. Args: element: the element being processed Returns: The processed element.
github-repos
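The same tokenization pattern applied to a sample line, as a quick standalone check:

import re

line = "it's a small, small world"
print(re.findall("[\\w\\']+", line, re.UNICODE))
# ["it's", 'a', 'small', 'small', 'world']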
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: residual = hidden_states hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, _ = self.decomp1(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states, _ = self.decomp2(hidden_states) hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache'])
Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache.
github-repos
def remove_phenotype(self, institute, case, user, link, phenotype_id, is_group=False): LOG.info("Removing HPO term from case {0}".format(case['display_name'])) if is_group: updated_case = self.case_collection.find_one_and_update( {'_id': case['_id']}, { '$pull': { 'phenotype_terms': {'phenotype_id': phenotype_id}, 'phenotype_groups': {'phenotype_id': phenotype_id}, }, }, return_document=pymongo.ReturnDocument.AFTER ) else: updated_case = self.case_collection.find_one_and_update( {'_id': case['_id']}, { '$pull': { 'phenotype_terms': {'phenotype_id': phenotype_id}, }, }, return_document=pymongo.ReturnDocument.AFTER ) LOG.info("Creating event for removing phenotype term {0}" \ " from case {1}".format(phenotype_id, case['display_name'])) self.create_event( institute=institute, case=case, user=user, link=link, category='case', verb='remove_phenotype', subject=case['display_name'] ) LOG.debug("Case updated") return updated_case
Remove an existing phenotype from a case Args: institute (dict): A Institute object case (dict): Case object user (dict): A User object link (dict): The url to be used in the event phenotype_id (str): A phenotype id Returns: updated_case(dict)
juraj-google-style
def makedir(self, dir_name, mode=PERM_DEF): dir_name = make_string_path(dir_name) ends_with_sep = self.ends_with_path_separator(dir_name) dir_name = self._path_without_trailing_separators(dir_name) if not dir_name: self.raise_os_error(errno.ENOENT, '') if self.is_windows_fs: dir_name = self.absnormpath(dir_name) parent_dir, _ = self.splitpath(dir_name) if parent_dir: base_dir = self.normpath(parent_dir) ellipsis = self._matching_string( parent_dir, self.path_separator + '..') if parent_dir.endswith(ellipsis) and not self.is_windows_fs: base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis) if not self.exists(base_dir): self.raise_os_error(errno.ENOENT, base_dir) dir_name = self.absnormpath(dir_name) if self.exists(dir_name, check_link=True): if self.is_windows_fs and dir_name == self.path_separator: error_nr = errno.EACCES else: error_nr = errno.EEXIST if ends_with_sep and self.is_macos and not self.exists(dir_name): self.remove_object(dir_name) else: self.raise_os_error(error_nr, dir_name) head, tail = self.splitpath(dir_name) self.add_object( head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))
Create a leaf Fake directory. Args: dir_name: (str) Name of directory to create. Relative paths are assumed to be relative to '/'. mode: (int) Mode to create directory with. This argument defaults to 0o777. The umask is applied to this mode. Raises: OSError: if the directory name is invalid or parent directory is read only or as per :py:meth:`add_object`.
juraj-google-style
def InjectString(self, codestring, wait_for_completion=True):
    if self.inferior.is_running and self.inferior.gdb.IsAttached():
        try:
            self.inferior.gdb.InjectString(
                self.inferior.position, codestring,
                wait_for_completion=wait_for_completion)
        except RuntimeError:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
    else:
        logging.error('Not attached to any process.')
Try to inject python code into current thread. Args: codestring: Python snippet to execute in inferior. (may contain newlines) wait_for_completion: Block until execution of snippet has completed.
codesearchnet
def NetshStaticIp(interface, ip=u'127.0.0.9', subnet=u'255.255.255.255', gw=u'127.0.0.1'): args = [ '/c', 'netsh', 'interface', 'ip', 'set', 'address', interface, 'static', ip, subnet, gw, '1' ] res = client_utils_common.Execute( 'cmd', args, time_limit=-1, bypass_whitelist=True) return res
Changes interface to a staticly set IP. Sets IP configs to local if no paramaters passed. Args: interface: Name of the interface. ip: IP address. subnet: Subnet mask. gw: IP address of the default gateway. Returns: A tuple of stdout, stderr, exit_status.
juraj-google-style
def check_satpy(readers=None, writers=None, extras=None): from satpy.readers import configs_for_reader from satpy.writers import configs_for_writer print('Readers') print('=======') for reader, res in sorted(check_yaml_configs(configs_for_reader(reader=readers), 'reader').items()): print(reader + ': ', res) print() print('Writers') print('=======') for writer, res in sorted(check_yaml_configs(configs_for_writer(writer=writers), 'writer').items()): print(writer + ': ', res) print() print('Extras') print('======') module_names = extras if extras is not None else ('cartopy', 'geoviews') for module_name, res in sorted(_check_import(module_names).items()): print(module_name + ': ', res) print()
Check the satpy readers and writers for correct installation. Args: readers (list or None): Limit readers checked to those specified writers (list or None): Limit writers checked to those specified extras (list or None): Limit extras checked to those specified Returns: bool True if all specified features were successfully loaded.
juraj-google-style
def _remove(self, removeList, selfValue):
    for removeValue in removeList:
        print(removeValue, removeList)
        removeEverything(removeValue, selfValue)
Remove elements from a list by matching the elements in the other list. This method only looks inside current instance's value, not recursive. There is no need for a recursive one anyway. Match by == operation. Args: removeList (list): The list of matching elements. selfValue (list): The list you remove value from. Usually ``self.value``
codesearchnet
def clone_with_git(repo_uri, dest_path):
    log.info('Cloning git repo %s to %s', repo_uri, dest_path)
    git.Repo.clone_from(repo_uri, dest_path, depth=1)
Create a clone by cloning a git repository. Args: repo_uri: The URI of the git repository to clone. dest_path: The location to clone to.
juraj-google-style
def makesubatoffset(self, bitoffset, *, _offsetideal=None):
    if _offsetideal is None:
        _offsetideal = bitoffset
    if bitoffset is 0:
        return self
    newpromise = TDOPromiseCollection(self._chain)
    for promise in self._promises:
        newpromise.add(promise, bitoffset, _offsetideal=_offsetideal)
    return newpromise
Create a copy of this PromiseCollection with an offset applied to each contained promise and register each with their parent. If this promise's primitive is being merged with another primitive, a new subpromise may be required to keep track of the new offset of data coming from the new primitive. Args: bitoffset: An integer offset of the data in the new primitive. _offsetideal: An integer offset to use if the associated primitive supports arbitrary TDO control. Returns: A new TDOPromiseCollection registered with this promise collection, and with the correct offset.
codesearchnet
def view(self, vleaf, fpath=None, cleanup=True, format=None):
    graph = self.create_graphviz_digraph(vleaf, format=format)
    graph.view(fpath, cleanup=cleanup)
View the graph. Args: vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the result. fpath (`str`): The file path used to save. cleanup (`bool`): Clean up the source file after rendering. Default is True. format (str): Force overwrite ``format`` (``'pdf'``, ``'png'``, ...) configuration.
codesearchnet
def fill(self, name_or_slot, value):
    if isinstance(name_or_slot, basestring):
        slot = getattr(self.outputs, name_or_slot)
    elif isinstance(name_or_slot, Slot):
        slot = name_or_slot
    else:
        raise UnexpectedPipelineError(
            'Could not fill invalid output name: %r' % name_or_slot)
    if not slot._exists:
        raise SlotNotDeclaredError(
            'Cannot fill output with name "%s" that was just declared within the Pipeline context.'
            % slot.name)
    self._context.fill_slot(self._pipeline_key, slot, value)
Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time.
codesearchnet
def check_version_info(redis_client): redis_reply = redis_client.get("VERSION_INFO") if redis_reply is None: return true_version_info = tuple(json.loads(ray.utils.decode(redis_reply))) version_info = _compute_version_info() if version_info != true_version_info: node_ip_address = ray.services.get_node_ip_address() error_message = ("Version mismatch: The cluster was started with:\n" " Ray: " + true_version_info[0] + "\n" " Python: " + true_version_info[1] + "\n" " Pyarrow: " + str(true_version_info[2]) + "\n" "This process on node " + node_ip_address + " was started with:" + "\n" " Ray: " + version_info[0] + "\n" " Python: " + version_info[1] + "\n" " Pyarrow: " + str(version_info[2])) if version_info[:2] != true_version_info[:2]: raise Exception(error_message) else: logger.warning(error_message)
Check if various version info of this process is correct. This will be used to detect if workers or drivers are started using different versions of Python, pyarrow, or Ray. If the version information is not present in Redis, then no check is done. Args: redis_client: A client for the primary Redis shard. Raises: Exception: An exception is raised if there is a version mismatch.
juraj-google-style
def create_cloudwatch_log_event(app_name, env, region, rules): session = boto3.Session(profile_name=env, region_name=region) cloudwatch_client = session.client('logs') log_group = rules.get('log_group') filter_name = rules.get('filter_name') filter_pattern = rules.get('filter_pattern') if not log_group: LOG.critical('Log group is required and no "log_group" is defined!') raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!') if not filter_name: LOG.critical('Filter name is required and no filter_name is defined!') raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!') if filter_pattern is None: LOG.critical('Filter pattern is required and no filter_pattern is defined!') raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!') lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region) statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(" ", "_")) principal = 'logs.{}.amazonaws.com'.format(region) account_id = get_env_credential(env=env)['accountId'] source_arn = "arn:aws:logs:{0}:{1}:log-group:{2}:*".format(region, account_id, log_group) add_lambda_permissions( function=lambda_alias_arn, statement_id=statement_id, action='lambda:InvokeFunction', principal=principal, source_arn=source_arn, env=env, region=region) cloudwatch_client.put_subscription_filter( logGroupName=log_group, filterName=filter_name, filterPattern=filter_pattern, destinationArn=lambda_alias_arn) LOG.info("Created Cloudwatch log event with filter: %s", filter_pattern)
Create cloudwatch log event for lambda from rules. Args: app_name (str): name of the lambda function env (str): Environment/Account for lambda function region (str): AWS region of the lambda function rules (str): Trigger rules from the settings
juraj-google-style
def list_load_balancers(access_token, subscription_id):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Network/',
                        '/loadBalancers?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
List the load balancers in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of load balancer list with properties.
juraj-google-style
def grep_projects(tofind_list, user_profile=None, verbose=True, new=False, **kwargs): import utool as ut user_profile = ensure_user_profile(user_profile) print('user_profile = {!r}'.format(user_profile)) kwargs = kwargs.copy() colored = kwargs.pop('colored', True) grepkw = {} grepkw['greater_exclude_dirs'] = user_profile.project_exclude_dirs grepkw['exclude_dirs'] = user_profile.project_exclude_dirs grepkw['dpath_list'] = user_profile.project_dpaths grepkw['include_patterns'] = user_profile.project_include_patterns grepkw['exclude_patterns'] = user_profile.project_exclude_patterns grepkw.update(kwargs) msg_list1 = [] msg_list2 = [] print_ = msg_list1.append print_('Greping Projects') print_(('tofind_list = %s' % (ut.repr4(tofind_list, nl=True),))) if verbose: print('\n'.join(msg_list1)) grep_result = ut.grep(tofind_list, **grepkw) (found_fpath_list, found_lines_list, found_lxs_list) = grep_result reflags = grepkw.get('reflags', 0) _exprs_flags = [ut.extend_regex2(expr, reflags) for expr in tofind_list] extended_regex_list = ut.take_column(_exprs_flags, 0) reflags_list = ut.take_column(_exprs_flags, 1) reflags = reflags_list[0] resultstr = ut.make_grep_resultstr(grep_result, extended_regex_list, reflags, colored=colored) msg_list2.append(resultstr) print_ = msg_list2.append print_('====================') print_(('found_fpath_list = ' + ut.repr4(found_fpath_list))) print_('') if verbose: print('\n'.join(msg_list2)) msg_list = (msg_list1 + msg_list2) if new: return GrepResult(found_fpath_list, found_lines_list, found_lxs_list, extended_regex_list, reflags) else: return msg_list
r""" Greps the projects defined in the current UserProfile Args: tofind_list (list): user_profile (None): (default = None) Kwargs: user_profile CommandLine: python -m utool --tf grep_projects grep_projects Example: >>> # DISABLE_DOCTEST >>> from utool.util_project import * # NOQA >>> import utool as ut >>> import sys >>> tofind_list = ut.get_argval('--find', type_=list, >>> default=[sys.argv[-1]]) >>> grep_projects(tofind_list)
codesearchnet
def build_info(self): if self.is_bootloader: self.log.error('Device is in fastboot mode, could not get build info.') return info = {} info['build_id'] = self.adb.getprop('ro.build.id') info['build_type'] = self.adb.getprop('ro.build.type') return info
Get the build info of this Android device, including build id and build type. This is not available if the device is in bootloader mode. Returns: A dict with the build info of this Android device, or None if the device is in bootloader mode.
codesearchnet
def set_nsxcontroller_ip(self, **kwargs): name = kwargs.pop('name') ip_addr = str((kwargs.pop('ip_addr', None))) nsxipaddress = ip_interface(unicode(ip_addr)) if nsxipaddress.version != 4: raise ValueError('NSX Controller ip must be IPV4') ip_args = dict(name=name, address=ip_addr) method_name = 'nsx_controller_connection_addr_address' method_class = self._brocade_tunnels nsxcontroller_attr = getattr(method_class, method_name) config = nsxcontroller_attr(**ip_args) output = self._callback(config) return output
Set nsx-controller IP Args: name (str): NSX controller name. ip_addr (str): IPv4 address. callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: ValueError: if the address is not IPv4.
juraj-google-style
def run_amylpred2(self, seq, outdir, run_amylmuts=False): outdir_amylpred = op.join(outdir, 'AMYLPRED2_results') if not op.exists(outdir_amylpred): os.mkdir(outdir_amylpred) url = "http: cj = CookieJar() opener = build_opener(HTTPCookieProcessor(cj)) formdata = {"email": self.email, "password": self.password} data_encoded = urlencode(formdata) data_encoded = data_encoded.encode('ASCII') response = opener.open(url, data_encoded) methods = ['AGGRESCAN', 'NETCSSP', 'PAFIG', 'APD', 'AMYLPATTERN', 'SECSTR', 'BSC', 'WALTZ', 'CONFENERGY', 'TANGO'] if run_amylmuts: methods.append('AMYLMUTS') output = {} timeCounts = 0 for met in methods: existing_results = glob.glob(op.join(outdir_amylpred, '*_{}.txt'.format(met))) if existing_results: results_file = existing_results[0] else: values = {'seq_data': seq, 'method': met} data = urlencode(values) data = data.encode('ASCII') url_input = "http: response = opener.open(url_input, data) result = str(response.read()) ind = str.find(result, 'Job ID') result2 = result[ind:ind + 50] ind1 = str.find(result2, ':') ind2 = str.find(result2, '<BR>') job_id = result2[ind1 + 2:ind2] url_result = 'http: print(url_result) print("Waiting for %s results" % met, end='.') while True: result = urlopen(url_result).read() if not result: time.sleep(1) timeCounts += 1 print('.', end='') else: response = requests.get(url_result) break results_file = op.join(outdir_amylpred, "{}_{}.txt".format(url_result.split('/')[-1].strip('.txt'), met)) with open(results_file, "wb") as handle: for data in response.iter_content(): handle.write(data) print("") method, hits = self.parse_method_results(results_file, met) output[met] = hits if timeCounts != 0: print("Time spent: %d seconds" % timeCounts) return output
Run all methods on the AMYLPRED2 web server for an amino acid sequence and gather results. Result files are cached in ``/path/to/outdir/AMYLPRED2_results``. Args: seq (str): Amino acid sequence as a string outdir (str): Directory to where output files should be saved run_amylmuts (bool): If AMYLMUTS method should be run, default False Returns: dict: Result for each method run
juraj-google-style
def tensor_rank(self, name='tensor_rank'): with self._name_scope(name): return self.shape.ndims
Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. Args: name: A name for this `Op`. Returns: Python integer, or None if the tensor rank is undefined.
github-repos
def open_workshared_model(self, model_path, central=False, detached=False, keep_worksets=True, audit=False, show_workset_config=1): if detached: if audit: if keep_worksets: self._add_entry(templates.CENTRAL_OPEN_DETACH_AUDIT.format(model_path=model_path, workset_config=show_workset_config)) else: self._add_entry(templates.CENTRAL_OPEN_DETACH_AUDIT_DISCARD.format(model_path=model_path, workset_config=show_workset_config)) elif keep_worksets: self._add_entry(templates.CENTRAL_OPEN_DETACH.format(model_path=model_path, workset_config=show_workset_config)) else: self._add_entry(templates.CENTRAL_OPEN_DETACH_DISCARD.format(model_path=model_path, workset_config=show_workset_config)) elif central: if audit: self._add_entry(templates.CENTRAL_OPEN_AUDIT.format(model_path=model_path, workset_config=show_workset_config)) else: self._add_entry(templates.CENTRAL_OPEN.format(model_path=model_path, workset_config=show_workset_config)) elif audit: self._add_entry(templates.WORKSHARED_OPEN_AUDIT.format(model_path=model_path, workset_config=show_workset_config)) else: self._add_entry(templates.WORKSHARED_OPEN.format(model_path=model_path, workset_config=show_workset_config))
Append an open workshared model entry to the journal. This instructs Revit to open a workshared model. Args: model_path (str): full path to workshared model central (bool): if True opens central model and not local detached (bool): if True opens a detached model keep_worksets (bool): if True keeps worksets when detaching audit (bool): if True audits the model when opening show_workset_config (int): workset opening configuration passed to the journal entry (default 1)
codesearchnet
def SetCTypesForLibrary(libname, fn_table): libpath = ctypes.util.find_library(libname) if not libpath: raise ErrorLibNotFound('Library %s not found' % libname) lib = ctypes.cdll.LoadLibrary(libpath) for (function, args, result) in fn_table: f = getattr(lib, function) f.argtypes = args f.restype = result return lib
Set function argument types and return types for an ObjC library. Args: libname: Library name string fn_table: List of (function, [arg types], return types) tuples Returns: ctypes.CDLL with types set according to fn_table Raises: ErrorLibNotFound: Can't find specified lib
juraj-google-style
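A usage sketch for SetCTypesForLibrary against libc's strlen, assuming a libc that ctypes.util.find_library can locate on the host.

import ctypes

# Declare argument/return types for a single libc function via the fn_table contract.
fn_table = [
    ('strlen', [ctypes.c_char_p], ctypes.c_size_t),
]
libc = SetCTypesForLibrary('c', fn_table)
assert libc.strlen(b'hello') == 5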
def _MergeEntities(self, a, b): if a.shape_id != b.shape_id: raise MergeError('shape_id must be the same') distance = max(ApproximateDistanceBetweenPoints(a.points[0][:2], b.points[0][:2]), ApproximateDistanceBetweenPoints(a.points[-1][:2], b.points[-1][:2])) if distance > self.largest_shape_distance: raise MergeError('The shape endpoints are too far away: %.1fm ' '(largest_shape_distance is %.1fm)' % (distance, self.largest_shape_distance)) return self._Migrate(b, self.feed_merger.b_schedule, False)
Merges the shapes by taking the new shape. Args: a: The first transitfeed.Shape instance. b: The second transitfeed.Shape instance. Returns: The merged shape. Raises: MergeError: If the ids are different or if the endpoints are further than largest_shape_distance apart.
juraj-google-style
def add_tools(self, *tools):
    for tool in tools:
        if not isinstance(tool, Tool):
            raise ValueError("All arguments to add_tools must be Tool subclasses.")
        self.toolbar.tools.append(tool)
Adds tools to the plot. Args: *tools (Tool) : the tools to add to the Plot Returns: None
juraj-google-style
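This looks like Bokeh's Plot.add_tools; assuming that context, a minimal usage sketch:

from bokeh.models import HoverTool, TapTool
from bokeh.plotting import figure

p = figure()                         # figure() returns a Plot subclass
p.add_tools(HoverTool(), TapTool())  # both tools are appended to p.toolbar.tools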
def get_snapshot_by(self, volume_id_or_uri, field, value): uri = self.__build_volume_snapshot_uri(volume_id_or_uri) return self._client.get_by(field, value, uri=uri)
Gets all snapshots that match the filter. The search is case-insensitive. Args: volume_id_or_uri: Can be either the volume id or the volume uri. field: Field name to filter. value: Value to filter. Returns: list: Snapshots
codesearchnet
def get_file_size(file_object): position = file_object.tell() file_object.seek(0, 2) file_size = file_object.tell() file_object.seek(position, 0) return file_size
Returns the size, in bytes, of a file. Expects an object that supports seek and tell methods. Args: file_object (file object): The object that represents the file Returns: (int): size of the file, in bytes
codesearchnet
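A quick check with an in-memory buffer shows that the size is reported and the original stream position is restored.

import io

buf = io.BytesIO(b'0123456789')
assert get_file_size(buf) == 10
assert buf.tell() == 0  # position is restored after measuring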
class BlipEncoder(nn.Module): def __init__(self, config: BlipConfig): super().__init__() self.config = config self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`BlipEncoderLayer`]. Args: config (`BlipConfig`): The corresponding vision configuration for the `BlipEncoder`.
github-repos
def add_controller(self, controller, timeout=None): assert controller.mri not in self._controllers, \ "Controller already exists for %s" % controller.mri self._controllers[controller.mri] = controller controller.setup(self) if self.state: should_publish = self._start_controllers([controller], timeout) if self.state == STARTED and should_publish: self._publish_controllers(timeout)
Add a controller to be hosted by this process Args: controller (Controller): Its controller timeout (float): Maximum amount of time to wait for each spawned object. None means forever
juraj-google-style
def GetCoinAssets(self): assets = set() for coin in self.GetCoins(): assets.add(coin.Output.AssetId) return list(assets)
Get asset ids of all coins present in the wallet. Returns: list: of UInt256 asset id's.
codesearchnet
def forward(self, input_ids: torch.Tensor, cache_position: torch.Tensor): _, seqlen = input_ids.shape position_ids = cache_position.unsqueeze(0) past_key_values = self.static_cache outs = self.model(input_ids=input_ids, attention_mask=None, position_ids=position_ids, cache_position=cache_position, past_key_values=past_key_values, use_cache=True) return outs.logits
Forward pass of the module, which is compatible with the ExecuTorch runtime. Args: input_ids (`torch.Tensor`): Tensor representing current input token id to the module. cache_position (`torch.Tensor`): Tensor representing current input position in the cache. Returns: torch.Tensor: Logits output from the model. This forward adapter serves two primary purposes: 1. **Making the Model `torch.export`-Compatible**: The adapter hides unsupported objects, such as the `Cache`, from the graph inputs and outputs, enabling the model to be exportable using `torch.export` without encountering issues. 2. **Ensuring Compatibility with `ExecuTorch` runtime**: The adapter matches the model's forward signature with that in `executorch/extension/llm/runner`, ensuring that the exported model can be executed in `ExecuTorch` out-of-the-box.
github-repos
def APFSContainerPathSpecGetVolumeIndex(path_spec): volume_index = getattr(path_spec, 'volume_index', None) if volume_index is not None: return volume_index location = getattr(path_spec, 'location', None) if location is None or not location.startswith('/apfs'): return None try: volume_index = int(location[5:], 10) - 1 except (TypeError, ValueError): volume_index = None if volume_index is None or volume_index < 0 or volume_index > 99: volume_index = None return volume_index
Retrieves the volume index from the path specification. Args: path_spec (PathSpec): path specification. Returns: int: volume index or None if the index cannot be determined.
juraj-google-style
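A standalone sketch of the location convention the helper relies on: '/apfs<N>' maps to the zero-based volume index N - 1, with anything outside 0..99 rejected.

for location, expected in [('/apfs1', 0), ('/apfs2', 1), ('/apfs101', None)]:
    try:
        index = int(location[5:], 10) - 1
    except (TypeError, ValueError):
        index = None
    if index is None or index < 0 or index > 99:
        index = None
    assert index == expected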
def remove_attribute(self, attribute: str) -> None: attr_index = self.__attr_index(attribute) if attr_index is not None: self.yaml_node.value.pop(attr_index)
Remove an attribute from the node. Use only if is_mapping() returns True. Args: attribute: The name of the attribute to remove.
juraj-google-style
def forward(self, hidden_state): projected_latents = self.in_proj(hidden_state) quantized_representation, audio_codes = self.decode_latents(projected_latents) commitment_loss = F.mse_loss(projected_latents, quantized_representation.detach(), reduction='mean') codebook_loss = F.mse_loss(quantized_representation, projected_latents.detach(), reduction='mean') quantized_representation = projected_latents + (quantized_representation - projected_latents).detach() quantized_representation = self.out_proj(quantized_representation) return (quantized_representation, commitment_loss, codebook_loss, audio_codes, projected_latents)
Quantizes the input tensor using a fixed codebook and returns the corresponding codebook vectors. Args: hidden_state (`torch.FloatTensor` of shape `(batch_size, dimension, time_steps)`): Input tensor. Returns: quantized_representation (`torch.Tensor`of shape `(batch_size, dimension, time_steps)`): Quantized continuous representation of input. commitment_loss (`torch.FloatTensor`of shape `(1)`): Commitment loss to train encoder to predict vectors closer to codebook entries. codebook_loss (`torch.FloatTensor`of shape `(1)`): Codebook loss to update the codebook. audio_codes (`torch.LongTensor` of shape `(batch_size, time_steps)`): Codebook indices for each codebook, quantized discrete representation of input. projected_latents (torch.FloatTensor of shape `(batch_size, num_codebooks * dimension, time_steps)`): Projected latents (continuous representation of input before quantization).
github-repos
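The line `quantized_representation = projected_latents + (quantized_representation - projected_latents).detach()` is the straight-through estimator; a minimal sketch of the trick in isolation, with torch.round standing in for the codebook lookup:

import torch

z = torch.randn(2, 8, requires_grad=True)  # continuous latents
z_q = torch.round(z)                       # non-differentiable quantization stand-in
z_q_st = z + (z_q - z).detach()            # value equals z_q, gradient flows straight to z
z_q_st.sum().backward()
assert torch.allclose(z.grad, torch.ones_like(z))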
def plot_points(points, lattice=None, coords_are_cartesian=False, fold=False, ax=None, **kwargs): (ax, fig, plt) = get_ax3d_fig_plt(ax) if ('color' not in kwargs): kwargs['color'] = 'b' if (((not coords_are_cartesian) or fold) and (lattice is None)): raise ValueError('coords_are_cartesian False or fold True require the lattice') for p in points: if fold: p = fold_point(p, lattice, coords_are_cartesian=coords_are_cartesian) elif (not coords_are_cartesian): p = lattice.get_cartesian_coords(p) ax.scatter(*p, **kwargs) return (fig, ax)
Adds Points to a matplotlib Axes Args: points: list of coordinates lattice: Lattice object used to convert from reciprocal to cartesian coordinates coords_are_cartesian: Set to True if you are providing coordinates in cartesian coordinates. Defaults to False. Requires lattice if False. fold: whether the points should be folded inside the first Brillouin Zone. Defaults to False. Requires lattice if True. ax: matplotlib :class:`Axes` or None if a new figure should be created. kwargs: kwargs passed to the matplotlib function 'scatter'. Color defaults to blue Returns: matplotlib figure and matplotlib ax
codesearchnet
def _config_for_enable_caching_device(rnn_cell): default_enable_caching_device = ops.executing_eagerly_outside_functions() if rnn_cell._enable_caching_device != default_enable_caching_device: return {'enable_caching_device': rnn_cell._enable_caching_device} return {}
Return the dict config for RNN cell wrt to enable_caching_device field. Since enable_caching_device is a internal implementation detail for speed up the RNN variable read when running on the multi remote worker setting, we don't want this config to be serialized constantly in the JSON. We will only serialize this field when a none default value is used to create the cell. Args: rnn_cell: the RNN cell for serialize. Returns: A dict which contains the JSON config for enable_caching_device value or empty dict if the enable_caching_device value is same as the default value.
github-repos
def _pull_out_unaffected_blocks_lhs(lhs, rest, out_port, in_port): _, block_index = lhs.index_in_block(out_port) bs = lhs.block_structure nbefore, nblock, nafter = (sum(bs[:block_index]), bs[block_index], sum(bs[block_index + 1:])) before, block, after = lhs.get_blocks((nbefore, nblock, nafter)) if before != cid(nbefore) or after != cid(nafter): outer_lhs = before + cid(nblock - 1) + after inner_lhs = cid(nbefore) + block + cid(nafter) return outer_lhs << Feedback.create( SeriesProduct.create(inner_lhs, *rest), out_port=out_port, in_port=in_port) elif block == cid(nblock): outer_lhs = before + cid(nblock - 1) + after return outer_lhs << Feedback.create( SeriesProduct.create(*rest), out_port=out_port, in_port=in_port) raise CannotSimplify()
In a self-Feedback of a series product, where the left-most operand is reducible, pull all non-trivial blocks outside of the feedback. Args: lhs (Circuit): The reducible circuit rest (tuple): The other SeriesProduct operands out_port (int): The feedback output port index in_port (int): The feedback input port index Returns: Circuit: The simplified circuit
juraj-google-style
def get_most_unrolled_urls(tweet): unrolled_urls = [] for url in get_tweet_links(tweet): if url.get("unwound", {"url": None}).get("url", None) is not None: unrolled_urls.append(url["unwound"]["url"]) elif url.get("expanded_url", None) is not None: unrolled_urls.append(url["expanded_url"]) else: unrolled_urls.append(url["url"]) return unrolled_urls
For each url included in the Tweet "urls", get the most unrolled version available. Only return 1 url string per url in tweet.tweet_links In order of preference for "most unrolled" (keys from the dict at tweet.tweet_links): \n 1. `unwound`/`url` \n 2. `expanded_url` \n 3. `url` Args: tweet (Tweet): A Tweet object or dict Returns: list (list of strings): a list of the most unrolled url available
juraj-google-style
def StartService(service_name): try: win32serviceutil.StartService(service_name) logging.info("Service '%s' started.", service_name) except pywintypes.error as e: if (getattr(e, 'winerror', None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST): logging.debug("Tried to start '%s', but the service is not installed.", service_name) else: logging.exception("Encountered error trying to start '%s':", service_name)
Start a Windows service with the given name. Args: service_name: string The name of the service to be started.
codesearchnet
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)): return Kpoints('Automatic kpoint scheme', 0, Kpoints.supported_modes.Monkhorst, kpts=[kpts], kpts_shift=shift)
Convenient static constructor for an automatic Monkhorst pack Kpoint grid. Args: kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors. Defaults to (2,2,2) shift: Shift to be applied to the kpoints. Defaults to (0,0,0). Returns: Kpoints object
codesearchnet
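This matches pymatgen's Kpoints constructor; assuming that context, a usage sketch for a denser grid with a shift:

from pymatgen.io.vasp.inputs import Kpoints

# 4x4x4 Monkhorst-Pack grid with a half-step shift along the first reciprocal axis.
kpts = Kpoints.monkhorst_automatic(kpts=(4, 4, 4), shift=(0.5, 0, 0))
print(kpts)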
def plot(self, **plot_kwargs: Any) -> None: fig = plt.figure() plt.plot(self._num_cfds_seq, self._gnd_state_probs, 'ro-', figure=fig, **plot_kwargs) plt.xlabel('Number of Cliffords', figure=fig) plt.ylabel('Ground State Probability', figure=fig) fig.show()
Plots the average ground state probability vs the number of Cliffords in the RB study. Args: **plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.
codesearchnet
def _strip_unnecessary_contents_from_stack(result, processed): if isinstance(result, (PrettyTensor, Loss)): if result.is_sequence(): for tensor in result.sequence: _strip_unnecessary_contents_from_stack(tensor, processed) return else: result = result.tensor if hasattr(result, 'op'): result = result.op if result in processed: return else: processed.add(result) trace = [] found = False for f, line_no, method, _ in result._traceback: if (method in ('_replace_deferred', '_construct') and f.endswith('pretty_tensor_class.py')): found = True continue trace.append((f, line_no, method, {})) result._traceback = trace if not found: return for inp in result.inputs: _strip_unnecessary_contents_from_stack(inp, processed)
Remove the distracting lines from the stored tracebacks. This also reduces memory overhead by removing the frame contents. This is very important when doing long unrolls. Args: result: The result to process. processed: A set of already processed nodes, used to stop early.
juraj-google-style
def color_lerp( c1: Tuple[int, int, int], c2: Tuple[int, int, int], a: float ) -> Color: return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a))
Return the linear interpolation between two colors. ``a`` is the interpolation value, with 0 returning ``c1``, 1 returning ``c2``, and 0.5 returning a color halfway between both. Args: c1 (Union[Tuple[int, int, int], Sequence[int]]): The first color. At a=0. c2 (Union[Tuple[int, int, int], Sequence[int]]): The second color. At a=1. a (float): The interpolation value. Returns: Color: The interpolated Color.
juraj-google-style
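The interpolation is the per-channel formula c1 + (c2 - c1) * a; a pure-Python sketch without the tcod dependency (rounding may differ slightly from libtcod's internal behaviour):

def lerp_rgb(c1, c2, a):
    # Per-channel linear interpolation, rounded to integer colour components.
    return tuple(int(round(x + (y - x) * a)) for x, y in zip(c1, c2))

assert lerp_rgb((0, 0, 0), (255, 255, 255), 0.5) == (128, 128, 128)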
def group_pairs(pair_list): groupid_to_items = defaultdict(list) for item, groupid in pair_list: groupid_to_items[groupid].append(item) return groupid_to_items
Groups a list of items using the first element in each pair as the item and the second element as the groupid. Args: pair_list (list): list of 2-tuples (item, groupid) Returns: dict: groupid_to_items: maps a groupid to a list of items SeeAlso: group_items
juraj-google-style
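A short usage example; group_pairs preserves insertion order within each group.

pairs = [('spam', 1), ('eggs', 1), ('ham', 2)]
grouped = group_pairs(pairs)
assert dict(grouped) == {1: ['spam', 'eggs'], 2: ['ham']}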
def update(self, **kwargs): kwargs = {k: (np.array(v) if isinstance(v, (int, float)) else v) for (k, v) in kwargs.items()} self.args.update(kwargs)
Update the model arguments with additional arguments. Args: kwargs (dict): Optional keyword arguments to add to prior args.
codesearchnet
def command_factory(command): def communicate(body={}, root_dir=None): client = connect_socket(root_dir) body['mode'] = command if 'func' in body: del body['func'] data_string = pickle.dumps(body, -1) client.send(data_string) response = receive_data(client) return response return communicate
A factory which returns functions for direct daemon communication. This factory will create a function which sends a payload to the daemon and returns the unpickled object which is returned by the daemon. Args: command (string): The type of payload this should be. This determines as what kind of instruction this will be interpreted by the daemon. Returns: function: The created function.
juraj-google-style
def validated_value(self, raw_value): value = self.value(raw_value) try: for validator in self.validators: validator(value) except: raise else: return value
Return parsed parameter value and run validation handlers. Error message included in the exception will be included in the HTTP error response. Args: raw_value: raw parameter value to parse and validate Returns: The parsed and validated value. Note: Validation for params is understood here as a process of checking if data of valid type (successfully parsed/processed by the ``.value()`` handler) meets some other constraints (length, bounds, uniqueness, etc.). It will internally call its ``value()`` handler.
codesearchnet
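A minimal sketch of what a validator handler can look like: a plain callable that raises when a constraint is violated. The wiring below is hypothetical and assumes a param object exposing `validators` and `value()` as in the method above.

def max_length(limit):
    # Returns a validator that rejects values longer than `limit`.
    def validator(value):
        if len(value) > limit:
            raise ValueError('value exceeds %d characters' % limit)
    return validator

# Hypothetical wiring:
# param.validators = [max_length(10)]
# param.validated_value('short')      # returns 'short'
# param.validated_value('x' * 100)    # raises ValueError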
def transform_verbosity(self, description, use_verbose_format): if use_verbose_format is False: description = description.replace( _(", every minute"), '') description = description.replace(_(", every hour"), '') description = description.replace(_(", every day"), '') return description
Transforms the verbosity of the expression description by stripping verbosity from the original description. Args: description: The description to transform use_verbose_format: If True, will leave the description as is; if False, will strip verbose parts Returns: The transformed description with proper verbosity
juraj-google-style