Columns: code (string) · docstring (string) · source (one of: codesearchnet, juraj-google-style, github-repos)
def send(query, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, ttl=DEFAULT_TTL, local_only=False, timeout_s=2): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl) if local_only: sock.setsockopt( socket.IPPROTO_IP, socket.IP_MULTICAST_IF, struct.pack('!L', LOCALHOST_ADDRESS)) sock.settimeout(timeout_s) sock.sendto(query.encode('utf-8'), (address, port)) recv_queue = queue.Queue() def _handle_responses(): while True: try: data, address = sock.recvfrom(MAX_MESSAGE_BYTES) data = data.decode('utf-8') except socket.timeout: recv_queue.put(None) break else: _LOG.debug('Multicast response to query "%s": %s:%s', query, address[0], data) recv_queue.put((address[0], str(data))) response_thread = threading.Thread(target=_handle_responses) response_thread.start() while response_thread.is_alive(): recv_tuple = recv_queue.get() if not recv_tuple: break yield recv_tuple response_thread.join()
Sends a query to the given multicast socket and yields responses. Args: query: The string query to send. address: Multicast IP address component of the socket to send to. port: Multicast UDP port component of the socket to send to. ttl: TTL for multicast messages. 1 to keep traffic in-network. local_only: If True, send the query only on the localhost interface. timeout_s: Seconds to wait for responses. Yields: Responses that arrived before the timeout expired, as tuples of (sender_address, message).
juraj-google-style
def get_system_time(): now = win32api.GetLocalTime() meridian = 'AM' hours = int(now[4]) if (hours == 12): meridian = 'PM' elif (hours == 0): hours = 12 elif (hours > 12): hours = (hours - 12) meridian = 'PM' return '{0:02d}:{1:02d}:{2:02d} {3}'.format(hours, now[5], now[6], meridian)
Get the system time. Returns: str: Returns the system time in HH:MM:SS AM/PM format. CLI Example: .. code-block:: bash salt 'minion-id' system.get_system_time
codesearchnet
def base256_encode(n, minwidth=0): if (n > 0): arr = [] while n: (n, rem) = divmod(n, 256) arr.append(rem) b = bytearray(reversed(arr)) elif (n == 0): b = bytearray(b'\x00') else: raise ValueError('Negative numbers not supported') if ((minwidth > 0) and (len(b) < minwidth)): padding = ((minwidth - len(b)) * b'\x00') b = (bytearray(padding) + b) b.reverse() return b
Encode the input with base256. Args: n (int): input value. minwidth: minimum return value length. Raises: ValueError: if a negative number is provided. Returns: bytearray:
codesearchnet
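Not part of the original row: as a hedged sketch of what `base256_encode` is doing, the divmod loop above is simply a base-256 expansion, which can be cross-checked against the standard library's `int.to_bytes`. The helper name below is invented for the illustration.

```python
# Illustrative sketch only: shows the base-256 expansion that base256_encode
# builds with repeated divmod, compared against the standard library.
def base256_digits(n):
    """Return the little-endian base-256 digits of a non-negative integer."""
    if n < 0:
        raise ValueError("Negative numbers not supported")
    if n == 0:
        return [0]
    digits = []
    while n:
        n, rem = divmod(n, 256)
        digits.append(rem)  # least-significant byte first
    return digits

assert base256_digits(1000) == [232, 3]  # 1000 == 3 * 256 + 232
assert bytes(reversed(base256_digits(1000))) == (1000).to_bytes(2, "big")
```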
async def verify_chain_of_trust(chain): log_path = os.path.join(chain.context.config['task_log_dir'], 'chain_of_trust.log') scriptworker_log = logging.getLogger('scriptworker') with contextual_log_handler(chain.context, path=log_path, log_obj=scriptworker_log, formatter=AuditLogFormatter(fmt=chain.context.config['log_fmt'], datefmt=chain.context.config['log_datefmt'])): try: (await build_task_dependencies(chain, chain.task, chain.name, chain.task_id)) (await download_cot(chain)) verify_cot_signatures(chain) (await download_cot_artifacts(chain)) task_count = (await verify_task_types(chain)) check_num_tasks(chain, task_count) (await verify_worker_impls(chain)) (await trace_back_to_tree(chain)) except (BaseDownloadError, KeyError, AttributeError) as exc: log.critical('Chain of Trust verification error!', exc_info=True) if isinstance(exc, CoTError): raise else: raise CoTError(str(exc)) log.info('Good.')
Build and verify the chain of trust. Args: chain (ChainOfTrust): the chain we're operating on Raises: CoTError: on failure
codesearchnet
def call(self, inputs): del inputs latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size), sample_shape=1, name='latent_code') state = self.lstm.zero_state(1, dtype=tf.float32) t = 0 productions = [] stack = [self.grammar.start_symbol] while stack: symbol = stack.pop() (net, state) = self.lstm(latent_code, state) logits = (self.output_layer(net) + self.grammar.mask(symbol, on_value=0.0, off_value=(- 1000000000.0))) production = ed.OneHotCategorical(logits=logits, name=('production_' + str(t))) (_, rhs) = self.grammar.production_rules[tf.argmax(input=production, axis=(- 1))] for symbol in rhs: if (symbol in self.grammar.nonterminal_symbols): stack.append(symbol) productions.append(production) t += 1 return tf.stack(productions, axis=1)
Runs the model forward to generate a sequence of productions. Args: inputs: Unused. Returns: productions: Tensor of shape [1, num_productions, num_production_rules]. Slices along the `num_productions` dimension represent one-hot vectors.
codesearchnet
def _Open(self, path_spec=None, mode='rb'): if ((not self._file_object_set_in_init) and (not path_spec)): raise ValueError('Missing path specification.') if (not self._file_object_set_in_init): if (not path_spec.HasParent()): raise errors.PathSpecError('Unsupported path specification without parent.') self._encryption_method = getattr(path_spec, 'encryption_method', None) if (self._encryption_method is None): raise errors.PathSpecError('Path specification missing encryption method.') self._file_object = resolver.Resolver.OpenFileObject(path_spec.parent, resolver_context=self._resolver_context) self._path_spec = path_spec
Opens the file-like object. Args: path_spec (Optional[PathSpec]): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
codesearchnet
def RestrictFeedItemToGeoTarget(client, feed_item, location_id): feed_item_target_service = client.GetService( 'FeedItemTargetService', version='v201809') criterion_target = { 'xsi_type': 'FeedItemCriterionTarget', 'feedId': feed_item['feedId'], 'feedItemId': feed_item['feedItemId'], 'criterion': { 'xsi_type': 'Location', 'id': location_id } } operation = {'operator': 'ADD', 'operand': criterion_target} response = feed_item_target_service.mutate([operation]) new_location_target = response['value'][0] print('Feed item target for feed ID %d and feed item ID %d was created to ' 'restrict serving to location ID %d.' % (new_location_target['feedId'], new_location_target['feedItemId'], new_location_target['criterion']['id']))
Restrict a feed item to a geo target location. Args: client: An AdWordsClient instance. feed_item: A FeedItem. location_id: The Id of the location to restrict to.
juraj-google-style
def SetHeaders(self, soap_headers, http_headers): self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers)
Set the headers for the underlying client. Args: soap_headers: A SOAP element for the SOAP headers. http_headers: A dictionary for the http headers.
codesearchnet
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True): num_pages_for_vocab_generation = approx_vocab_size vocab_file = vocab_filename(approx_vocab_size, strip) def my_generator(data_prefix): count = 0 for page in corpus_page_generator( all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp): revisions = page["revisions"] if revisions: text = get_text(revisions[-1], strip=strip) yield text count += 1 if count % 100 == 0: tf.logging.info("reading pages for vocab %d" % count) if count > num_pages_for_vocab_generation: break return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file, approx_vocab_size, my_generator(data_prefix))
Get or generate the vocabulary. Args: data_dir: a string tmp_dir: a string data_prefix: a string max_page_size_exp: an integer approx_vocab_size: an integer strip: a boolean Returns: a TextEncoder
juraj-google-style
def _PrintProcessingTime(self, processing_status): if (not processing_status): processing_time = '00:00:00' else: processing_time = (time.time() - processing_status.start_time) time_struct = time.gmtime(processing_time) processing_time = time.strftime('%H:%M:%S', time_struct) self._output_writer.Write('Processing time\t\t: {0:s}\n'.format(processing_time))
Prints the processing time. Args: processing_status (ProcessingStatus): processing status.
codesearchnet
def get_config(self): data = self.data if type(self.data).__module__ == np.__name__: data = self.data.tolist() try: json_data = json.dumps(data) except TypeError as e: raise TypeError(f'Data not JSON Serializable: {data}') from e targets = self.targets if type(self.targets).__module__ == np.__name__: targets = self.targets.tolist() try: json_targets = json.dumps(targets) except TypeError as e: raise TypeError(f'Targets not JSON Serializable: {targets}') from e return {'data': json_data, 'targets': json_targets, 'length': self.length, 'sampling_rate': self.sampling_rate, 'stride': self.stride, 'start_index': self.start_index, 'end_index': self.end_index, 'shuffle': self.shuffle, 'reverse': self.reverse, 'batch_size': self.batch_size}
Returns the TimeseriesGenerator configuration as Python dictionary. Returns: A Python dictionary with the TimeseriesGenerator configuration.
github-repos
def sort_imports(file: str, check_only: bool=True): with open(file, encoding='utf-8') as f: code = f.read() if '_import_structure' not in code or 'define_import_structure' in code: return main_blocks = split_code_in_indented_blocks(code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:') for block_idx in range(1, len(main_blocks) - 1): block = main_blocks[block_idx] block_lines = block.split('\n') line_idx = 0 while line_idx < len(block_lines) and '_import_structure' not in block_lines[line_idx]: if 'import dummy' in block_lines[line_idx]: line_idx = len(block_lines) else: line_idx += 1 if line_idx >= len(block_lines): continue internal_block_code = '\n'.join(block_lines[line_idx:-1]) indent = get_indent(block_lines[1]) internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent) pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key keys = [pattern.search(b).groups()[0] if pattern.search(b) is not None else None for b in internal_blocks] keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None] sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])] count = 0 reorderded_blocks = [] for i in range(len(internal_blocks)): if keys[i] is None: reorderded_blocks.append(internal_blocks[i]) else: block = sort_objects_in_import(internal_blocks[sorted_indices[count]]) reorderded_blocks.append(block) count += 1 main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]]) if code != '\n'.join(main_blocks): if check_only: return True else: print(f'Overwriting {file}.') with open(file, 'w', encoding='utf-8') as f: f.write('\n'.join(main_blocks))
Sort the imports defined in the `_import_structure` of a given init. Args: file (`str`): The path to the init to check/fix. check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.
github-repos
def cast_to_seq(obj, alphabet=IUPAC.extended_protein): if isinstance(obj, Seq): return obj if isinstance(obj, SeqRecord): return obj.seq if isinstance(obj, str): obj = obj.upper() return Seq(obj, alphabet) else: raise ValueError('Must provide a string, Seq, or SeqRecord object.')
Return a Seq representation of a string or SeqRecord object. Args: obj (str, Seq, SeqRecord): Sequence string or Biopython SeqRecord object alphabet: See Biopython SeqRecord docs Returns: Seq: Seq representation of the sequence
codesearchnet
def get_files(self, retrieve=False): if self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasFile'): files = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasFile ] return files else: return []
get pcdm:hasFile for this resource Args: retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload
juraj-google-style
def add(self, email): if (email not in self._collaborators): self._collaborators[email] = ShareRequestValue.Add self._dirty = True
Add a collaborator. Args: email (str): Collaborator email address.
codesearchnet
def get_cases(variant_source, case_lines=None, case_type='ped', variant_type='snv', variant_mode='vcf'): individuals = get_individuals( variant_source=variant_source, case_lines=case_lines, case_type=case_type, variant_mode=variant_mode ) case_objs = [] case_ids = set() compressed = False tabix_index = False if variant_source.endswith('.gz'): logger.debug("Found compressed variant source") compressed = True tabix_file = '.'.join([variant_source, 'tbi']) if os.path.exists(tabix_file): logger.debug("Found index file") tabix_index = True if len(individuals) > 0: for individual in individuals: case_ids.add(individual.case_id) else: case_ids = [os.path.basename(variant_source)] for case_id in case_ids: logger.info("Found case {0}".format(case_id)) case = Case( case_id=case_id, name=case_id, variant_source=variant_source, variant_type=variant_type, variant_mode=variant_mode, compressed=compressed, tabix_index=tabix_index ) for individual in individuals: if individual.case_id == case_id: logger.info("Adding ind {0} to case {1}".format( individual.name, individual.case_id )) case.add_individual(individual) case_objs.append(case) return case_objs
Create cases and populate them with individuals Args: variant_source (str): Path to vcf files case_lines (Iterable): Ped like lines case_type (str): Format of case lines Returns: case_objs (list(puzzle.models.Case))
juraj-google-style
def slot(self): if (self.type == EventType.TOUCH_FRAME): raise AttributeError(_wrong_prop.format(self.type)) return self._libinput.libinput_event_touch_get_slot(self._handle)
The slot of this touch event. See the kernel's multitouch protocol B documentation for more information. If the touch event has no assigned slot, for example if it is from a single touch device, this property returns -1. For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`, :attr:`~libinput.constant.EventType.TOUCH_UP`, :attr:`~libinput.constant.EventType.TOUCH_MOTION` or :attr:`~libinput.constant.EventType.TOUCH_CANCEL`, this property raises :exc:`AttributeError`. Returns: int: The slot of this touch event. Raises: AttributeError
codesearchnet
def check_imports(filename: Union[str, os.PathLike]) -> list[str]: imports = get_imports(filename) missing_packages = [] for imp in imports: try: importlib.import_module(imp) except ImportError as exception: logger.warning(f'Encountered exception while importing {imp}: {exception}') if 'No module named' in str(exception): missing_packages.append(imp) else: raise if len(missing_packages) > 0: raise ImportError(f"This modeling file requires the following packages that were not found in your environment: {', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`") return get_relative_imports(filename)
Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a library is missing. Args: filename (`str` or `os.PathLike`): The module file to check. Returns: `list[str]`: The list of relative imports in the file.
github-repos
def persist_project(project): from benchbuild.utils.schema import Project, Session session = Session() projects = session.query(Project) \ .filter(Project.name == project.name) \ .filter(Project.group_name == project.group) name = project.name desc = project.__doc__ domain = project.domain group_name = project.group version = project.version() \ if callable(project.version) else project.version try: src_url = project.src_uri except AttributeError: src_url = 'unknown' if projects.count() == 0: newp = Project() newp.name = name newp.description = desc newp.src_url = src_url newp.domain = domain newp.group_name = group_name newp.version = version session.add(newp) else: newp_value = { "name": name, "description": desc, "src_url": src_url, "domain": domain, "group_name": group_name, "version": version } projects.update(newp_value) session.commit() return (projects, session)
Persist this project in the benchbuild database. Args: project: The project we want to persist.
juraj-google-style
def cancel(self, identifier: typing.Any, exc_type: typing.Optional[type]=None) -> bool: raise NotImplementedError()
Cancel an active coroutine and remove it from the schedule. Args: identifier (typing.Any): The identifier returned from add. exc_type (typing.Optional[type]): The exception type to throw into the coroutine on cancel. No exception is thrown if nothing is given. Instead the coroutine is no longer processed. Returns: bool: True if the coroutine is cancelled. False if the identifier is invalid or if the coroutine is complete.
codesearchnet
def _get_recursive_dependancies(self, dependencies_map, sourcepath, recursive=True): collected = set([]) collected.update(dependencies_map.get(sourcepath, [])) sequence = collected.copy() walkthrough = [] if recursive: while True: if (not sequence): break item = sequence.pop() walkthrough.append(item) current_item_dependancies = dependencies_map.get(item, []) for dependency in current_item_dependancies: if (dependency in walkthrough): continue else: collected.add(dependency) sequence.add(dependency) if (sourcepath in walkthrough): msg = "A circular import has occured by '{}'" raise CircularImport(msg.format(current_item_dependancies)) if (not sequence): break return collected
Return all dependencies of a source, recursively searching through its dependencies. This is a common method used by ``children`` and ``parents`` methods. Args: dependencies_map (dict): Internal buffer (internal buffers ``_CHILDREN_MAP`` or ``_PARENTS_MAP``) to use for searching. sourcepath (str): Source file path to start searching for dependencies. Keyword Arguments: recursive (bool): Switch to enable recursive finding (if True). Default to True. Raises: CircularImport: If circular error is detected from a source. Returns: set: List of dependencies paths.
codesearchnet
def _find_relation(self, span_doc: doc, r: List) -> Dict: rule = r[1][0] span_pivot = 0 relation = {} for e_id, element in enumerate(rule): if not span_doc[span_pivot:]: for extra_id, _, in enumerate(rule[e_id:]): relation[e_id+extra_id] = None break new_doc = self._tokenizer.tokenize_to_spacy_doc(span_doc[span_pivot:].text) if "OP" not in element: relation[e_id] = (span_pivot, span_pivot+1) span_pivot += 1 else: if e_id < len(rule)-1: tmp_rule_1 = [rule[e_id]] tmp_rule_2 = [rule[e_id+1]] tmp_matcher = Matcher(self._nlp.vocab) tmp_matcher.add(0, None, tmp_rule_1) tmp_matcher.add(1, None, tmp_rule_2) tmp_matches = sorted([x for x in tmp_matcher(new_doc) if x[1] != x[2]], key=lambda a: a[1]) if not tmp_matches: relation[e_id] = None else: matches_1 = [x for x in tmp_matches if x[0] == 0 and x[1] == 0] if not matches_1: relation[e_id] = None else: _, s1, e1 = matches_1[0] matches_2 = [x for x in tmp_matches if x[0] == 1] if not matches_2: relation[e_id] = (span_pivot, span_pivot + e1) span_pivot += e1 else: _, s2, e2 = matches_2[0] if e1 <= s2: relation[e_id] = (span_pivot, span_pivot + e1) span_pivot += e1 else: relation[e_id] = (span_pivot, span_pivot + s2) span_pivot += s2 else: relation[e_id] = (span_pivot, len(span_doc)) return relation
Get the relations between each pattern in the spacy rule and the matches Args: span_doc: doc r: List Returns: Dict
juraj-google-style
def eig(tensor, name=None): if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64: out_dtype = dtypes.complex64 elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128: out_dtype = dtypes.complex128 e, v = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=True, name=name) return (e, v)
Computes the eigen decomposition of a batch of matrices. The eigenvalues and eigenvectors for a non-Hermitian matrix in general are complex. The eigenvectors are not guaranteed to be linearly independent. Computes the eigenvalues and right eigenvectors of the innermost N-by-N matrices in `tensor` such that `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1. Args: tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of each inner matrix is referenced. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is `[..., N]`. The eigenvalues are not necessarily ordered. v: Eigenvectors. Shape is `[..., N, N]`. The columns of the innermost matrices contain eigenvectors of the corresponding matrices in `tensor`
github-repos
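As a usage illustration (not from the original repository, and assuming a TensorFlow 2.x install), the public `tf.linalg.eig` API that this wrapper backs can be exercised on a small real, non-symmetric matrix; even for real input the results come back complex:

```python
# Hedged usage sketch for the public tf.linalg.eig API.
import tensorflow as tf

matrix = tf.constant([[0.0, 1.0],
                      [-2.0, -3.0]], dtype=tf.float32)
eigenvalues, eigenvectors = tf.linalg.eig(matrix)  # complex64 outputs

print(eigenvalues)   # approximately [-1.+0.j, -2.+0.j]
print(eigenvectors)  # columns are the corresponding right eigenvectors
```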
def has_datastore(self): (success, result) = self._read_from_hdx('datastore', self.data['id'], 'resource_id', self.actions()['datastore_search']) if (not success): logger.debug(result) elif result: return True return False
Check if the resource has a datastore. Returns: bool: Whether the resource has a datastore or not
codesearchnet
def _find_uninitialized(self): return set((name for (name, prop) in self._properties.iteritems() if (not prop._is_initialized(self))))
Internal helper to find uninitialized properties. Returns: A set of property names.
codesearchnet
def _genBgTerm_fromXX(self, vTot, vCommon, XX, a=None, c=None): vSpecific = (vTot - vCommon) SP.random.seed(0) if (c == None): c = SP.randn(self.P) XX += (0.001 * SP.eye(XX.shape[0])) L = LA.cholesky(XX, lower=True) R = self.genWeights(self.N, self.P) A = self.genTraitEffect() if (a is not None): A[(0, :)] = a Yc = SP.dot(L, SP.dot(R, A)) Yc *= (SP.sqrt(vCommon) / SP.sqrt(Yc.var(0).mean())) R = SP.randn(self.N, self.P) Yi = SP.dot(L, SP.dot(R, SP.diag(c))) Yi *= (SP.sqrt(vSpecific) / SP.sqrt(Yi.var(0).mean())) return (Yc, Yi)
Generate background term from SNPs Args: vTot: variance of Yc+Yi vCommon: variance of Yc XX: kinship matrix a: common scales, can be set for debugging purposes c: independent scales, can be set for debugging purposes
codesearchnet
def get_data_xlsx(file_name, file_contents=None, on_demand=False): return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)
Loads the new excel format files. Old format files will automatically get loaded as well. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
codesearchnet
def age(self): date = datetime.today().date() b = self.birthday if b: return int(((date - b).days / 365)) return None
Returns a user's age, based on their birthday. Returns: integer
codesearchnet
def send_location(self, room_id, geo_uri, name, thumb_url=None, thumb_info=None, timestamp=None): content_pack = {'geo_uri': geo_uri, 'msgtype': 'm.location', 'body': name} if thumb_url: content_pack['thumbnail_url'] = thumb_url if thumb_info: content_pack['thumbnail_info'] = thumb_info return self.send_message_event(room_id, 'm.room.message', content_pack, timestamp=timestamp)
Send m.location message event Args: room_id (str): The room ID to send the event in. geo_uri (str): The geo uri representing the location. name (str): Description for the location. thumb_url (str): URL to the thumbnail of the location. thumb_info (dict): Metadata about the thumbnail, type ImageInfo. timestamp (int): Set origin_server_ts (For application services only)
codesearchnet
def find_and_replace_channel_refs(self, text): match = True pattern = re.compile('< while match: match = pattern.search(text) if match: text = text.replace(match.group(0), (' return text
Find occurrences of Slack channel references and attempt to replace them with just channel names. Args: text (string): The message text Returns: string: The message text with channel references replaced.
codesearchnet
def read(self, size=None): data = self.rfile.read(size) self.bytes_read += len(data) self._check_length() return data
Read a chunk from rfile buffer and return it. Args: size (int): amount of data to read Returns: bytes: Chunk from rfile, limited by size if specified.
juraj-google-style
def _load_schema(file_path, name=None): if name is None: name = os.path.splitext(os.path.basename(file_path))[0] if name not in _SCHEMAS: with open(file_path, 'r') as schema_file: _SCHEMAS[name] = json.load(schema_file) return _SCHEMAS[name]
Loads the QObj schema for use in future validations. Caches schema in _SCHEMAS module attribute. Args: file_path(str): Path to schema. name(str): Given name for schema. Defaults to the file_path filename without its extension. Return: schema(dict): Loaded schema.
juraj-google-style
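A minimal, self-contained usage sketch (assuming `_load_schema` and its module-level `_SCHEMAS` cache are importable; the temporary file below is invented for the example) showing the caching behaviour:

```python
# Hedged usage sketch: repeated loads of the same schema return the cached dict.
import json
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as handle:
    json.dump({"type": "object"}, handle)

schema = _load_schema(handle.name)
assert schema == {"type": "object"}
assert _load_schema(handle.name) is schema  # second call hits the _SCHEMAS cache

os.unlink(handle.name)
```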
def __call__(self, utterances_batch: List[str], history_batch: List[List[str]], states_batch: Optional[list] = None) -> Tuple[List[str], List[float]]: responses, confidences = self.model(utterances_batch) if isinstance(confidences[0], list): confidences = [max(c) for c in confidences] return responses, confidences
It returns the skill inference result. Output is batches of the skill inference results and estimated confidences. Args: utterances_batch: A batch of utterances. history_batch: A batch of list typed histories for each utterance. states_batch: Optional. A batch of arbitrary typed states for each utterance. Returns: Batches of the skill inference results and estimated confidences.
juraj-google-style
def write(self, data): ctx = context.get() if len(data) != 2: logging.error("Got bad tuple of length %d (2-tuple expected): %s", len(data), data) try: key = str(data[0]) value = str(data[1]) except TypeError: logging.error("Expecting a tuple, but got %s: %s", data.__class__.__name__, data) file_index = key.__hash__() % len(self._filehandles) pool = self._pools[file_index] if pool is None: filehandle = self._filehandles[file_index] pool = output_writers.GCSRecordsPool(filehandle=filehandle, ctx=ctx) self._pools[file_index] = pool proto = kv_pb.KeyValue() proto.set_key(key) proto.set_value(value) pool.append(proto.Encode())
Write data. Args: data: actual data yielded from handler. Type is writer-specific.
juraj-google-style
def Send(self, message): if not isinstance(message, common_pb2.Message): raise ValueError("Send requires a fleetspeak.Message") if message.destination.service_name == "system": raise ValueError( "Only predefined messages can have destination.service_name == \"system\"") return self._SendImpl(message)
Send a message through Fleetspeak. Args: message: A message protocol buffer. Returns: Size of the message in bytes. Raises: ValueError: If message is not a common_pb2.Message.
juraj-google-style
def _safe_setattr(obj, name, value): okey = id(obj) if okey in _set_failures or okey in _final_objs: return False import inspect try: if inspect.ismethod(obj): setattr(obj.__func__, name, value) return True else: if isinstance(obj, dict): obj[name] = value else: setattr(obj, name, value) return True except (TypeError, AttributeError): _set_failures.append(okey) msg.warn("Failed {}:{} attribute set on {}.".format(name, value, obj)) return False
Safely sets the attribute of the specified object. This includes not setting attributes for final objects and setting __func__ for instancemethod typed objects. Args: obj: object to set an attribute for. name (str): new attribute name. value: new attribute value. Returns: bool: True if the set attribute was successful.
juraj-google-style
def retrieve_token(self, token): headers = self.client._get_private_headers() endpoint = '/tokens/{}'.format(token) return self.client._get((self.client.URL_BASE + endpoint), headers=headers)
Retrieve Token details for a specific Token. Args: token: The identifier of the token. Returns:
codesearchnet
def iter_packages(self, name, range_=None, paths=None): for package in iter_packages(name, range_, paths): if (not self.excludes(package)): (yield package)
Same as iter_packages in packages.py, but also applies this filter. Args: name (str): Name of the package, eg 'maya'. range_ (VersionRange or str): If provided, limits the versions returned to those in `range_`. paths (list of str, optional): paths to search for packages, defaults to `config.packages_path`. Returns: `Package` iterator.
codesearchnet
def __init__(self, iterable=None, modify_time=None, update_time=None): if self.__class__ is Map: raise TypeError('Map is an abstract class.') self._data = {} self._index = [] self._last_modification_timestamp = modify_time self._last_update_timestamp = update_time self.log = logging.getLogger(__name__) if iterable is not None: for item in iterable: self.Add(item)
Construct a Map object. Args: iterable: A tuple or list that can be iterated over and added to the Map, defaults to None. modify_time: An optional modify time for this Map, defaults to None. update_time: An optional update time for this Map, defaults to None. Raises: TypeError: If the objects in the iterable are of the wrong type.
github-repos
def _process_datum(self, data, input_reader, ctx, transient_shard_state): if (data is not input_readers.ALLOW_CHECKPOINT): self.slice_context.incr(context.COUNTER_MAPPER_CALLS) handler = transient_shard_state.handler if isinstance(handler, map_job.Mapper): handler(self.slice_context, data) else: if input_reader.expand_parameters: result = handler(*data) else: result = handler(data) if util.is_generator(result): for output in result: if isinstance(output, operation.Operation): output(ctx) else: output_writer = transient_shard_state.output_writer if (not output_writer): logging.warning('Handler yielded %s, but no output writer is set.', output) else: output_writer.write(output) if ((self._time() - self._start_time) >= parameters.config._SLICE_DURATION_SEC): return False return True
Process a single data piece. Call mapper handler on the data. Args: data: a datum to process. input_reader: input reader. ctx: mapreduce context transient_shard_state: transient shard state. Returns: True if scan should be continued, False if scan should be stopped.
codesearchnet
def check(self, solution): return self.func(*(solution[v] for v in self.variables))
Check that a solution satisfies the constraint. Args: solution (container): An assignment for the variables in the constraint. Returns: bool: True if the solution satisfies the constraint; otherwise False. Examples: This example creates a constraint that :math:`a \\ne b` on binary variables and tests it for two candidate solutions, with additional unconstrained variable c. >>> import dwavebinarycsp >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)], ... ['a', 'b'], dwavebinarycsp.BINARY) >>> solution = {'a': 1, 'b': 1, 'c': 0} >>> const.check(solution) False >>> solution = {'a': 1, 'b': 0, 'c': 0} >>> const.check(solution) True
codesearchnet
def _examples_from_path_handler(self, request): examples_count = int(request.args.get('max_examples')) examples_path = request.args.get('examples_path') sampling_odds = float(request.args.get('sampling_odds')) self.example_class = (tf.train.SequenceExample if request.args.get('sequence_examples') == 'true' else tf.train.Example) try: platform_utils.throw_if_file_access_not_allowed(examples_path, self._logdir, self._has_auth_group) example_strings = platform_utils.example_protos_from_path( examples_path, examples_count, parse_examples=False, sampling_odds=sampling_odds, example_class=self.example_class) self.examples = [ self.example_class.FromString(ex) for ex in example_strings] self.generate_sprite(example_strings) json_examples = [ json_format.MessageToJson(example) for example in self.examples ] self.updated_example_indices = set(range(len(json_examples))) return http_util.Respond( request, {'examples': json_examples, 'sprite': True if self.sprite else False}, 'application/json') except common_utils.InvalidUserInputError as e: return http_util.Respond(request, {'error': e.message}, 'application/json', code=400)
Returns JSON of the specified examples. Args: request: A request that should contain 'examples_path' and 'max_examples'. Returns: JSON of up to max_examlpes of the examples in the path.
juraj-google-style
def __resource_descriptor(self, resource_path, methods): descriptor = {} method_map = {} sub_resource_index = collections.defaultdict(list) sub_resource_map = {} resource_path_tokens = resource_path.split('.') for (service, protorpc_meth_info) in methods: method_info = getattr(protorpc_meth_info, 'method_info', None) path = method_info.get_path(service.api_info) method_id = method_info.method_id(service.api_info) canonical_method_id = self._get_canonical_method_id(method_id) current_resource_path = self._get_resource_path(method_id) if (current_resource_path[:len(resource_path_tokens)] != resource_path_tokens): raise api_exceptions.ToolError('Internal consistency error in resource path {0}'.format(current_resource_path)) effective_resource_path = current_resource_path[len(resource_path_tokens):] if effective_resource_path: sub_resource_name = effective_resource_path[0] new_resource_path = '.'.join([resource_path, sub_resource_name]) sub_resource_index[new_resource_path].append((service, protorpc_meth_info)) else: method_map[canonical_method_id] = self.__method_descriptor(service, method_info, protorpc_meth_info) for (sub_resource, sub_resource_methods) in sub_resource_index.items(): sub_resource_name = sub_resource.split('.')[(- 1)] sub_resource_map[sub_resource_name] = self.__resource_descriptor(sub_resource, sub_resource_methods) if method_map: descriptor['methods'] = method_map if sub_resource_map: descriptor['resources'] = sub_resource_map return descriptor
Describes a resource. Args: resource_path: string, the path of the resource (e.g., 'entries.items') methods: list of tuples of type (endpoints.Service, protorpc.remote._RemoteMethodInfo), the methods that serve this resource. Returns: Dictionary describing the resource.
codesearchnet
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size.
github-repos
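A minimal call sketch (the shapes and values below are invented for illustration; only the function defined above is assumed to be in scope) showing how the 2D padding mask is expanded into the 4D causal mask:

```python
# Illustrative call only; assumes _prepare_4d_causal_attention_mask_with_cache_position
# from above is in scope. Shapes/values are invented for the example.
import torch

batch_size, sequence_length, target_length = 1, 3, 5
attention_mask = torch.tensor([[0, 1, 1, 1, 1]])       # first position is padding
cache_position = torch.arange(2, 2 + sequence_length)  # positions of the new tokens

causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask=attention_mask,
    sequence_length=sequence_length,
    target_length=target_length,
    dtype=torch.float32,
    cache_position=cache_position,
    batch_size=batch_size,
)
print(causal_mask.shape)  # torch.Size([1, 1, 3, 5]); padded/future slots hold dtype-min
```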
def load_validator(schema_path, schema): if (os.name == 'nt'): file_prefix = 'file:///' else: file_prefix = 'file:' resolver = RefResolver((file_prefix + schema_path.replace('\\', '/')), schema) validator = Draft4Validator(schema, resolver=resolver) return validator
Create a JSON schema validator for the given schema. Args: schema_path: The filename of the JSON schema. schema: A Python object representation of the same schema. Returns: An instance of Draft4Validator.
codesearchnet
def unset(entity, *types): if not types: types = (TypedField,) fields = list(entity._fields.keys()) remove = (x for x in fields if isinstance(x, types)) for field in remove: del entity._fields[field]
Unset the TypedFields on the input `entity`. Args: entity: A mixbox.Entity object. *types: A variable-length list of TypedField subclasses. If not provided, defaults to TypedField.
juraj-google-style
def _os_release_info(self): if os.path.isfile(self.os_release_file): with open(self.os_release_file) as release_file: return self._parse_os_release_content(release_file) return {}
Get the information items from the specified os-release file. Returns: A dictionary containing all information items.
codesearchnet
def to_dict(self): return {'name': self.name, 'id': self.id, 'type': self.type, 'workflow_id': self.workflow_id, 'queue': self.queue, 'start_time': self.start_time, 'arguments': self.arguments, 'acknowledged': self.acknowledged, 'func_name': self.func_name, 'hostname': self.hostname, 'worker_name': self.worker_name, 'worker_pid': self.worker_pid, 'routing_key': self.routing_key}
Return a dictionary of the job stats. Returns: dict: Dictionary of the stats.
codesearchnet
def op_list_to_dict(op_list, convert_variable_to_tensor=True): if not isinstance(op_list, (list, tuple, set)): raise TypeError(f'Variables to save should be passed in a dict or a list. Got {op_list}') op_list = nest.flatten(list(op_list)) op_list = sorted(op_list, key=lambda x: x.name) names_to_saveables = {} for var in op_list: resource_or_ref_variable = isinstance(var, resource_variable_ops.BaseResourceVariable) or isinstance(var, ref_variable.RefVariable) if isinstance(var, saveable_object.SaveableObject): names_to_saveables[var.name] = var elif isinstance(var, variables.PartitionedVariable): if var.name in names_to_saveables: raise ValueError(f'At least two variables have the same name: {var.name}') names_to_saveables[var.name] = var elif isinstance(var, variables.Variable) and var._save_slice_info: name = var._save_slice_info.full_name if name in names_to_saveables: if not isinstance(names_to_saveables[name], list): raise ValueError(f'Mixing slices and non-slices with the same name: {name}') names_to_saveables[name].append(var) else: names_to_saveables[name] = [var] elif isinstance(var, trackable.Trackable) and (not resource_or_ref_variable): trackable_saveables = [factory() if callable(factory) else factory for factory in saveable_objects_from_trackable(var, tf1_saver=True).values()] names_to_saveables.update(op_list_to_dict(trackable_saveables)) elif not getattr(var, '_in_graph_mode', True): if not isinstance(var, resource_variable_ops.BaseResourceVariable): raise ValueError(f'Can only save/restore ResourceVariables when eager execution is enabled. Got type: {type(var)}.') set_var = names_to_saveables.setdefault(var._shared_name, var) if set_var is not var: raise ValueError(f"Two different ResourceVariable objects with the same shared_name '{var._shared_name}' were passed to the Saver. This likely means that they were created in different Graphs or isolated contexts, and may not be checkpointed together.") else: if convert_variable_to_tensor: if isinstance(var, resource_variable_ops.BaseResourceVariable): var = var._graph_element else: var = ops.convert_to_tensor(var, as_ref=True) if not _tensor_comes_from_variable(var): raise TypeError(f'Variable to save is not a Variable: {var}') if var.op.type == 'ReadVariableOp': name = var.op.inputs[0].op.name else: name = var.op.name if name in names_to_saveables: raise ValueError(f'At least two variables have the same name: {name}') names_to_saveables[name] = var return names_to_saveables
Create a dictionary of names to operation lists. This method is only used when the variable name matters (e.g. when saving or restoring from a TF1 name-based checkpoint). In TF2, this can be called from `tf.train.Checkpoint.restore` when loading from a name-based checkpoint. Args: op_list: A (nested) list, tuple, or set of Variables or SaveableObjects. convert_variable_to_tensor: Whether or not to convert single Variables with no slice info into Tensors. Returns: A dictionary of names to the operations that must be saved under that name. Variables with save_slice_info are grouped together under the same key in no particular order. Raises: TypeError: If the type of op_list or its elements is not supported. ValueError: If at least two saveables share the same name.
github-repos
def filter_embeddings(embeddings, vocab, dim): if not isinstance(embeddings, dict): return _embeddings = np.zeros([len(vocab), dim]) for word in vocab: if word in embeddings: word_idx = vocab[word] _embeddings[word_idx] = embeddings[word] return _embeddings
Loads word vectors into a numpy array. Args: embeddings (dict): a dictionary of numpy arrays. vocab (dict): word_index lookup table. dim (int): dimension of the word embeddings. Returns: numpy array: an array of word embeddings.
juraj-google-style
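A small self-contained usage sketch (toy vectors, not from the original project; assumes `filter_embeddings` from above is in scope) showing how the vocabulary indices select rows of the output matrix:

```python
# Toy usage sketch for filter_embeddings.
import numpy as np

embeddings = {
    "cat": np.array([1.0, 0.0]),
    "dog": np.array([0.0, 1.0]),
}
vocab = {"<pad>": 0, "cat": 1, "dog": 2, "fish": 3}  # "fish" has no pretrained vector

matrix = filter_embeddings(embeddings, vocab, dim=2)
print(matrix.shape)  # (4, 2); rows for <pad> and fish stay all-zero
print(matrix[1])     # [1. 0.] -> the "cat" vector landed at index vocab["cat"]
```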
def astype(array, y): if isinstance(y, autograd.core.Node): return array.astype(numpy.array(y.value).dtype) return array.astype(numpy.array(y).dtype)
A functional form of the `astype` method. Args: array: The array or number to cast. y: An array or number, as the input, whose type should be that of array. Returns: An array or number with the same dtype as `y`.
juraj-google-style
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
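Since XLM-RoBERTa returns only zeros, the length bookkeeping is the only interesting part; a toy illustration with invented special-token ids (not real tokenizer output):

```python
# Toy illustration of the zero-only token_type_ids; the ids below are invented,
# not real XLM-RoBERTa vocabulary ids.
cls, sep = [0], [2]            # stand-ins for <s> and </s>
token_ids_0 = [100, 101, 102]
token_ids_1 = [200, 201]

single = len(cls + token_ids_0 + sep) * [0]
pair = len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

assert single == [0] * 5       # <s> x1 x2 x3 </s>
assert pair == [0] * 9         # <s> a </s></s> b </s>
```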
def log_flush_for_interval(self, log_type, interval): if (not log_type): log_type = 'policies' interval = interval.replace(' ', '+') flush_url = '{}/{}/interval/{}'.format(self.url, log_type, interval) self.jss.delete(flush_url)
Flush logs for an interval of time. Args: log_type (str): Only documented type is "policies". This will be applied by default if nothing is passed. interval (str): Combination of "Zero", "One", "Two", "Three", "Six", and "Day", "Week", "Month", "Year". e.g. ("Three+Months") Please note: The documentation for this specifies the singular form (e.g. "Month"), and plural ("Months") at different times, and further the construction is listed as "THREE MONTHS" elsewhere. Limited testing indicates that pluralization does not matter, nor does capitalization. Please test! No validation is performed on this prior to the request being made. Raises: JSSDeleteError if provided url_path has a >= 400 response.
codesearchnet
def set_config(self, key, value): keyname = ('config:' + key) self.kvstore.set(keyname, value)
Set a persistent config key to a value, stored in the registry Args: key (string): The key name value (string): The key value
codesearchnet
def __init__(self, ascii_codepage='cp1252', registry_file_reader=None): super(WinRegistry, self).__init__() self._ascii_codepage = ascii_codepage self._registry_file_reader = registry_file_reader self._registry_files = {} self._user_registry_files = {}
Initializes the Windows Registry. Args: ascii_codepage (Optional[str]): ASCII string codepage. registry_file_reader (Optional[WinRegistryFileReader]): Windows Registry file reader.
juraj-google-style
def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]: dynamic = False if padding is None: padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 return (padding, dynamic) if isinstance(padding, str): padding = padding.lower() if padding == 'same': if stride == 1 and dilation * (kernel_size - 1) % 2 == 0: padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 else: padding = 0 dynamic = True elif padding == 'valid': padding = 0 else: padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 return (padding, dynamic)
Utility function to get the tuple padding value given the kernel_size and padding. Args: padding (Union[`str`, `int`], *optional*): Padding value, can be either `"same"`, `"valid"`. If a different value is provided the default padding from PyTorch is used. kernel_size (`int`, *optional*, defaults to 7): Kernel size of the convolution layers. stride (`int`, *optional*, defaults to 1): Stride value of the convolution layers. dilation (`int`, *optional*, defaults to 1): Dilation value of the convolution layers.
github-repos
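A short usage sketch (assuming `get_padding_value` from above is importable) showing when the helper can resolve "same" padding statically versus when it falls back to dynamic padding:

```python
# Usage sketch only; assumes get_padding_value from above is in scope.
# stride 1 with an even total padding -> padding resolved statically.
padding, dynamic = get_padding_value("same", kernel_size=7, stride=1, dilation=1)
print(padding, dynamic)   # symmetric padding value, dynamic == False

# stride > 1 cannot be padded symmetrically ahead of time -> dynamic 'same'.
padding, dynamic = get_padding_value("same", kernel_size=3, stride=2, dilation=1)
assert (padding, dynamic) == (0, True)

# 'valid' is plain zero padding.
assert get_padding_value("valid", kernel_size=3) == (0, False)
```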
def imshow_bboxes(img, bboxes, colors='green', top_k=(- 1), thickness=1, show=True, win_name='', wait_time=0, out_file=None): img = imread(img) if isinstance(bboxes, np.ndarray): bboxes = [bboxes] if (not isinstance(colors, list)): colors = [colors for _ in range(len(bboxes))] colors = [color_val(c) for c in colors] assert (len(bboxes) == len(colors)) for (i, _bboxes) in enumerate(bboxes): _bboxes = _bboxes.astype(np.int32) if (top_k <= 0): _top_k = _bboxes.shape[0] else: _top_k = min(top_k, _bboxes.shape[0]) for j in range(_top_k): left_top = (_bboxes[(j, 0)], _bboxes[(j, 1)]) right_bottom = (_bboxes[(j, 2)], _bboxes[(j, 3)]) cv2.rectangle(img, left_top, right_bottom, colors[i], thickness=thickness) if show: imshow(img, win_name, wait_time) if (out_file is not None): imwrite(img, out_file)
Draw bboxes on an image. Args: img (str or ndarray): The image to be displayed. bboxes (list or ndarray): A list of ndarray of shape (k, 4). colors (list[str or tuple or Color]): A list of colors. top_k (int): Plot the first k bboxes only if set positive. thickness (int): Thickness of lines. show (bool): Whether to show the image. win_name (str): The window name. wait_time (int): Value of waitKey param. out_file (str, optional): The filename to write the image.
codesearchnet
def _copy(src, dst, src_is_storage, dst_is_storage): if (src_is_storage and dst_is_storage): system_src = get_instance(src) system_dst = get_instance(dst) if (system_src is system_dst): if (system_src.relpath(src) == system_dst.relpath(dst)): raise same_file_error(("'%s' and '%s' are the same file" % (src, dst))) try: return system_dst.copy(src, dst) except (UnsupportedOperation, ObjectException): pass for (caller, called, method) in ((system_dst, system_src, 'copy_from_%s'), (system_src, system_dst, 'copy_to_%s')): if hasattr(caller, (method % called.storage)): try: return getattr(caller, (method % called.storage))(src, dst, called) except (UnsupportedOperation, ObjectException): continue with cos_open(src, 'rb') as fsrc: with cos_open(dst, 'wb') as fdst: for stream in (fsrc, fdst): try: buffer_size = getattr(stream, '_buffer_size') break except AttributeError: continue else: buffer_size = COPY_BUFSIZE copyfileobj(fsrc, fdst, buffer_size)
Copies file from source to destination Args: src (str or file-like object): Source file. dst (str or file-like object): Destination file. src_is_storage (bool): Source is storage. dst_is_storage (bool): Destination is storage.
codesearchnet
def _probe_services(self, handle): code = 10240 def event_filter_func(event): if ((event.command_class == 4) and (event.command == 2)): (event_handle,) = unpack('B', event.payload[0:1]) return (event_handle == handle) return False def end_filter_func(event): if ((event.command_class == 4) and (event.command == 1)): (event_handle,) = unpack('B', event.payload[0:1]) return (event_handle == handle) return False payload = struct.pack('<BHHBH', handle, 1, 65535, 2, code) try: response = self._send_command(4, 1, payload) except InternalTimeoutError: return (False, {'reason': 'Timeout waiting for command response'}) (handle, result) = unpack('<BH', response.payload) if (result != 0): return (False, None) events = self._wait_process_events(0.5, event_filter_func, end_filter_func) gatt_events = [x for x in events if event_filter_func(x)] end_events = [x for x in events if end_filter_func(x)] if (len(end_events) == 0): return (False, None) end_event = end_events[0] (_, result, _) = unpack('<BHH', end_event.payload) if (result != 0): self._logger.warn(('Error enumerating GATT table, protocol error code = %d (0x%X)' % (result, result))) return (False, None) services = {} for event in gatt_events: process_gatt_service(services, event) return (True, {'services': services})
Probe for all primary services and characteristics in those services Args: handle (int): the connection handle to probe
codesearchnet
def is_valid_callsign(self, callsign, timestamp=timestamp_now): try: if self.get_all(callsign, timestamp): return True except KeyError: return False
Checks if a callsign is valid Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: bool: True / False Example: The following checks if "DH1TW" is a valid callsign >>> from pyhamtools import LookupLib, Callinfo >>> my_lookuplib = LookupLib(lookuptype="countryfile") >>> cic = Callinfo(my_lookuplib) >>> cic.is_valid_callsign("DH1TW") True
codesearchnet
def __init__(self, port=None, max_length=ControllerMaxLen.OFPCML_NO_BUFFER): super().__init__(action_type=ActionType.OFPAT_OUTPUT, length=16) self.port = port self.max_length = max_length
Create a ActionOutput with the optional parameters below. Args: port (:class:`Port` or :class:`int`): Output port. max_length (int): Max length to send to controller.
juraj-google-style
def stack(self, trees: Iterable[Tree[Array['*s']]]) -> Tree[Array['n_trees *s']]: return self.backend.map(_stack, *trees)
Stack a tree of `Iterable[Array]`. Supports `jax`, `tf`, `np`. Example: ```python etree.stack([ {'a': np.array([1])}, {'a': np.array([2])}, {'a': np.array([3])}, ]) == { 'a': np.array([[1], [2], [3]]) } ``` Args: trees: The list of tree to stack Returns: Tree of arrays.
github-repos
def insert_top(self, node): if (not isinstance(node, grammar.STATEMENTS)): raise ValueError self.to_insert_top.append(node)
Insert statements at the top of the function body. Note that multiple calls to `insert_top` will result in the statements being prepended in that order; this is different behavior from `prepend`. Args: node: The statement to prepend. Raises: ValueError: If the given node is not a statement.
codesearchnet
def get_dirty_items(item_list, flag_list): assert (len(item_list) == len(flag_list)) dirty_items = [item for (item, flag) in zip(item_list, flag_list) if (not flag)] return dirty_items
Returns each item in item_list whose corresponding flag in flag_list is False. Args: item_list (list): flag_list (list): Returns: dirty_items
codesearchnet
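A tiny usage sketch (toy data; assumes `get_dirty_items` from above is in scope) making the flag semantics concrete:

```python
# Toy example: items whose flag is falsy are considered "dirty" and returned.
items = ['a', 'b', 'c']
flags = [True, False, True]
assert get_dirty_items(items, flags) == ['b']
```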
def complain(distribution_name): try: pkg_resources.get_distribution(distribution_name) warnings.warn('The {pkg} distribution is now obsolete. Please `pip uninstall {pkg}`. In the future, this warning will become an ImportError.'.format(pkg=distribution_name), DeprecationWarning) except pkg_resources.DistributionNotFound: pass
Issue a warning if `distribution_name` is installed. In a future release, this method will be updated to raise ImportError rather than just send a warning. Args: distribution_name (str): The name of the obsolete distribution.
codesearchnet
def load_panel_app(adapter, panel_id=None, institute='cust000'): base_url = 'https: hgnc_map = adapter.genes_by_alias() if panel_id: panel_ids = [panel_id] if not panel_id: LOG.info("Fetching all panel app panels") data = get_request(base_url.format('list_panels')) json_lines = json.loads(data) panel_ids = [panel_info['Panel_Id'] for panel_info in json_lines['result']] for panel_id in panel_ids: panel_data = get_request(base_url.format('get_panel') + panel_id) parsed_panel = parse_panel_app_panel( panel_info = json.loads(panel_data)['result'], hgnc_map=hgnc_map, institute=institute ) parsed_panel['panel_id'] = panel_id if len(parsed_panel['genes']) == 0: LOG.warning("Panel {} is missing genes. Skipping.".format(parsed_panel['display_name'])) continue try: adapter.load_panel(parsed_panel=parsed_panel) except Exception as err: raise err
Load PanelApp panels into the scout database. If no panel_id is given, load all PanelApp panels. Args: adapter(scout.adapter.MongoAdapter) panel_id(str): The panel app panel id
juraj-google-style
def create_redis_client(redis_address, password=None): redis_ip_address, redis_port = redis_address.split(":") return redis.StrictRedis( host=redis_ip_address, port=int(redis_port), password=password)
Create a Redis client. Args: redis_address (str): The IP address and port of the Redis server, formatted as "ip:port". password (str): The password of the Redis server, if any. Returns: A Redis client.
juraj-google-style
def gaussian_pdf(std=10.0, mean=0.0): norm_const = 1.0 def pdf(x): return norm_const*np.exp(-0.5 * ((x-mean)/std)**2) * \ np.sin(np.pi/180.0 * x) norm_dev = quad(pdf, 0.0, 180.0)[0] norm_const /= norm_dev return pdf
Gaussian PDF for orientation averaging. Args: std: The standard deviation in degrees of the Gaussian PDF mean: The mean in degrees of the Gaussian PDF. This should be a number in the interval [0, 180) Returns: pdf(x), a function that returns the value of the spherical Jacobian- normalized Gaussian PDF with the given STD at x (degrees). It is normalized for the interval [0, 180].
juraj-google-style
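An illustrative check (not from the original project; assumes `gaussian_pdf`, NumPy, and SciPy are available) that the returned PDF integrates to one over [0, 180] degrees, since the constructor rescales by the quadrature result:

```python
# Hedged usage sketch: verify the spherical-Jacobian-normalized Gaussian PDF
# integrates to ~1 over [0, 180] degrees. Assumes gaussian_pdf is in scope.
import numpy as np
from scipy.integrate import quad

pdf = gaussian_pdf(std=10.0, mean=0.0)
total, _ = quad(pdf, 0.0, 180.0)
print(np.isclose(total, 1.0))  # True
```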
def setup_service(api_name, api_version, credentials=None): if (not credentials): credentials = oauth2client.client.GoogleCredentials.get_application_default() return apiclient.discovery.build(api_name, api_version, credentials=credentials)
Configures genomics API client. Args: api_name: Name of the Google API (for example: "genomics") api_version: Version of the API (for example: "v2alpha1") credentials: Credentials to be used for the gcloud API calls. Returns: A configured Google Genomics API client with appropriate credentials.
codesearchnet
def add_institute(self, institute_obj): internal_id = institute_obj['internal_id'] display_name = institute_obj['internal_id'] if self.institute(institute_id=internal_id): raise IntegrityError('Institute {0} already exists in database'.format(display_name)) LOG.info('Adding institute with internal_id: {0} and display_name: {1}'.format(internal_id, display_name)) insert_info = self.institute_collection.insert_one(institute_obj) LOG.info('Institute saved')
Add a institute to the database Args: institute_obj(Institute)
codesearchnet
def reset(self, *args): self.resource = self.resource.reset(list(args)) return self
Resets any of the tokens for this Application. Note that you may have to reauthenticate afterwards. Usage: application.reset('api_token') application.reset('api_token', 'totp_secret') Args: *args (list of str): one or more of ['api_token', 'subscription_token', 'totp_secret'] Returns: The Application.
codesearchnet
def sg_log(tensor, opt): return tf.log(tensor + tf.sg_eps, name=opt.name)
r"""Log transform a dense tensor See `tf.log()` in tensorflow. Args: tensor: A `Tensor` ( automatically given by chain ) opt: name: If provided, replace current tensor's name. Returns: A `Tensor`.
juraj-google-style
def convert_to_experiment_list(experiments): exp_list = experiments if (experiments is None): exp_list = [] elif isinstance(experiments, Experiment): exp_list = [experiments] elif (type(experiments) is dict): exp_list = [Experiment.from_json(name, spec) for (name, spec) in experiments.items()] if ((type(exp_list) is list) and all((isinstance(exp, Experiment) for exp in exp_list))): if (len(exp_list) > 1): logger.warning('All experiments will be using the same SearchAlgorithm.') else: raise TuneError('Invalid argument: {}'.format(experiments)) return exp_list
Produces a list of Experiment objects. Converts input from dict, single experiment, or list of experiments to list of experiments. If input is None, will return an empty list. Arguments: experiments (Experiment | list | dict): Experiments to run. Returns: List of experiments.
codesearchnet
def translate_file(estimator, subtokenizer, input_file, output_file=None, print_all_translations=True): batch_size = _DECODE_BATCH_SIZE (sorted_inputs, sorted_keys) = _get_sorted_inputs(input_file) num_decode_batches = (((len(sorted_inputs) - 1) // batch_size) + 1) def input_generator(): 'Yield encoded strings from sorted_inputs.' for (i, line) in enumerate(sorted_inputs): if ((i % batch_size) == 0): batch_num = ((i // batch_size) + 1) print(('Decoding batch %d out of %d.' % (batch_num, num_decode_batches))) (yield _encode_and_add_eos(line, subtokenizer)) def input_fn(): 'Created batched dataset of encoded inputs.' ds = tf.data.Dataset.from_generator(input_generator, tf.int64, tf.TensorShape([None])) ds = ds.padded_batch(batch_size, [None]) return ds translations = [] for (i, prediction) in enumerate(estimator.predict(input_fn)): translation = _trim_and_decode(prediction['outputs'], subtokenizer) translations.append(translation) if print_all_translations: print('Translating:') print(('\tInput: %s' % sorted_inputs[i])) print(('\tOutput: %s\n' % translation)) print(('=' * 100)) if (output_file is not None): if tf.gfile.IsDirectory(output_file): raise ValueError('File output is a directory, will not save outputs to file.') tf.logging.info(('Writing to file %s' % output_file)) with tf.gfile.Open(output_file, 'w') as f: for index in xrange(len(sorted_keys)): f.write(('%s\n' % translations[sorted_keys[index]]))
Translate lines in file, and save to output file if specified. Args: estimator: tf.Estimator used to generate the translations. subtokenizer: Subtokenizer object for encoding and decoding source and translated lines. input_file: file containing lines to translate output_file: file that stores the generated translations. print_all_translations: If true, all translations are printed to stdout. Raises: ValueError: if output file is invalid.
codesearchnet
def AddNEP5Token(self, token): if (token.ScriptHash.ToBytes() in self._tokens.keys()): logger.error('Token already in wallet') return self._tokens[token.ScriptHash.ToBytes()] = token
Add a NEP-5 compliant token to the wallet. Args: token (NEP5Token): an instance of type neo.Wallets.NEP5Token. Note: Logs an error if the token already exists in the wallet.
codesearchnet
def list_pop(list_, i, opts): assert isinstance(opts, ListPopOpts) if isinstance(list_, tensor_array_ops.TensorArray): raise ValueError('TensorArray does not support item removal') elif tensor_util.is_tf_type(list_): if list_.dtype == dtypes.variant: return _tf_tensor_list_pop(list_, i, opts) else: raise ValueError('tensor lists are expected to be Tensors with dtype=tf.variant, instead found %s' % list_) else: return _py_list_pop(list_, i)
The list pop function. Note: it is unspecified whether list_ will be mutated or not. If list_ is a TensorFlow entity, it will typically not be mutated. If list_ is a plain list, it will be. In general, if the list is mutated then the return value should point to the original entity. Args: list_: An entity that supports pop semantics. i: Optional index to pop from. May be None. opts: A ListPopOpts. Returns: Tuple (x, out_list_): out_list_: same as list_, after the removal was performed. x: the removed element value. Raises: ValueError: if list_ is not of a known list-like type or the operation is not supported for that type.
github-repos
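A small sketch of the plain-Python branch (`_py_list_pop`) behaviour: pop from the end when no index is given, otherwise pop at the index, mutating the list in place. The helper name and the `(list, value)` return pairing here are illustrative assumptions rather than AutoGraph's exact API.

```python
def py_list_pop(list_, i=None):
    """Pop from a plain Python list, mutating it in place.

    Returns the (mutated) list together with the removed value; the pairing
    order is an assumption for this sketch.
    """
    if i is None:
        value = list_.pop()   # remove the last element
    else:
        value = list_.pop(i)  # remove the element at index i
    return list_, value


items = [10, 20, 30, 40]
items, last = py_list_pop(items)
items, second = py_list_pop(items, 1)
print(items, last, second)  # [10, 30] 40 20
```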
def device_type_from_string(cl_device_type_str): cl_device_type_str = cl_device_type_str.upper() if hasattr(cl.device_type, cl_device_type_str): return getattr(cl.device_type, cl_device_type_str) return None
Converts values like ``gpu`` to a pyopencl device type. Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned. Args: cl_device_type_str (str): The string we want to convert to a device type. Returns: cl.device_type: the pyopencl device type.
juraj-google-style
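Assuming pyopencl is installed and `device_type_from_string` above is in scope (its import path is not shown in the snippet, so treat it as an assumption), usage is a simple string-to-enum lookup:

```python
import pyopencl as cl

# Expected behaviour, assuming device_type_from_string is importable.
assert device_type_from_string('gpu') == cl.device_type.GPU
assert device_type_from_string('CPU') == cl.device_type.CPU
# Strings that do not name a pyopencl device type fall through to None.
assert device_type_from_string('quantum') is None
```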
def list_experiments(self, collection_name): exp = ExperimentResource(name='', collection_name=collection_name, coord_frame='foo') return self._list_resource(exp)
List all experiments that belong to a collection. Args: collection_name (string): Name of the parent collection. Returns: (list) Raises: requests.HTTPError on failure.
codesearchnet
def bofh_excuse(how_many=1): excuse_path = os.path.join(os.path.dirname(__file__), 'bofh_excuses.json') with open(excuse_path, 'r') as _f: excuse_dict = json.load(_f) return [generate_random_string(excuse_dict) for _ in range(int(how_many))]
Generate random BOFH themed technical excuses! Args: how_many: Number of excuses to generate. (Default: 1) Returns: A list of BOFH excuses.
codesearchnet
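A quick usage sketch, assuming the package ships its `bofh_excuses.json` data file next to the module so `bofh_excuse` is callable as-is:

```python
# Print three randomly generated excuses; the function returns a list of strings.
for excuse in bofh_excuse(3):
    print('-', excuse)
```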
def get_rng(obj=None): seed = (((id(obj) + os.getpid()) + int(datetime.now().strftime('%Y%m%d%H%M%S%f'))) % 4294967295) if (_RNG_SEED is not None): seed = _RNG_SEED return np.random.RandomState(seed)
Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG.
codesearchnet
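A self-contained sketch of the seeding scheme above: mix the object's `id()`, the process id and a timestamp, reduce modulo 2**32 - 1, and fall back to a fixed seed when one is configured. The module-level `_FIXED_SEED` name here is an assumption standing in for `_RNG_SEED`.

```python
import os
from datetime import datetime

import numpy as np

_FIXED_SEED = None  # set to an int to make every RNG reproducible


def get_rng(obj=None):
    """Return a numpy RandomState seeded from the object, the pid and the clock."""
    seed = (id(obj) + os.getpid()
            + int(datetime.now().strftime('%Y%m%d%H%M%S%f'))) % 4294967295
    if _FIXED_SEED is not None:
        seed = _FIXED_SEED
    return np.random.RandomState(seed)


rng = get_rng('some-dataflow')
print(rng.randint(0, 100, size=3))

# With a fixed seed, two RNGs produce identical streams.
_FIXED_SEED = 42
a = get_rng(object()).randint(0, 100, size=3)
b = get_rng(object()).randint(0, 100, size=3)
print(a, b)  # identical arrays
```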
def Create(self, request, global_params=None): config = self.GetMethodConfig('Create') return self._RunMethod(config, request, global_params=global_params)
Create an association between a GCP project and a GitHub Enterprise server. Args: request: (CloudbuildProjectsGithubEnterpriseConfigsCreateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Operation) The response message.
github-repos
def FindClonedClients(token=None): index = client_index.CreateClientIndex(token=token) clients = index.LookupClients(['.']) hw_infos = _GetHWInfos(clients, token=token) clients_with_multiple_serials = [client_id for (client_id, serials) in iteritems(hw_infos) if (len(serials) > 1)] client_list = aff4.FACTORY.MultiOpen(clients_with_multiple_serials, age=aff4.ALL_TIMES, token=token) cloned_clients = [] for c in client_list: hwis = c.GetValuesForAttribute(c.Schema.HARDWARE_INFO) max_index = {} min_index = {} ids = set() for (i, hwi) in enumerate(hwis): s = hwi.serial_number max_index[s] = i if (s not in min_index): min_index[s] = i ids.add(s) ranges = [] for hwid in ids: ranges.append((min_index[hwid], max_index[hwid])) ranges.sort() for i in range((len(ranges) - 1)): if (ranges[i][1] > ranges[(i + 1)][0]): cloned_clients.append(c) msg = 'Found client with multiple, overlapping serial numbers: %s' logging.info(msg, c.urn) for hwi in c.GetValuesForAttribute(c.Schema.HARDWARE_INFO): logging.info('%s %s', hwi.age, hwi.serial_number) break return cloned_clients
A script to find multiple machines reporting the same client_id. This script looks at the hardware serial numbers that a client reported in over time (they get collected with each regular interrogate). We have seen that sometimes those serial numbers change - for example when a disk is put in a new machine - so reporting multiple serial numbers does not flag a client immediately as a cloned machine. In order to be shown here by this script, the serial number has to be alternating between two values. Args: token: datastore token. Returns: A list of clients that report alternating hardware ids.
codesearchnet
def build_from_token_counts(self, token_counts, min_count, num_iterations=4, reserved_tokens=None, max_subtoken_length=None): if (reserved_tokens is None): reserved_tokens = RESERVED_TOKENS else: for (default, proposed) in zip(RESERVED_TOKENS, reserved_tokens): if (default != proposed): raise ValueError('RESERVED_TOKENS must be a prefix of reserved_tokens.') alphabet_tokens = chain(six.iterkeys(token_counts), [native_to_unicode(t) for t in reserved_tokens]) self._init_alphabet_from_tokens(alphabet_tokens) self._init_subtokens_from_list(list(self._alphabet), reserved_tokens=reserved_tokens) if (min_count < 1): min_count = 1 for i in range(num_iterations): tf.logging.info('Iteration {0}'.format(i)) subtoken_counts = collections.defaultdict(int) for (token, count) in six.iteritems(token_counts): iter_start_time = time.time() escaped_token = _escape_token(token, self._alphabet) subtokens = self._escaped_token_to_subtoken_strings(escaped_token) start = 0 for subtoken in subtokens: last_position = (len(escaped_token) + 1) if (max_subtoken_length is not None): last_position = min(last_position, (start + max_subtoken_length)) for end in range((start + 1), last_position): new_subtoken = escaped_token[start:end] subtoken_counts[new_subtoken] += count start += len(subtoken) iter_time_secs = (time.time() - iter_start_time) if (iter_time_secs > 0.1): tf.logging.info(u'Processing token [{0}] took {1} seconds, consider setting Text2TextProblem.max_subtoken_length to a smaller value.'.format(token, iter_time_secs)) len_to_subtoken_strings = [] for (subtoken_string, count) in six.iteritems(subtoken_counts): lsub = len(subtoken_string) if (count >= min_count): while (len(len_to_subtoken_strings) <= lsub): len_to_subtoken_strings.append(set()) len_to_subtoken_strings[lsub].add(subtoken_string) new_subtoken_strings = [] for lsub in range((len(len_to_subtoken_strings) - 1), 0, (- 1)): subtoken_strings = len_to_subtoken_strings[lsub] for subtoken_string in subtoken_strings: count = subtoken_counts[subtoken_string] if (count >= min_count): if (subtoken_string not in self._alphabet): new_subtoken_strings.append((count, subtoken_string)) for l in range(1, lsub): subtoken_counts[subtoken_string[:l]] -= count new_subtoken_strings.extend(((subtoken_counts.get(a, 0), a) for a in self._alphabet)) new_subtoken_strings.sort(reverse=True) new_subtoken_strings = [subtoken for (_, subtoken) in new_subtoken_strings] if reserved_tokens: escaped_reserved_tokens = [_escape_token(native_to_unicode(t), self._alphabet) for t in reserved_tokens] new_subtoken_strings = (escaped_reserved_tokens + new_subtoken_strings) self._init_subtokens_from_list(new_subtoken_strings) tf.logging.info(('vocab_size = %d' % self.vocab_size))
Train a SubwordTextEncoder based on a dictionary of word counts. Args: token_counts: a dictionary of Unicode strings to int. min_count: an integer - discard subtokens with lower counts. num_iterations: an integer. how many iterations of refinement. reserved_tokens: List of reserved tokens. The global variable `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this argument is `None`, it will use `RESERVED_TOKENS`. max_subtoken_length: Maximum length of a subtoken. If this is not set, then the runtime and memory use of creating the vocab is quadratic in the length of the longest token. If this is set, then it is instead O(max_subtoken_length * length of longest token). Raises: ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it is not clear what the space is being reserved for, or when it will be filled in.
codesearchnet
def read_from_file(path, file_type='text', exception=ScriptWorkerException): FILE_TYPE_MAP = {'text': 'r', 'binary': 'rb'} if (file_type not in FILE_TYPE_MAP): raise exception('Unknown file_type {} not in {}!'.format(file_type, FILE_TYPE_MAP)) try: with open(path, FILE_TYPE_MAP[file_type]) as fh: return fh.read() except (OSError, FileNotFoundError) as exc: raise exception("Can't read_from_file {}: {}".format(path, str(exc)))
Read from ``path``. Small helper function to read from ``file``. Args: path (str): the path to read from. file_type (str, optional): the type of file. Currently accepts ``text`` or ``binary``. Defaults to ``text``. exception (Exception, optional): the exception to raise if unable to read from the file. Defaults to ``ScriptWorkerException``. Returns: None: if unable to read from ``path`` and ``exception`` is ``None`` str or bytes: the contents of ``path`` Raises: Exception: if ``exception`` is set.
codesearchnet
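A quick usage sketch with the `read_from_file` helper above in scope (in scriptworker it lives in a utils module; the exact import path is an assumption):

```python
import tempfile

# Write a small file, then read it back in text and binary modes.
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('hello scriptworker\n')
    path = tmp.name

print(read_from_file(path))                      # 'hello scriptworker\n'
print(read_from_file(path, file_type='binary'))  # b'hello scriptworker\n'

# An unknown file_type raises the configured exception.
try:
    read_from_file(path, file_type='json')
except Exception as exc:
    print(type(exc).__name__, exc)
```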
def write_compacted(g): d_nodes = {} d_edges = {} def conv(value): if isinstance(value, basestring): return value.strip('"') else: return value for node in g.nodes(): label = None attrs = [] for (k, v) in sorted(g.node_attributes(node)): v_ = conv(v) if (k == 'label'): label = v_ else: attrs.append((k, v_)) value = ((node, label) if label else node) d_nodes.setdefault(tuple(attrs), []).append(value) for edge in g.edges(): attrs = [(k, conv(v)) for (k, v) in sorted(g.edge_attributes(edge))] label = str(g.edge_label(edge)) value = (tuple((list(edge) + [label])) if label else edge) d_edges.setdefault(tuple(attrs), []).append(tuple(value)) doc = dict(nodes=d_nodes.items(), edges=d_edges.items()) contents = str(doc) return contents
Write a graph in our own compacted format. Returns: str.
codesearchnet
def validate_with_tags(self, tags, confidence): result = {'intent_type': self.name} intent_confidence = 0.0 local_tags = tags[:] used_tags = [] for (require_type, attribute_name) in self.requires: (required_tag, canonical_form, confidence) = find_first_tag(local_tags, require_type) if (not required_tag): result['confidence'] = 0.0 return (result, []) result[attribute_name] = canonical_form if (required_tag in local_tags): local_tags.remove(required_tag) used_tags.append(required_tag) intent_confidence += confidence if (len(self.at_least_one) > 0): best_resolution = resolve_one_of(tags, self.at_least_one) if (not best_resolution): result['confidence'] = 0.0 return (result, []) else: for key in best_resolution: result[key] = best_resolution[key][0].get('key') intent_confidence += 1.0 used_tags.append(best_resolution) if (best_resolution in local_tags): local_tags.remove(best_resolution) for (optional_type, attribute_name) in self.optional: (optional_tag, canonical_form, conf) = find_first_tag(local_tags, optional_type) if ((not optional_tag) or (attribute_name in result)): continue result[attribute_name] = canonical_form if (optional_tag in local_tags): local_tags.remove(optional_tag) used_tags.append(optional_tag) intent_confidence += 1.0 total_confidence = ((intent_confidence / len(tags)) * confidence) (target_client, canonical_form, confidence) = find_first_tag(local_tags, CLIENT_ENTITY_NAME) result['target'] = (target_client.get('key') if target_client else None) result['confidence'] = total_confidence return (result, used_tags)
Validate whether tags have the required entities for this intent to fire Args: tags(list): Tags and Entities used for validation confidence(float): Confidence multiplier applied to the final intent confidence Returns: intent, tags: Returns the intent and the tags used by the intent; on failure to meet the required entities, returns an intent with a confidence of 0.0 and an empty list of tags.
codesearchnet
def _ParseFileData(self, knowledge_base, file_object): line_reader = line_reader_file.BinaryLineReader(file_object) try: reader = line_reader_file.BinaryDSVReader(line_reader, b':') except csv.Error as exception: raise errors.PreProcessFail( 'Unable to read: {0:s} with error: {1!s}'.format( self.ARTIFACT_DEFINITION_NAME, exception)) for row in reader: if len(row) < 7 or not row[0] or not row[2]: continue try: username = row[0].decode('utf-8') except UnicodeDecodeError: logger.error('Unable to decode username.') continue try: identifier = row[2].decode('utf-8') except UnicodeDecodeError: logger.error('Unable to decode identifier.') continue group_identifier = None if row[3]: try: group_identifier = row[3].decode('utf-8') except UnicodeDecodeError: logger.error('Unable to decode group identifier.') full_name = None if row[4]: try: full_name = row[4].decode('utf-8') except UnicodeDecodeError: logger.error('Unable to decode full name.') user_directory = None if row[5]: try: user_directory = row[5].decode('utf-8') except UnicodeDecodeError: logger.error('Unable to decode user directory.') shell = None if row[6]: try: shell = row[6].decode('utf-8') except UnicodeDecodeError: logger.error('Unable to decode shell.') user_account = artifacts.UserAccountArtifact( identifier=identifier, username=username) user_account.group_identifier = group_identifier user_account.full_name = full_name user_account.user_directory = user_directory user_account.shell = shell try: knowledge_base.AddUserAccount(user_account) except KeyError: pass
Parses file content (data) for user account preprocessing attributes. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
juraj-google-style
def expandvars(text, environ=None): if '$' not in text: return text i = 0 if environ is None: environ = os.environ while True: m = ENV_VAR_REGEX.search(text, i) if not m: break i, j = m.span(0) name = m.group(1) if name.startswith('{') and name.endswith('}'): name = name[1:-1] if name in environ: tail = text[j:] text = text[:i] + environ[name] i = len(text) text += tail else: i = j return text
Expand shell variables of form $var and ${var}. Unknown variables are left unchanged. Args: text (str): String to expand. environ (dict): Environ dict to use for expansions, defaults to os.environ. Returns: The expanded string.
juraj-google-style
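With the function above in scope, expansion against an explicit environ dict looks like this; the example variables are made up for illustration:

```python
environ = {'REZ_PACKAGES_PATH': '/opt/rez/packages', 'USER': 'alice'}

print(expandvars('$REZ_PACKAGES_PATH/${USER}/dev', environ=environ))
# -> /opt/rez/packages/alice/dev

# Unknown variables are left untouched rather than raising or expanding to ''.
print(expandvars('$UNSET_VAR/bin', environ=environ))
# -> $UNSET_VAR/bin
```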
def __init__(self, logger, script_type, default_shell=None): self.logger = logger self.script_type = script_type self.default_shell = default_shell or '/bin/bash'
Constructor. Args: logger: logger object, used to write to SysLog and serial port. script_type: string, the type of the script we are running. default_shell: string, the default shell to execute the script.
juraj-google-style
def addgroup(name, group): if six.PY2: name = _to_unicode(name) group = _to_unicode(group) name = _cmd_quote(name) group = _cmd_quote(group).lstrip('\'').rstrip('\'') user = info(name) if not user: return False if group in user['groups']: return True cmd = 'net localgroup "{0}" {1} /add'.format(group, name) ret = __salt__['cmd.run_all'](cmd, python_shell=True) return ret['retcode'] == 0
Add user to a group Args: name (str): The user name to add to the group group (str): The name of the group to which to add the user Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' user.addgroup jsnuffy 'Power Users'
juraj-google-style
def ListClients(self, request, timeout=None): return self._RetryLoop((lambda t: self._stub.ListClients(request, timeout=t)))
Provides basic information about Fleetspeak clients. Args: request: fleetspeak.admin.ListClientsRequest timeout: How many seconds to try for. Returns: fleetspeak.admin.ListClientsResponse
codesearchnet
def _CalculateHashesFileEntry( self, file_system, file_entry, parent_full_path, output_writer): full_path = file_system.JoinPath([parent_full_path, file_entry.name]) for data_stream in file_entry.data_streams: hash_value = self._CalculateHashDataStream(file_entry, data_stream.name) display_path = self._GetDisplayPath( file_entry.path_spec, full_path, data_stream.name) output_writer.WriteFileHash(display_path, hash_value or 'N/A') for sub_file_entry in file_entry.sub_file_entries: self._CalculateHashesFileEntry( file_system, sub_file_entry, full_path, output_writer)
Recursively calculates hashes starting with the file entry. Args: file_system (dfvfs.FileSystem): file system. file_entry (dfvfs.FileEntry): file entry. parent_full_path (str): full path of the parent file entry. output_writer (StdoutWriter): output writer.
juraj-google-style
def return_handler( self, call_node, function_nodes, saved_function_call_index, first_node ): if any(isinstance(node, YieldNode) for node in function_nodes): rhs_prefix = 'yld_' elif any(isinstance(node, ConnectToExitNode) for node in function_nodes): rhs_prefix = 'ret_' else: return LHS = CALL_IDENTIFIER + 'call_' + str(saved_function_call_index) RHS = rhs_prefix + get_call_names_as_string(call_node.func) return_node = RestoreNode( LHS + ' = ' + RHS, LHS, [RHS], line_number=call_node.lineno, path=self.filenames[-1] ) return_node.first_node = first_node self.nodes[-1].connect(return_node) self.nodes.append(return_node)
Handle the return from a function during a function call. Args: call_node(ast.Call) : The node that calls the definition. function_nodes(list[Node]): List of nodes of the function being called. saved_function_call_index(int): Unique number for each call. first_node(EntryOrExitNode or RestoreNode): Used to connect previous statements to this function.
juraj-google-style
def ExamineEvent(self, mediator, event): self._EnsureRequesterStarted() path_spec = event.pathspec event_identifiers = self._event_identifiers_by_pathspec[path_spec] event_identifier = event.GetIdentifier() event_identifiers.append(event_identifier) if event.data_type not in self.DATA_TYPES or not self._analyzer.lookup_hash: return lookup_hash = '{0:s}_hash'.format(self._analyzer.lookup_hash) lookup_hash = getattr(event, lookup_hash, None) if not lookup_hash: display_name = mediator.GetDisplayNameForPathSpec(path_spec) logger.warning(( 'Lookup hash attribute: {0:s}_hash missing from event that ' 'originated from: {1:s}.').format( self._analyzer.lookup_hash, display_name)) return path_specs = self._hash_pathspecs[lookup_hash] path_specs.append(path_spec) if len(path_specs) == 1: self.hash_queue.put(lookup_hash)
Evaluates whether an event contains the right data for a hash lookup. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event.
juraj-google-style
def _Fail(self, msg): raise TruthAssertionError(msg)
Fail unconditionally. Args: msg: string to include in the exception. Raises: TruthAssertionError: always, by design.
github-repos
def serialize_to_transport(self, encoding='utf-8', xslt_url=None): assert (encoding in ('utf-8', 'UTF-8')) dataone_exception_pyxb = self.get_pyxb() return d1_common.xml.serialize_for_transport(dataone_exception_pyxb, xslt_url=xslt_url)
Serialize to XML ``bytes`` with prolog. Args: encoding: str Encoding to use for XML doc bytes xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: bytes: XML holding a DataONEError based type.
codesearchnet
def close(self): if self.reuse: logger.debug("Ipcontroller not shutting down: reuse enabled") return if self.mode == "manual": logger.debug("Ipcontroller not shutting down: Manual mode") return try: pgid = os.getpgid(self.proc.pid) os.killpg(pgid, signal.SIGTERM) time.sleep(0.2) os.killpg(pgid, signal.SIGKILL) try: self.proc.wait(timeout=1) x = self.proc.returncode if x == 0: logger.debug("Controller exited with {0}".format(x)) else: logger.error("Controller exited with {0}. May require manual cleanup".format(x)) except subprocess.TimeoutExpired: logger.warn("Ipcontroller process:{0} cleanup failed. May require manual cleanup".format(self.proc.pid)) except Exception as e: logger.warn("Failed to kill the ipcontroller process[{0}]: {1}".format(self.proc.pid, e))
Terminate the controller process and its child processes. Args: - None
juraj-google-style
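The teardown above relies on the POSIX process-group pattern: SIGTERM the whole group, give it a moment, then SIGKILL whatever is left. A standalone sketch of that pattern (POSIX-only; the `sleep` command and timings are arbitrary choices for the demo):

```python
import os
import signal
import subprocess
import time

# Start a child in its own session so its process group is distinct from ours.
proc = subprocess.Popen(['sleep', '60'], start_new_session=True)
pgid = os.getpgid(proc.pid)

# Ask the whole group to terminate, then escalate if anything is still alive.
os.killpg(pgid, signal.SIGTERM)
time.sleep(0.2)
try:
    os.killpg(pgid, signal.SIGKILL)
except ProcessLookupError:
    pass  # the group already exited after SIGTERM

proc.wait(timeout=1)
print('child exited with', proc.returncode)
```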
def call(self, inputs: List[Any], global_state: pg.geno.AttributeDict, step: int=0) -> List[Any]: raise NotImplementedError()
Subclasses should override this method. The `global_state` and `step` are optional for the subclasses' call signature. Args: inputs: A list of values as inputs. global_state: An `AttributeDict` object as the global state container, which is readable/writable during the operation. step: Number of examples historically proposed, which can be used for determining a cross over schedule. Returns: A list of values as output of current operation.
github-repos
def play_human(env): try: play(env, fps=env.metadata['video.frames_per_second']) except KeyboardInterrupt: pass env.close()
Play the environment using keyboard as a human. Args: env (gym.Env): the initialized gym environment to play Returns: None
juraj-google-style
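A hedged usage sketch, assuming `play_human` above is importable and that an environment with keyboard-friendly rendering is installed; the environment id below is only an example and may require extra packages such as `gym[atari]`:

```python
import gym

# Hypothetical environment id; any env exposing
# metadata['video.frames_per_second'] and keyboard mappings will do.
env = gym.make('Breakout-v0')
play_human(env)  # opens a window and forwards keyboard input until Ctrl+C
```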
def delete(filething): t = OggSpeex(filething) filething.fileobj.seek(0) t.delete(filething)
delete(filething) Arguments: filething (filething) Raises: mutagen.MutagenError Remove tags from a file.
juraj-google-style
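Calling the module-level helper above strips the tags in place; the file path below is a placeholder assumption:

```python
from mutagen.oggspeex import delete

# Placeholder path: any existing Ogg Speex file on disk.
delete('example.spx')  # removes the Vorbis comment tags, keeping the audio intact
```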