Columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 distinct values)
def load_embeddings(lang='en', task='embeddings', type='cw', normalize=False): src_dir = ('_'.join((type, task)) if type else task) p = locate_resource(src_dir, lang) e = Embedding.load(p) if (type == 'cw'): e.apply_expansion(CaseExpander) e.apply_expansion(DigitExpander) if (type == 'sgns'): e.apply_expansion(CaseExpander) if (type == 'ue'): e.apply_expansion(CaseExpander) if normalize: e.normalize_words(inplace=True) return e
Return a word embeddings object for `lang` and of type `type`. Args: lang (string): language code. task (string): parameters that define task. type (string): skipgram, cw, cbow ... normalize (boolean): if True, returns normalized word embedding vectors.
codesearchnet
def ValidateKey(cls, key_path): for prefix in cls.VALID_PREFIXES: if key_path.startswith(prefix): return if key_path.startswith('HKEY_CURRENT_USER\\'): raise errors.FormatError( 'HKEY_CURRENT_USER\\ is not supported, instead use: ' 'HKEY_USERS\\%%users.sid%%\\') raise errors.FormatError( 'Unsupported Registry key path: {0:s}'.format(key_path))
Validates this key against supported key names. Args: key_path (str): path of a Windows Registry key. Raises: FormatError: when key is not supported.
juraj-google-style
def _sign_input(cls, input_, message, key_pairs): if isinstance(input_.fulfillment, Ed25519Sha256): return cls._sign_simple_signature_fulfillment(input_, message, key_pairs) elif isinstance(input_.fulfillment, ThresholdSha256): return cls._sign_threshold_signature_fulfillment(input_, message, key_pairs) else: raise ValueError("Fulfillment couldn't be matched to " 'Cryptocondition fulfillment type.')
Signs a single Input. Note: This method works only for the following Cryptoconditions currently: - Ed25519Fulfillment - ThresholdSha256. Args: input_ (:class:`~bigchaindb.common.transaction.Input`): The Input to be signed. message (str): The message to be signed. key_pairs (dict): The keys to sign the Transaction with.
juraj-google-style
def ExportNEP2(self, passphrase): if len(passphrase) < 2: raise ValueError("Passphrase must have a minimum of 2 characters") address_hash_tmp = hashlib.sha256(self.GetAddress().encode("utf-8")).digest() address_hash_tmp2 = hashlib.sha256(address_hash_tmp).digest() address_hash = address_hash_tmp2[:4] pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8') derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES) derived1 = derived[:32] derived2 = derived[32:] xor_ed = xor_bytes(bytes(self.PrivateKey), derived1) cipher = AES.new(derived2, AES.MODE_ECB) encrypted = cipher.encrypt(xor_ed) assembled = bytearray() assembled.extend(NEP_HEADER) assembled.extend(NEP_FLAG) assembled.extend(address_hash) assembled.extend(encrypted) encrypted_key_nep2 = base58.b58encode_check(bytes(assembled)) return encrypted_key_nep2.decode("utf-8")
Export the encrypted private key in NEP-2 format. Args: passphrase (str): The password to encrypt the private key with, as unicode string Returns: str: The NEP-2 encrypted private key
juraj-google-style
def decrypt(key, ciphertext, shift_function=shift_case_english): return [shift_function(key, symbol) for symbol in ciphertext]
Decrypt Shift enciphered ``ciphertext`` using ``key``. Examples: >>> ''.join(decrypt(3, "KHOOR")) HELLO >>> decrypt(15, [0xcf, 0x9e, 0xaf, 0xe0], shift_bytes) [0xde, 0xad, 0xbe, 0xef] Args: key (int): The shift to use ciphertext (iterable): The symbols to decrypt shift_function (function (shift, symbol)): Shift function to apply to symbols in the ciphertext Returns: Decrypted ciphertext, list of plaintext symbols
codesearchnet
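A minimal, self-contained sketch of how the `decrypt` entry above can be exercised; `shift_case_english` here is an assumed stand-in that shifts uppercase letters back by `key`, chosen to reproduce the docstring's decrypt(3, "KHOOR") -> HELLO example.

import string

def shift_case_english(key, symbol):
    # Shift an uppercase letter back by `key`; leave other symbols untouched.
    if symbol in string.ascii_uppercase:
        return string.ascii_uppercase[(string.ascii_uppercase.index(symbol) - key) % 26]
    return symbol

def decrypt(key, ciphertext, shift_function=shift_case_english):
    return [shift_function(key, symbol) for symbol in ciphertext]

print(''.join(decrypt(3, "KHOOR")))  # HELLO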
def _find_suite_class(): test_suites = _find_suite_classes_in_module(sys.modules['__main__']) if len(test_suites) == 0: logging.debug('No suite class found in the __main__ module, trying to find it in the module of the caller of suite_runner.run_suite_class method.') stacks = inspect.stack() if len(stacks) < 2: logging.debug('Failed to get the caller stack of run_suite_class. Got stacks: %s', stacks) else: run_suite_class_caller_frame_info = inspect.stack()[2] caller_frame = run_suite_class_caller_frame_info.frame module = inspect.getmodule(caller_frame) if module is None: logging.debug('Failed to find module for frame %s', caller_frame) else: test_suites = _find_suite_classes_in_module(module) if len(test_suites) != 1: logging.error('Expected 1 test class per file, found %s.', [t.__name__ for t in test_suites]) sys.exit(1) return test_suites[0]
Finds the test suite class. First search for test suite classes in the __main__ module. If no test suite class is found, search in the module that is calling `suite_runner.run_suite_class`. Walk through module members and find the subclass of BaseSuite. Only one subclass is allowed. Returns: The test suite class in the test module.
github-repos
def __init__(self, key_path_prefix, windows_path, unique_key_paths): super(WinRegistryFileMapping, self).__init__() self.key_path_prefix = key_path_prefix self.unique_key_paths = unique_key_paths self.windows_path = windows_path
Initializes the Windows Registry file mapping. Args: key_path_prefix (str): Windows Registry key path prefix. windows_path (str): Windows path to the Windows Registry file, such as: C:\\Windows\\System32\\config\\SYSTEM unique_key_paths (list[str]): key paths unique to the Windows Registry file.
juraj-google-style
def peek_record(self, model_class, record_id): if self._cache: return self._cache.get_record(model_class.__name__, record_id) else: return None
Return an instance of the model_class from the cache if it is present. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
codesearchnet
def run(self, dag): q_gate_list = ['cx', 'cy', 'cz', 'h', 'x', 'y', 'z'] cancellation_sets = defaultdict(lambda: []) for wire in dag.wires: wire_name = "{0}[{1}]".format(str(wire[0].name), str(wire[1])) wire_commutation_set = self.property_set['commutation_set'][wire_name] for com_set_idx, com_set in enumerate(wire_commutation_set): if com_set[0].type in ['in', 'out']: continue for node in com_set: num_qargs = len(node.qargs) if num_qargs == 1 and node.name in q_gate_list: cancellation_sets[(node.name, wire_name, com_set_idx)].append(node) if num_qargs == 1 and node.name in ['u1', 'rz', 't', 's']: cancellation_sets[('z_rotation', wire_name, com_set_idx)].append(node) elif num_qargs == 2 and node.qargs[0] == wire: second_op_name = "{0}[{1}]".format(str(node.qargs[1][0].name), str(node.qargs[1][1])) q2_key = (node.name, wire_name, second_op_name, self.property_set['commutation_set'][(node, second_op_name)]) cancellation_sets[q2_key].append(node) for cancel_set_key in cancellation_sets: set_len = len(cancellation_sets[cancel_set_key]) if ((set_len) > 1 and cancel_set_key[0] in q_gate_list): gates_to_cancel = cancellation_sets[cancel_set_key] for c_node in gates_to_cancel[:(set_len // 2) * 2]: dag.remove_op_node(c_node) elif ((set_len) > 1 and cancel_set_key[0] == 'z_rotation'): run = cancellation_sets[cancel_set_key] run_qarg = run[0].qargs[0] total_angle = 0.0 for current_node in run: if (current_node.condition is not None or len(current_node.qargs) != 1 or current_node.qargs[0] != run_qarg): raise TranspilerError("internal error") if current_node.name in ['u1', 'rz']: current_angle = float(current_node.op.params[0]) elif current_node.name == 't': current_angle = sympy.pi / 4 elif current_node.name == 's': current_angle = sympy.pi / 2 total_angle = current_angle + total_angle new_op = U1Gate(total_angle) new_qarg = (QuantumRegister(1, 'q'), 0) new_dag = DAGCircuit() new_dag.add_qreg(new_qarg[0]) new_dag.apply_operation_back(new_op, [new_qarg]) dag.substitute_node_with_dag(run[0], new_dag) for current_node in run[1:]: dag.remove_op_node(current_node) return dag
Run the CommutativeCancellation pass on a dag Args: dag (DAGCircuit): the DAG to be optimized. Returns: DAGCircuit: the optimized DAG. Raises: TranspilerError: when the 1 qubit rotation gates are not found
juraj-google-style
def __init__(self, variable): if not isinstance(variable, variables.Variable): raise ValueError('variable must be of type tf.ResourceVariable, but got: %s' % variable) if not variable.dtype.is_floating: raise ValueError('variable must be a floating point variable but has type: %s' % variable.dtype.name) self._variable = variable self._op = 'delegate'
Creates an AutoCastVariable instance. Args: variable: A floating-point resource variable to wrap. Raises: ValueError: If `variable` is not a floating-point resource variable
github-repos
def handle_closed_task(self, task_name, record): if (task_name not in self.tasks): return if self.main_failed: self.mark_parent_tasks_as_failed(self.cur_task) if self.tasks[task_name].failed: record.msg = ColorFormatter.colored('red', END_TASK_ON_ERROR_MSG) else: record.msg = ColorFormatter.colored('green', END_TASK_MSG) record.msg += (' (in %s)' % self.tasks[task_name].elapsed_time()) if (self.should_show_by_depth() or self.tasks[task_name].force_show): if self.tasks[task_name].force_show: self.handle_error() self.pretty_emit(record, is_header=True) self.close_children_tasks(task_name) self.tasks.pop(task_name)
Do everything needed when a task is closed Params: task_name (str): name of the task that is finishing record (logging.LogRecord): log record with all the info Returns: None
codesearchnet
def __init__(self, filename, flag): super(LMDBNoLockDatabase, self).__init__() create = bool(flag == 'c') if flag == 'n': if os.path.isfile(filename): os.remove(filename) create = True self._lmdb = lmdb.Environment( path=filename, map_size=1024**4, map_async=True, writemap=True, readahead=False, subdir=False, create=create, lock=True)
Constructor for the LMDBNoLockDatabase class. Args: filename (str): The filename of the database file. flag (str): a flag indicating the mode for opening the database. Refer to the documentation for anydbm.open().
juraj-google-style
def _countdown(self, waitTime=0, printString='Waiting %*d seconds...', verbose=True): if (waitTime <= 0): waitTime = self.__retryDelay for remaining in range(waitTime, 0, (- 1)): _vPrint(verbose, ('\r' + (printString % (len(str(waitTime)), remaining))), end='', flush=True) time.sleep(1) if verbose: _vPrint(verbose, ('\r' + (printString % (len(str(waitTime)), 0))))
Makes a pretty countdown. Args: waitTime (Optional[int]): Number of seconds to count down. If 0 or less, the configured retry delay is used. Defaults to 0. printString (Optional[str]): A counter message to display. Defaults to 'Waiting %*d seconds...' verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True.
codesearchnet
def cancel(self, force=False): return self.rest_client._sc._delegator._cancel_job(self, force)
Cancel this job. Args: force (bool, optional): Forcefully cancel this job. Returns: bool: True if the job was cancelled, otherwise False if an error occurred.
juraj-google-style
def speak(self, message): campfire = self.get_campfire() if not isinstance(message, Message): message = Message(campfire, message) result = self._connection.post( "room/%s/speak" % self.id, {"message": message.get_data()}, parse_data=True, key="message" ) if result["success"]: return Message(campfire, result["data"]) return result["success"]
Post a message. Args: message (:class:`Message` or string): Message Returns: :class:`Message`: The posted message if successful, otherwise False.
juraj-google-style
def attribute(self, stream, name): if stream not in self._inputs: raise ValueError("Stream is not an input of this operator.") if len(self._inputs) == 1: return Expression('attribute', name) else: iport = self._op().inputPorts[self._inputs.index(stream)] return Expression('attribute', iport._alias + '.' + name)
Expression for an input attribute. An input attribute is an attribute on one of the input ports of the operator invocation. `stream` must have been used to declare this invocation. Args: stream(Stream): Stream the attribute is from. name(str): Name of the attribute. Returns: Expression: Expression representing the input attribute.
juraj-google-style
def UpdateTaskAsPendingMerge(self, task): with self._lock: is_abandoned = task.identifier in self._tasks_abandoned is_processing = task.identifier in self._tasks_processing is_queued = task.identifier in self._tasks_queued if not is_queued and not is_processing and not is_abandoned: raise KeyError('Status of task {0:s} is unknown.'.format( task.identifier)) if is_abandoned and task.has_retry: raise KeyError('Will not merge a task {0:s} with retry task.'.format( task.identifier)) if is_queued: logger.debug('Task {0:s} was queued, now merging.'.format( task.identifier)) del self._tasks_queued[task.identifier] if is_processing: logger.debug('Task {0:s} was processing, now merging.'.format( task.identifier)) del self._tasks_processing[task.identifier] if is_abandoned: logger.debug('Task {0:s} was abandoned, now merging.'.format( task.identifier)) del self._tasks_abandoned[task.identifier] self._tasks_pending_merge.PushTask(task) self.SampleTaskStatus(task, 'pending_merge') task.UpdateProcessingTime() self._UpdateLatestProcessingTime(task)
Updates the task manager to reflect the task is ready to be merged. Args: task (Task): task. Raises: KeyError: if the task was not queued, processing or abandoned, or the task was abandoned and has a retry task.
juraj-google-style
def indices(self): return self._indices
The indices of non-zero values in the represented dense tensor. Returns: A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the number of non-zero values in the tensor, and `ndims` is the rank.
github-repos
def __init__(self, rate=None, burst_size=None, experimenter=None): super().__init__(MeterBandType.OFPMBT_EXPERIMENTER, rate, burst_size) self.experimenter = experimenter
Create a MeterBandExperimenter with the optional parameters below. Args: rate (int): Rate for remarking packets. burst_size (int): Size of bursts. experimenter (int): Experimenter ID which takes the same form as in :class:`.ExperimenterHeader`.
juraj-google-style
def process(self, element): return re.findall("[\\w\\']+", element, re.UNICODE)
Returns the words of this element. The element is a line of text. Args: element: the element being processed, a line of text. Returns: The list of words found in the element.
github-repos
def sandbox_call(func: Callable[..., Any], *args, timeout: Optional[float]=None, **kwargs) -> Any: def _call(q, *args, **kwargs): def _run(): r = func(*args, **kwargs) try: return pickle.dumps(r) except BaseException as e: raise errors.SerializationError(f'Cannot serialize sandbox result: {r}', e) from e try: q.put(_run()) except Exception as e: q.put(e) q = multiprocessing.Queue() p = multiprocessing.Process(target=_call, args=tuple([q] + list(args)), kwargs=kwargs) try: p.start() x = q.get(timeout=timeout) except queue.Empty as e: if p.is_alive(): p.kill() raise TimeoutError(f'Execution time exceed {timeout} seconds.') from e finally: q.close() if isinstance(x, Exception): raise x try: return pickle.loads(x) except Exception as e: raise errors.SerializationError('Cannot deserialize the output from sandbox.', e) from e
Calls a function with sandboxing. Args: func: Function to call. *args: Positional arguments for `func` timeout: Execution timeout in seconds. If None, wait `func` to complete. **kwargs: Keyword arguments for `func`. Returns: Return value from `func`. Raises: TimeoutError: If the execution time exceeds the timeout. Exception: Exception raised from `func`.
github-repos
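A stripped-down sketch of the sandboxing idea in the `sandbox_call` entry above: run a callable in a child process and enforce a timeout. The library's pickling wrappers and error types are omitted, and the nested worker function means this sketch only runs under the 'fork' start method (the default on Linux).

import multiprocessing
import queue

def sandbox_call_sketch(func, *args, timeout=None, **kwargs):
    q = multiprocessing.Queue()

    def _call(q, *args, **kwargs):
        try:
            q.put(func(*args, **kwargs))
        except Exception as e:  # surface exceptions to the parent process
            q.put(e)

    p = multiprocessing.Process(target=_call, args=(q,) + args, kwargs=kwargs)
    p.start()
    try:
        result = q.get(timeout=timeout)
    except queue.Empty:
        if p.is_alive():
            p.kill()  # Python 3.7+
        raise TimeoutError('Execution time exceeded %s seconds.' % timeout)
    if isinstance(result, Exception):
        raise result
    return result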
def delete(self, url, callback, json=None): return self.adapter.delete(url, callback, json=json)
Delete a URL. Args: url(string): URL for the request callback(func): The response callback function Keyword Args: json(dict): JSON body for the request Returns: The result of the callback handling the response from the executed request
juraj-google-style
def __init__( self, cipher_mode=None, encryption_method=None, initialization_vector=None, key=None, parent=None, **kwargs): if not encryption_method or not parent: raise ValueError('Missing encryption method or parent value.') super(EncryptedStreamPathSpec, self).__init__(parent=parent, **kwargs) self.cipher_mode = cipher_mode self.encryption_method = encryption_method self.initialization_vector = initialization_vector self.key = key
Initializes a path specification. Note that the encrypted stream path specification must have a parent. Args: cipher_mode (Optional[str]): cipher mode. encryption_method (Optional[str]): method used to the encrypt the data. initialization_vector (Optional[bytes]): initialization vector. key (Optional[bytes]): key. parent (Optional[PathSpec]): parent path specification. Raises: ValueError: when encryption method or parent are not set.
juraj-google-style
def on_session_init(self, request): return framework.OnSessionInitResponse(framework.OnSessionInitAction.PROCEED)
Overrides on-session-init callback. Args: request: An instance of `OnSessionInitRequest`. Returns: An instance of `OnSessionInitResponse`.
github-repos
def on_moved(self, event): if not self._event_error: pathtools_options = { 'included_patterns': self.patterns, 'excluded_patterns': self.ignore_patterns, 'case_sensitive': self.case_sensitive, } if match_path(event.dest_path, **pathtools_options): self.logger.info(u"Change detected from a move on: %s", event.dest_path) self.compile_dependencies(event.dest_path)
Called when a file or a directory is moved or renamed. Many editors don't directly change a file, instead they make a transitional file like ``*.part`` then move it to the final filename. Args: event: Watchdog event, either ``watchdog.events.DirMovedEvent`` or ``watchdog.events.FileModifiedEvent``.
juraj-google-style
def has_auth_params(self, scheme): for k, v in iteritems(self.schemes[scheme][u'params']): if not v: return False return True
Check whether all information required for a given auth scheme have been supplied. Args: scheme (str): Name of the authentication scheme to check. One of Gem-Identify, Gem-Device, Gem-Application Returns: True if all required parameters for the specified scheme are present or False otherwise.
juraj-google-style
def dp010(self, value=None): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `dp010`'.format(value)) self._dp010 = value
Corresponds to IDD Field `dp010` Dew-point temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `dp010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def hdfs_path(ctx, path): HADOOP_SCHEMES = ['adl://', 'file://', 'hdfs://', 'viewfs://', 'wasb://'] if any((path.startswith(scheme) for scheme in HADOOP_SCHEMES)): return path elif path.startswith('/'): return (ctx.defaultFS + path) elif (ctx.defaultFS.startswith('hdfs://')): return '{0}/user/{1}/{2}'.format(ctx.defaultFS, getpass.getuser(), path) elif ctx.defaultFS.startswith('file://'): return '{0}/{1}/{2}'.format(ctx.defaultFS, ctx.working_dir[1:], path) else: logging.warn('Unknown scheme {0} with relative path: {1}'.format(ctx.defaultFS, path)) return '{0}/{1}'.format(ctx.defaultFS, path)
Convenience function to create a Tensorflow-compatible absolute HDFS path from relative paths Args: :ctx: TFNodeContext containing the metadata specific to this node in the cluster. :path: path to convert Returns: An absolute path prefixed with the correct filesystem scheme.
codesearchnet
def get_structures(self, chemsys_formula_id, final=True): prop = ('final_structure' if final else 'initial_structure') data = self.get_data(chemsys_formula_id, prop=prop) return [d[prop] for d in data]
Get a list of Structures corresponding to a chemical system, formula, or materials_id. Args: chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234). final (bool): Whether to get the final structure, or the initial (pre-relaxation) structure. Defaults to True. Returns: List of Structure objects.
codesearchnet
def set_soft_device_placement(enabled): context().soft_device_placement = enabled
Set if soft device placements should be allowed. Args: enabled: Whether to enable soft device placement.
github-repos
def halt(self): res = int(self._dll.JLINKARM_Halt()) if res == 0: time.sleep(1) return True return False
Halts the CPU Core. Args: self (JLink): the ``JLink`` instance Returns: ``True`` if halted, ``False`` otherwise.
juraj-google-style
def write(self, originalPrefix, newPrefix=None): numSpaces = max(2, (25 - len(self.name))) if (self.value is None): line = ('%s\n' % self.name) elif (self.name == 'WMS'): line = ('%s %s\n' % (self.name, self.value)) elif (newPrefix is None): line = ('%s%s%s\n' % (self.name, (' ' * numSpaces), self.value)) elif (originalPrefix in self.value): line = ('%s%s%s\n' % (self.name, (' ' * numSpaces), self.value.replace(originalPrefix, newPrefix))) else: line = ('%s%s%s\n' % (self.name, (' ' * numSpaces), self.value)) return line
Write project card to string. Args: originalPrefix (str): Original name to give to files that follow the project naming convention (e.g: prefix.gag). newPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None. Returns: str: Card and value as they would be written to the project file.
codesearchnet
def diff_commonSuffix(self, text1, text2): if ((not text1) or (not text2) or (text1[(- 1)] != text2[(- 1)])): return 0 pointermin = 0 pointermax = min(len(text1), len(text2)) pointermid = pointermax pointerend = 0 while (pointermin < pointermid): if (text1[(- pointermid):(len(text1) - pointerend)] == text2[(- pointermid):(len(text2) - pointerend)]): pointermin = pointermid pointerend = pointermin else: pointermax = pointermid pointermid = (((pointermax - pointermin) // 2) + pointermin) return pointermid
Determine the common suffix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the end of each string.
codesearchnet
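A standalone version of the common-suffix binary search above (the original is a method on a diff class), shown only to illustrate the halving loop; behaviour is checked on a small example.

def common_suffix_length(text1, text2):
    if not text1 or not text2 or text1[-1] != text2[-1]:
        return 0
    pointermin, pointerend = 0, 0
    pointermax = min(len(text1), len(text2))
    pointermid = pointermax
    while pointermin < pointermid:
        if text1[-pointermid:len(text1) - pointerend] == text2[-pointermid:len(text2) - pointerend]:
            pointermin = pointermid
            pointerend = pointermin
        else:
            pointermax = pointermid
        pointermid = (pointermax - pointermin) // 2 + pointermin
    return pointermid

assert common_suffix_length("running", "jogging") == 3  # common suffix "ing"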
def connection_required(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): "Wrapper function to check that the given ``JLink`` has been\n connected to a target.\n\n Args:\n self (JLink): the ``JLink`` instance\n args: list of arguments to pass to the wrapped function\n kwargs: key-word arguments dict to pass to the wrapped function\n\n Returns:\n The return value of the wrapped function.\n\n Raises:\n JLinkException: if the JLink's target is not connected.\n " if (not self.target_connected()): raise errors.JLinkException('Target is not connected.') return func(self, *args, **kwargs) return wrapper
Decorator to specify that a target connection is required in order for the given method to be used. Args: func (function): function being decorated Returns: The wrapper function.
codesearchnet
def add_it(workbench, file_list, labels): md5s = [] for filename in file_list: if (filename != '.DS_Store'): with open(filename, 'rb') as pe_file: base_name = os.path.basename(filename) md5 = workbench.store_sample(pe_file.read(), base_name, 'exe') workbench.add_node(md5, md5[:6], labels) md5s.append(md5) return md5s
Add the given file_list to workbench as samples, also add them as nodes. Args: workbench: Instance of Workbench Client. file_list: list of files. labels: labels for the nodes. Returns: A list of md5s.
codesearchnet
def camelcase(string): string = re.sub('^[\\-_\\.]', '', str(string)) if (not string): return string return (lowercase(string[0]) + re.sub('[\\-_\\.\\s]([a-z])', (lambda matched: uppercase(matched.group(1))), string[1:]))
Convert string into camel case. Args: string: String to convert. Returns: string: Camel case string.
codesearchnet
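A self-contained variant of the camelcase conversion above; the module's `lowercase`/`uppercase` helpers are replaced with plain str methods, which is an assumption about their behaviour.

import re

def camelcase_sketch(value):
    value = re.sub(r'^[\-_\.]', '', str(value))
    if not value:
        return value
    return value[0].lower() + re.sub(
        r'[\-_\.\s]([a-z])', lambda m: m.group(1).upper(), value[1:])

assert camelcase_sketch('hello_world-example') == 'helloWorldExample'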
def _ParseStorageMediaOptions(self, options): self._ParseStorageMediaImageOptions(options) self._ParseVSSProcessingOptions(options) self._ParseCredentialOptions(options) self._ParseSourcePathOption(options)
Parses the storage media options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
juraj-google-style
def end_of(self, event_id, import_options=True): event_id = str(event_id) if event_id in DatePickerDictionary.items: linked_picker = DatePickerDictionary.items[event_id] self.config['linked_to'] = linked_picker.config['id'] if import_options: backup_moment_format = self.config['options']['format'] self.config['options'].update(linked_picker.config['options']) self.config['options'].update(self.options_param) if self.format_param or 'format' in self.options_param: self.config['options']['format'] = backup_moment_format else: self.format = linked_picker.format self.config['options']['useCurrent'] = False self._link_to(linked_picker) else: raise KeyError( 'start-date not specified for event_id "%s"' % event_id) return self
Set Date-Picker as the end-date of a date-range. Args: - event_id (string): User-defined unique id for linking two fields - import_options (bool): inherit options from start-date input, default: TRUE
juraj-google-style
def recipe_cm_user_editor(config, recipe_name): drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https:
A tool for rapidly bulk editing Campaign Manager profiles, roles, and sub accounts. Args: recipe_name (string) - Name of document to deploy to.
github-repos
def sync_firmware(self): serial_no = self.serial_number if self.firmware_newer(): try: self.invalidate_firmware() self.update_firmware() except errors.JLinkException as e: pass res = self.open(serial_no=serial_no) if self.firmware_newer(): raise errors.JLinkException('Failed to sync firmware version.') return res elif self.firmware_outdated(): try: self.update_firmware() except errors.JLinkException as e: pass if self.firmware_outdated(): raise errors.JLinkException('Failed to sync firmware version.') return self.open(serial_no=serial_no) return None
Syncs the emulator's firmware version and the DLL's firmware. This method is useful for ensuring that the firmware running on the J-Link matches the firmware supported by the DLL. Args: self (JLink): the ``JLink`` instance Returns: ``None``
codesearchnet
def _get_sliced_variables(var_list): unsliced_variables = [] sliced_variables = collections.defaultdict(lambda: []) for var in var_list: if var._save_slice_info: sliced_variables[var._save_slice_info.full_name].append(var) else: unsliced_variables.append(var) return unsliced_variables, sliced_variables
Separates the sliced (partitioned) and unsliced variables in var_list. Args: var_list: a list of variables. Returns: A list of unsliced variables in var_list, and a dict mapping names to parts for the sliced variables in var_list.
juraj-google-style
def list_apppools(): ret = dict() ps_cmd = [] ps_cmd.append("Get-ChildItem -Path 'IIS:\\AppPools' | Select-Object Name, State") ps_cmd.append(", @{ Name = 'Applications'; Expression = { $AppPool = $_.Name;") ps_cmd.append("$AppPath = 'machine/webroot/apphost';") ps_cmd.append("$FilterBase = '/system.applicationHost/sites/site/application';") ps_cmd.append('$FilterBase += "[@applicationPool = \'$($AppPool)\' and @path";') ps_cmd.append('$FilterRoot = "$($FilterBase) = \'/\']/parent::*";') ps_cmd.append('$FilterNonRoot = "$($FilterBase) != \'/\']";') ps_cmd.append('Get-WebConfigurationProperty -Filter $FilterRoot -PsPath $AppPath -Name Name') ps_cmd.append('| ForEach-Object { $_.Value };') ps_cmd.append('Get-WebConfigurationProperty -Filter $FilterNonRoot -PsPath $AppPath -Name Path') ps_cmd.append("| ForEach-Object { $_.Value } | Where-Object { $_ -ne '/' }") ps_cmd.append('} }') cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True) try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) except ValueError: raise CommandExecutionError('Unable to parse return data as Json.') for item in items: applications = list() if isinstance(item['Applications'], dict): if ('value' in item['Applications']): applications += item['Applications']['value'] else: applications.append(item['Applications']) ret[item['name']] = {'state': item['state'], 'applications': applications} if (not ret): log.warning('No application pools found in output: %s', cmd_ret['stdout']) return ret
List all configured IIS application pools. Returns: dict: A dictionary of IIS application pools and their details. CLI Example: .. code-block:: bash salt '*' win_iis.list_apppools
codesearchnet
def adb_call(args=None, shell=False, timeout=None, stderr=None) -> bytes: return self._exec_adb_cmd(name, args, shell=shell, timeout=timeout, stderr=stderr)
Wrapper for an ADB command. Args: args: string or list of strings, arguments to the adb command. See subprocess.Proc() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Proc() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. stderr: a Byte stream, like io.BytesIO, stderr of the command will be written to this object if provided. Returns: The output of the adb command run if exit code is 0.
github-repos
def _WrapEndMarker(tree): if isinstance(tree, pytree.Leaf) and tree.type == token.ENDMARKER: return pytree.Node(pygram.python_symbols.file_input, [tree]) return tree
Wrap a single ENDMARKER token in a "file_input" node. Arguments: tree: (pytree.Node) The root node of the parsed tree. Returns: The root node of the parsed tree. If the tree is a single ENDMARKER node, then that node is wrapped in a "file_input" node. That will ensure we don't skip comments attached to that node.
github-repos
def __init__(self, allow_soft_placement=True, disable_detailed_stats=True, disable_timeline=True, devices=None): self._tf_cluster = None self._generate_timeline = not disable_timeline if devices is None: self._tf_cluster = tf_cluster.TF_NewCluster(allow_soft_placement, disable_detailed_stats) else: devices_serialized = [device.SerializeToString() for device in devices] self._tf_cluster = tf_cluster.TF_NewVirtualCluster(devices_serialized)
Creates a Cluster. Args: allow_soft_placement: If True, TF will automatically fix illegal placements instead of erroring out if the placement isn't legal. disable_detailed_stats: If True, detailed statistics will not be available. disable_timeline: If True, the timeline information will not be reported. devices: A list of devices of type device_properties_pb2.NamedDevice. If None, a device list will be created based on the spec of the local machine.
github-repos
def update_script_from_item(self, item): script, path_to_script, script_item = item.get_script() dictator = list(script_item.to_dict().values())[0] for instrument in list(script.instruments.keys()): script.instruments[instrument]['settings'] = dictator[instrument]['settings'] del dictator[instrument] for sub_script_name in list(script.scripts.keys()): sub_script_item = script_item.get_subscript(sub_script_name) self.update_script_from_item(sub_script_item) del dictator[sub_script_name] script.update(dictator) script.data_path = self.gui_settings['data_folder']
updates the script based on the information provided in item Args: script: script to be updated item: B26QTreeItem that contains the new settings of the script
juraj-google-style
def extract_lookups_from_string(value): lookups = set() for match in LOOKUP_REGEX.finditer(value): groupdict = match.groupdict() raw = match.groups()[0] lookup_type = groupdict['type'] lookup_input = groupdict['input'] lookups.add(Lookup(lookup_type, lookup_input, raw)) return lookups
Extract any lookups within a string. Args: value (str): string value we're extracting lookups from Returns: set: set of :class:`stacker.lookups.Lookup` if any
codesearchnet
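An illustrative sketch of the lookup extraction above. `LOOKUP_REGEX` and the `Lookup` tuple are not shown in this entry, so the regex and `${type input}` syntax below are assumptions about the lookup format, not the library's actual definitions.

import collections
import re

Lookup = collections.namedtuple('Lookup', ['type', 'input', 'raw'])
LOOKUP_REGEX = re.compile(r'\$\{(?P<type>[\w.]+) (?P<input>[^\}]+)\}')  # assumed pattern

def extract_lookups_from_string_sketch(value):
    lookups = set()
    for match in LOOKUP_REGEX.finditer(value):
        groupdict = match.groupdict()
        lookups.add(Lookup(groupdict['type'], groupdict['input'], match.group(0)))
    return lookups

print(extract_lookups_from_string_sketch('vpc id: ${output vpc::Id}'))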
def p45(msg): d = hex2bin(data(msg)) if (d[26] == '0'): return None p = bin2int(d[27:38]) return p
Average static pressure. Args: msg (String): 28 bytes hexadecimal message string Returns: int: static pressure in hPa
codesearchnet
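A self-contained sketch of the static-pressure decoder above; `hex2bin`, `bin2int` and the data-field extraction from the full 28-character message are stand-ins, and the bit layout (status bit 26, 11-bit value in bits 27-37) simply mirrors the code shown rather than an independent check of the spec.

def hex2bin(hexstr):
    return bin(int(hexstr, 16))[2:].zfill(len(hexstr) * 4)

def bin2int(binstr):
    return int(binstr, 2)

def p45_sketch(data_hex):
    # data_hex is the 56-bit (14 hex character) data field of the message
    d = hex2bin(data_hex)
    if d[26] == '0':
        return None
    return bin2int(d[27:38])

# Build a synthetic data field with the status bit set and 1013 hPa encoded.
bits = ['0'] * 56
bits[26] = '1'
bits[27:38] = format(1013, '011b')
data_hex = '%014x' % int(''.join(bits), 2)
assert p45_sketch(data_hex) == 1013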
def add_country_locations(self, countries, locations=None, use_live=True): allcountriesadded = True for country in countries: if (not self.add_country_location(country, locations=locations, use_live=use_live)): allcountriesadded = False return allcountriesadded
Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid country names, converted to iso 3 codes. If any country is already added, it is ignored. Args: countries (List[str]): list of countries to add locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if all countries added or False if any already present.
codesearchnet
def _set_bearer_user_vars_local(token, allowed_client_ids, scopes): result = urlfetch.fetch(('%s?%s' % (_TOKENINFO_URL, urllib.urlencode({'access_token': token})))) if (result.status_code != 200): try: error_description = json.loads(result.content)['error_description'] except (ValueError, KeyError): error_description = '' _logger.error('Token info endpoint returned status %s: %s', result.status_code, error_description) return token_info = json.loads(result.content) if ('email' not in token_info): _logger.warning("Oauth token doesn't include an email address.") return if (token_info.get('email_verified') != 'true'): _logger.warning("Oauth token email isn't verified.") return client_id = token_info.get('azp') if ((list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK) and (client_id not in allowed_client_ids)): _logger.warning('Client ID is not allowed: %s', client_id) return (_, sufficient_scopes) = _process_scopes(scopes) authorized_scopes = token_info.get('scope', '').split(' ') if (not _are_scopes_sufficient(authorized_scopes, sufficient_scopes)): _logger.warning("Oauth token scopes don't match any acceptable scopes.") return os.environ[_ENV_AUTH_EMAIL] = token_info['email'] os.environ[_ENV_AUTH_DOMAIN] = '' _logger.debug('Local dev returning user from token.')
Validate the oauth bearer token on the dev server. Since the functions in the oauth module return only example results in local development, this hits the tokeninfo endpoint and attempts to validate the token. If it's valid, we'll set _ENV_AUTH_EMAIL and _ENV_AUTH_DOMAIN so we can get the user from the token. Args: token: String with the oauth token to validate. allowed_client_ids: List of client IDs that are acceptable. scopes: List of acceptable scopes.
codesearchnet
def StartTiming(self, profile_name): if profile_name not in self._profile_measurements: self._profile_measurements[profile_name] = CPUTimeMeasurement() self._profile_measurements[profile_name].SampleStart()
Starts timing CPU time. Args: profile_name (str): name of the profile to sample.
juraj-google-style
def recipe_cm_campaign_audit(config, recipe_name): drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https:
A tool for rapidly bulk checking Campaign Manager campaigns Args: recipe_name (string) - Name of document to deploy to.
github-repos
def start_site(name): ps_cmd = ['Start-WebSite', "'{0}'".format(name)] cmd_ret = _srvmgr(ps_cmd) return (cmd_ret['retcode'] == 0)
Start a Web Site in IIS. .. versionadded:: 2017.7.0 Args: name (str): The name of the website to start. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.start_site name='My Test Site'
codesearchnet
def master_key_from_seed(seed): S = get_bytes(seed) I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest() Il, Ir = I[:32], I[32:] parse_Il = int.from_bytes(Il, 'big') if parse_Il == 0 or parse_Il >= bitcoin_curve.n: raise ValueError("Bad seed, resulting in invalid key!") return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0)
Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key.
juraj-google-style
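A minimal, stdlib-only sketch of the HMAC-SHA512 derivation step in the entry above; the secp256k1 order constant and the returned tuple are stand-ins for the library's `bitcoin_curve.n` and `HDPrivateKey` wrapper.

import hashlib
import hmac

SECP256K1_N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141

def master_key_from_seed_sketch(seed: bytes):
    digest = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
    il, ir = digest[:32], digest[32:]
    key = int.from_bytes(il, 'big')
    if key == 0 or key >= SECP256K1_N:
        raise ValueError("Bad seed, resulting in invalid key!")
    return key, ir  # (private key integer, chain code)

key, chain_code = master_key_from_seed_sketch(bytes(16))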
def GetFeedMapping(client, feed, placeholder_type): feed_mapping_service = client.GetService('FeedMappingService', 'v201809') attribute_mappings = {} more_pages = True selector = { 'fields': ['FeedMappingId', 'AttributeFieldMappings'], 'predicates': [ { 'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']] }, { 'field': 'PlaceholderType', 'operator': 'EQUALS', 'values': [placeholder_type] } ], 'paging': { 'startIndex': 0, 'numberResults': PAGE_SIZE } } while more_pages: page = feed_mapping_service.get(selector) if 'entries' in page: for feed_mapping in page['entries']: for attribute_mapping in feed_mapping['attributeFieldMappings']: if attribute_mapping['feedAttributeId'] in attribute_mappings: attribute_mappings[attribute_mapping['feedAttributeId']].append( attribute_mapping['fieldId']) else: attribute_mappings[attribute_mapping['feedAttributeId']] = [ attribute_mapping['fieldId']] selector['paging']['startIndex'] += PAGE_SIZE more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries']) return attribute_mappings
Gets the Feed Mapping for a given Feed. Args: client: an AdWordsClient instance. feed: the Feed we are retrieving the Feed Mapping for. placeholder_type: the Placeholder Type we are looking for. Returns: A dictionary containing the Feed Mapping.
juraj-google-style
def cost(self, logits, target): logits = tf.reshape(logits, [self._num_steps * self._batch_size, -1]) target = tf.reshape(target, [self._num_steps * self._batch_size, -1]) xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target) loss = tf.reduce_sum(xent) return loss / self._batch_size
Returns cost. Args: logits: model output. target: target. Returns: Cross-entropy loss for a sequence of logits. The loss will be averaged across time steps if time_average_cost was enabled at construction time.
juraj-google-style
def transactional(func, args, kwds, **options): return transactional_async.wrapped_decorator( func, args, kwds, **options).get_result()
Decorator to make a function automatically run in a transaction. Args: **ctx_options: Transaction options (see transaction(), but propagation default to TransactionOptions.ALLOWED). This supports two forms: (1) Vanilla: @transactional def callback(arg): ... (2) With options: @transactional(retries=1) def callback(arg): ...
juraj-google-style
def read_nmr_efg(self): header_pattern = '^\\s+NMR quadrupolar parameters\\s+$\\n^\\s+Cq : quadrupolar parameter\\s+Cq=e[*]Q[*]V_zz/h$\\n^\\s+eta: asymmetry parameters\\s+\\(V_yy - V_xx\\)/ V_zz$\\n^\\s+Q : nuclear electric quadrupole moment in mb \\(millibarn\\)$\\n^-{50,}$\\n^\\s+ion\\s+Cq\\(MHz\\)\\s+eta\\s+Q \\(mb\\)\\s+$\\n^-{50,}\\s*$\\n' row_pattern = '\\d+\\s+(?P<cq>[-]?\\d+\\.\\d+)\\s+(?P<eta>[-]?\\d+\\.\\d+)\\s+(?P<nuclear_quadrupole_moment>[-]?\\d+\\.\\d+)' footer_pattern = '-{50,}\\s*$' self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float, last_one_only=True, attribute_name='efg')
Parse the NMR Electric Field Gradient interpreted values. Returns: Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR. Each dict key/value pair corresponds to a component of the tensors.
codesearchnet
def check_or(state, *tests): success = False first_feedback = None for test in iter_tests(tests): try: multi(state, test) success = True except TestFail as e: if (not first_feedback): first_feedback = e.feedback if success: return state state.report(first_feedback)
Test whether at least one SCT passes. If all of the tests fail, the feedback of the first test will be presented to the student. Args: state: State instance describing student and solution code, can be omitted if used with Ex() tests: one or more sub-SCTs to run :Example: The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both). :: Ex().check_or( has_code('SELECT'), has_code('WHERE') ) The SCT below checks that a SELECT statement has at least a WHERE clause or a LIMIT clause. :: Ex().check_node('SelectStmt', 0).check_or( check_edge('where_clause'), check_edge('limit_clause') )
codesearchnet
def factory(attr_type, data): constructors = { MFT_ATTR_STANDARD_INFORMATION: MftAttrStandardInformation, MFT_ATTR_ATTRIBUTE_LIST: MftAttrAttributeList, MFT_ATTR_FILENAME: MftAttrFilename, MFT_ATTR_OBJECT_ID: MftAttrObjectId, MFT_ATTR_SECURITY_DESCRIPTOR: MftAttrSecurityDescriptor, MFT_ATTR_VOLUME_NAME: MftAttrVolumeName, MFT_ATTR_VOLUME_INFO: MftAttrVolumeInfo, MFT_ATTR_DATA: MftAttrData, MFT_ATTR_INDEX_ROOT: MftAttrIndexRoot, MFT_ATTR_INDEX_ALLOCATION: MftAttrIndexAllocation, MFT_ATTR_BITMAP: MftAttrBitmap, MFT_ATTR_REPARSE_POINT: MftAttrReparsePoint, MFT_ATTR_LOGGED_TOOLSTREAM: MftAttrLoggedToolstream, } if attr_type not in constructors: return None return constructors[attr_type](data)
Returns an initialized attribute object based on attr_type (e.g. :class:`MftAttrStandardInformation`). Args: attr_type (uint): Attribute type number (e.g. 0x10 - $STANDARD_INFORMATION) data (byte array): Data to initialize attribute object with.
juraj-google-style
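A simplified sketch of the constructor-dispatch pattern used by `factory` above, with two stand-in attribute classes instead of the full MFT attribute set.

MFT_ATTR_STANDARD_INFORMATION = 0x10
MFT_ATTR_FILENAME = 0x30

class MftAttrStandardInformation:
    def __init__(self, data):
        self.data = data

class MftAttrFilename:
    def __init__(self, data):
        self.data = data

def factory_sketch(attr_type, data):
    # Map attribute type numbers to their constructors; unknown types return None.
    constructors = {
        MFT_ATTR_STANDARD_INFORMATION: MftAttrStandardInformation,
        MFT_ATTR_FILENAME: MftAttrFilename,
    }
    if attr_type not in constructors:
        return None
    return constructors[attr_type](data)

assert isinstance(factory_sketch(0x30, b'\x00\x01'), MftAttrFilename)
assert factory_sketch(0xFF, b'') is None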
def get_nested_streams(dmap): return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams})
Recurses supplied DynamicMap to find all streams Args: dmap: DynamicMap to recurse to look for streams Returns: List of streams that were found
codesearchnet
def sites_at_edges(self): min_x = min([s.r[0] for s in self.sites]) max_x = max([s.r[0] for s in self.sites]) min_y = min([s.r[1] for s in self.sites]) max_y = max([s.r[1] for s in self.sites]) min_z = min([s.r[2] for s in self.sites]) max_z = max([s.r[2] for s in self.sites]) x_max = [s for s in self.sites if (s.r[0] == max_x)] x_min = [s for s in self.sites if (s.r[0] == min_x)] y_max = [s for s in self.sites if (s.r[1] == max_y)] y_min = [s for s in self.sites if (s.r[1] == min_y)] z_max = [s for s in self.sites if (s.r[2] == max_z)] z_min = [s for s in self.sites if (s.r[2] == min_z)] return (x_max, x_min, y_max, y_min, z_max, z_min)
Finds the six sites with the maximum and minimum coordinates along x, y, and z. Args: None Returns: (List(List)): In the order [ +x, -x, +y, -y, +z, -z ]
codesearchnet
def get_matching_files(filename): return get_matching_files_v2(filename)
Returns a list of files that match the given pattern(s). Args: filename: string or iterable of strings. The glob pattern(s). Returns: A list of strings containing filenames that match the given pattern(s). Raises: * errors.OpError: If there are filesystem / directory listing errors. * errors.NotFoundError: If pattern to be matched is an invalid directory.
github-repos
def handler_for_name(fq_name): resolved_name = for_name(fq_name) if isinstance(resolved_name, (type, types.ClassType)): return resolved_name() elif isinstance(resolved_name, types.MethodType): return getattr(resolved_name.im_class(), resolved_name.__name__) else: return resolved_name
Resolves and instantiates handler by fully qualified name. First resolves the name using for_name call. Then if it resolves to a class, instantiates a class, if it resolves to a method - instantiates the class and binds method to the instance. Args: fq_name: fully qualified name of something to find. Returns: handler instance which is ready to be called.
juraj-google-style
def _set_details(self, content): try: self.details = str(content) except UnicodeEncodeError: if sys.version_info < (3, 0): self.details = unicode(content) else: logging.error( 'Unable to decode "%s" in Py3, encoding in utf-8.', content) self.details = content.encode('utf-8')
Sets the `details` field. Args: content: the content to extract details from.
juraj-google-style
def get_mapping(self, superset, subset): if self._supercell: raise ValueError('cannot compute mapping to supercell') if self._primitive_cell: raise ValueError('cannot compute mapping with primitive cell option') if (len(subset) > len(superset)): raise ValueError('subset is larger than superset') (superset, subset, _, _) = self._preprocess(superset, subset, True) match = self._strict_match(superset, subset, 1, break_on_match=False) if ((match is None) or (match[0] > self.stol)): return None return match[4]
Calculate the mapping from superset to subset. Args: superset (Structure): Structure containing at least the sites in subset (within the structure matching tolerance) subset (Structure): Structure containing some of the sites in superset (within the structure matching tolerance) Returns: numpy array such that superset.sites[mapping] is within matching tolerance of subset.sites or None if no such mapping is possible
codesearchnet
def safe_group_name(group_name, group_max_length=100, ellipsis=True): ellipsis_value = '' if ellipsis: ellipsis_value = ' ...' if ((group_name is not None) and (len(group_name) > group_max_length)): group_name_array = group_name.split(' ') group_name = '' for word in group_name_array: word = u'{}'.format(word) if (((len(group_name) + len(word)) + len(ellipsis_value)) >= group_max_length): group_name = '{}{}'.format(group_name, ellipsis_value) group_name = group_name.lstrip(' ') break group_name += ' {}'.format(word) return group_name
Truncate group name to match limit breaking on space and optionally add an ellipsis. .. note:: Currently the ThreatConnect group name limit is 100 characters. Args: group_name (string): The raw group name to be truncated. group_max_length (int): The max length of the group name. ellipsis (boolean): If true the truncated name will have '...' appended. Returns: (string): The truncated group name with optional ellipsis.
codesearchnet
def format_h1(s, format="text", indents=0): _CHAR = "=" if format.startswith("text"): return format_underline(s, _CHAR, indents) elif format.startswith("markdown"): return ["# {}".format(s)] elif format.startswith("rest"): return format_underline(s, _CHAR, 0)
Formats a string as a level-1 heading. Args: s: string format: string starting with "text", "markdown", or "rest" indents: number of leading indenting spaces Returns: list >>> print("\\n".join(format_h1("Header 1", indents=10))) Header 1 ======== >>> print("\\n".join(format_h1("Header 1", "markdown", 0))) # Header 1
juraj-google-style
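A sketch of the `format_underline` helper that `format_h1` above relies on, reconstructed from the docstring's example output; the real helper may differ in details.

def format_underline(s, char="=", indents=0):
    ind = " " * indents
    return ["{}{}".format(ind, s), "{}{}".format(ind, char * len(s))]

print("\n".join(format_underline("Header 1", "=")))
# Header 1
# ========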
def Parse(self, parser_mediator): file_entry = parser_mediator.GetFileEntry() if not file_entry: raise errors.UnableToParseFile('Invalid file entry') parser_mediator.AppendToParserChain(self) try: self.ParseFileEntry(parser_mediator, file_entry) finally: parser_mediator.PopFromParserChain()
Parses the file entry and extracts event objects. Args: parser_mediator (ParserMediator): a parser mediator. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def intent(self, user: str=None, token: Optional[str]=None) -> 'IntentAPI': if self.is_real_user: raise ValueError("Can't get child intent of real user") if token: return IntentAPI(user, self.real_user(user, token), self.bot_intent(), self.state_store, self.intent_log) return IntentAPI(user, self.user(user), self.bot_intent(), self.state_store, self.intent_log)
Get the intent API for a specific user. Args: user: The Matrix ID of the user whose intent API to get. token: The access token to use for the user, if known. Returns: The IntentAPI for the given user.
codesearchnet
def islink(self, path=None, header=None): if header is None: header = self._head(self.get_client_kwargs(path)) for key in ('x-oss-object-type', 'type'): try: return header.pop(key) == 'Symlink' except KeyError: continue return False
Returns True if object is a symbolic link. Args: path (str): File path or URL. header (dict): Object header. Returns: bool: True if object is Symlink.
juraj-google-style
def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, names): print('Converting lrelu ...') if names == 'short': tf_name = 'lRELU' + random_string(3) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) leakyrelu = \ keras.layers.LeakyReLU(alpha=params['alpha'], name=tf_name) layers[scope_name] = leakyrelu(layers[inputs[0]])
Convert leaky relu layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def __init__(self, buckets): self.buckets = buckets
Creates a new Buckets. Args: buckets: A C pointer of TFE_MonitoringBuckets.
github-repos
def Parse(text, message): if not isinstance(text, six.text_type): text = text.decode('utf-8') try: if sys.version_info < (2, 7): js = json.loads(text) else: js = json.loads(text, object_pairs_hook=_DuplicateChecker) except ValueError as e: raise ParseError('Failed to load JSON: {0}.'.format(str(e))) _ConvertMessage(js, message) return message
Parses a JSON representation of a protocol message into a message. Args: text: Message JSON representation. message: A protocol buffer message to merge into. Returns: The same message passed as argument. Raises: ParseError: On JSON parsing problems.
juraj-google-style
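A sketch of the duplicate-key guard implied by `_DuplicateChecker` in the entry above; the library's actual hook may differ, but the `object_pairs_hook` mechanism it relies on is standard `json` behaviour.

import json

def duplicate_checker(pairs):
    result = {}
    for key, value in pairs:
        if key in result:
            raise ValueError('duplicate key: {0}'.format(key))
        result[key] = value
    return result

json.loads('{"a": 1, "b": 2}', object_pairs_hook=duplicate_checker)  # parses fine
try:
    json.loads('{"a": 1, "a": 2}', object_pairs_hook=duplicate_checker)
except ValueError as err:
    print(err)  # duplicate key: a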
def get_parent_dir_for_name(module_name): name_split = module_name.split('.') if not name_split: return None try: spec = importlib.util.find_spec(name_split[0]) except ValueError: return None if not spec or not spec.origin: return None base_path = os.path.dirname(spec.origin) return os.path.join(base_path, *name_split[1:-1])
Get parent directory for module with the given name. Args: module_name: Module name, e.g. tf_keras.api._v2.keras. Returns: Path to the parent directory if module is found and None otherwise. Given example above, it should return: /root_path/tf_keras/api/_v2.
github-repos
def require_fresh_games(self, number_fresh): latest = self.latest_game_number table_state = self.bt_table.row(TABLE_STATE) table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh)) table_state.commit() print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
Require a given number of fresh games to be played. Args: number_fresh: integer, number of new fresh games needed Increments the cell `table_state=metadata:wait_for_game_number` by the given number of games. This will cause `self.wait_for_fresh_games()` to block until the game counter has reached this number.
juraj-google-style
def resolve_import(name, is_from, is_star): if name.startswith('.') or is_builtin(name): return None ret = _resolve_import(name) if ret is None and is_from and not is_star: package, _ = name.rsplit('.', 1) ret = _resolve_import(package) return ret
Use python to resolve an import. Args: name: The fully qualified module name. is_from: Whether the name is the target of a from-import. is_star: Whether the import is a star import (from module import *). Returns: The path to the module source file or None.
juraj-google-style
def correct_dihedral(self, construction_table, use_lookup=None): if (use_lookup is None): use_lookup = settings['defaults']['use_lookup'] problem_index = self.check_dihedral(construction_table) bond_dict = self._give_val_sorted_bond_dict(use_lookup=use_lookup) c_table = construction_table.copy() for i in problem_index: loc_i = c_table.index.get_loc(i) (b, a, problem_d) = c_table.loc[(i, ['b', 'a', 'd'])] try: c_table.loc[(i, 'd')] = ((bond_dict[a] - {b, a, problem_d}) - set(c_table.index[loc_i:]))[0] except IndexError: visited = (set(c_table.index[loc_i:]) | {b, a, problem_d}) tmp_bond_dict = OrderedDict([(j, (bond_dict[j] - visited)) for j in bond_dict[problem_d]]) found = False while (tmp_bond_dict and (not found)): new_tmp_bond_dict = OrderedDict() for new_d in tmp_bond_dict: if (new_d in visited): continue angle = self.get_angle_degrees([b, a, new_d])[0] if (5 < angle < 175): found = True c_table.loc[(i, 'd')] = new_d else: visited.add(new_d) for j in tmp_bond_dict[new_d]: new_tmp_bond_dict[j] = (bond_dict[j] - visited) tmp_bond_dict = new_tmp_bond_dict if (not found): other_atoms = c_table.index[:loc_i].difference({b, a}) molecule = self.get_distance_to(origin=i, sort=True, other_atoms=other_atoms) k = 0 while ((not found) and (k < len(molecule))): new_d = molecule.index[k] angle = self.get_angle_degrees([b, a, new_d])[0] if (5 < angle < 175): found = True c_table.loc[(i, 'd')] = new_d k = (k + 1) if (not found): message = 'The atom with index {} has no possibility to get nonlinear reference atoms'.format raise UndefinedCoordinateSystem(message(i)) return c_table
Reindex the dihedral-defining atom if a linear reference is used. Uses :meth:`~Cartesian.check_dihedral` to obtain the problematic indices. Args: construction_table (pd.DataFrame): use_lookup (bool): Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: pd.DataFrame: Appropriately renamed construction table.
codesearchnet
def reduce_sum(tensors): return _apply_reduce('sum', tensors)
Returns a tensor with the reduce sum across `tensors`. The computation is done with a reduce operation, so only one tensor is returned. Args: tensors: The input tensors across which to sum; must be assigned to GPU devices. Returns: A tensor containing the sum of the input tensors. Raises: LookupError: If context is not currently using a GPU device.
github-repos
def __init__(self, code_page=None, time_zone=None): super(SystemConfigurationArtifact, self).__init__() self.code_page = code_page self.hostname = None self.keyboard_layout = None self.operating_system = None self.operating_system_product = None self.operating_system_version = None self.time_zone = time_zone self.user_accounts = []
Initializes a system configuration artifact. Args: code_page (Optional[str]): system code page. time_zone (Optional[str]): system time zone.
juraj-google-style
def roc_auc(logits, labels, weights_fn=None): del weights_fn with tf.variable_scope("roc_auc", values=[logits, labels]): predictions = tf.argmax(logits, axis=-1) _, auc = tf.metrics.auc(labels, predictions, curve="ROC") return auc, tf.constant(1.0)
Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights
juraj-google-style
def trainable_weights(self): if self.trainable: children_weights = self._gather_children_attribute('trainable_variables') return self._dedup_weights(self._trainable_weights + children_weights) else: return []
List of all trainable weights tracked by this layer. Trainable weights are updated via gradient descent during training. Returns: A list of trainable variables.
github-repos
def route(self, method, pattern): def decorator(callback): self._router.add(method, pattern, callback) return callback return decorator
Decorator to add route for a request with any HTTP method. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. pattern (str): Routing pattern the path must match. Returns: function: Decorator function to add route.
juraj-google-style
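A minimal sketch of the decorator-based routing shown above; the internal `_router` is replaced here with a plain dict keyed by (method, pattern), which is an assumption about what the real router stores.

class Router:
    def __init__(self):
        self._routes = {}

    def add(self, method, pattern, callback):
        self._routes[(method, pattern)] = callback

    def route(self, method, pattern):
        def decorator(callback):
            self.add(method, pattern, callback)
            return callback
        return decorator

app = Router()

@app.route('GET', '/hello')
def hello():
    return 'Hello, world'

assert app._routes[('GET', '/hello')] is hello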
def append(self, node, dirty=True): self._children[node.id] = node node.parent = self if dirty: self.touch() return node
Add a new child node. Args: node (gkeepapi.Node): Node to add. dirty (bool): Whether this node should be marked dirty.
juraj-google-style
def device(self) -> str: return pywrap_tf_session.TF_OperationDevice(self._c_op)
The name of the device to which this op has been assigned, if any. Returns: The string name of the device to which this op has been assigned, or an empty string if it has not been assigned to a device.
github-repos
def sphere_selector_using_residues(self, radius, force_rerun=False): log.debug('{}: running sphere selector...'.format(self.id)) if not self.sphgen_path or not self.bindingsite_path: raise ValueError('Please run sphgen and binding_site_mol2') selsph = op.join(self.dock_dir, '{}_selsph_binding.sph'.format(self.id)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph): cmd = "sphere_selector {} {} {}".format(self.sphgen_path, self.bindingsite_path, radius) rename = "mv selected_spheres.sph {}".format(selsph) os.system(cmd) os.system(rename) if ssbio.utils.is_non_zero_file(selsph): self.sphsel_path = selsph log.debug('{}: successful sphere selection'.format(self.sphsel_path)) else: log.critical('{}: sphere_selector_using_residues failed to run on sph file'.format(self.sphgen_path))
Select spheres based on binding site residues Args: radius (int, float): Radius around binding residues to dock to force_rerun (bool): If method should be rerun even if output file exists
juraj-google-style
def make_json_serializable(doc: Dict): for (k, v) in doc.items(): if isinstance(v, datetime.date): doc[k] = v.strftime('%Y-%m-%d') elif isinstance(v, datetime.datetime): doc[k] = v.isoformat()
Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else. This method modifies the given document in place. Args: doc: A Python Dictionary, typically a CDR object. Returns: None
codesearchnet
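A small usage sketch of the helper above; the document contents are made up, but it shows that the conversion happens in place so json.dumps succeeds afterwards.

import datetime
import json

doc = {
    'title': 'report',
    'published': datetime.date(2019, 5, 1),
    'crawled': datetime.datetime(2019, 5, 2, 13, 30),
}
make_json_serializable(doc)
print(json.dumps(doc))
# {"title": "report", "published": "2019-05-01", "crawled": "2019-05-02T13:30:00"}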
def set_session(self, headers=None):
    if headers is None:
        headers = {
            'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)'
                           ' AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/48.0.2564.116 Safari/537.36')
        }
    elif not isinstance(headers, dict):
        raise TypeError('"headers" must be a dict object')

    self.session = Session(self.proxy_pool)
    self.session.headers.update(headers)

Init session with default or custom headers.

Args:
  headers: A dict of headers (default None, thus using the default header
    to init the session)
juraj-google-style
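A hypothetical sketch of overriding the default headers; `crawler` stands for whichever object mixes in set_session().

crawler.set_session(headers={'User-Agent': 'my-crawler/1.0'})  # custom header

try:
    crawler.set_session(headers='not-a-dict')
except TypeError as error:
    print(error)  # "headers" must be a dict object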
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
and adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:

- `input_ids` (for encoder) `X [eos, src_lang_code]`
- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.

Args:
  token_ids_0 (`List[int]`):
    List of IDs to which the special tokens will be added.
  token_ids_1 (`List[int]`, *optional*):
    Optional second list of IDs for sequence pairs.

Returns:
  `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
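A hedged usage sketch with the Hugging Face MBART tokenizer; the checkpoint name is the standard public one, and the exact token ids depend on that checkpoint.

from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25', src_lang='en_XX')
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('Hello world'))

# For a single sequence this is prefix_tokens + ids + suffix_tokens,
# i.e. X [eos, src_lang_code] on the encoder side.
encoder_input_ids = tokenizer.build_inputs_with_special_tokens(ids)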
def delete(self, *, auto_commit=False):
    try:
        db.session.delete(self.resource)
        if auto_commit:
            db.session.commit()
    except SQLAlchemyError:
        self.log.exception('Failed deleting resource: {}'.format(self.id))
        db.session.rollback()

Removes a resource from the database.

Args:
  auto_commit (bool): Automatically commit the transaction. Default: `False`

Returns:
  `None`
juraj-google-style
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False):
    residual = hidden_states

    hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)

    second_residual = hidden_states

    cross_attn_weights = None
    hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = second_residual + hidden_states
    hidden_states = self.encoder_attn_layer_norm(hidden_states)

    residual = hidden_states
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.final_layer_norm(hidden_states)

    outputs = (hidden_states,)

    if output_attentions:
        outputs += (self_attn_weights, cross_attn_weights)

    return outputs

Args:
  hidden_states (`torch.FloatTensor`):
    Input to the layer of shape `(seq_len, batch, embed_dim)`.
  position_embeddings (`torch.FloatTensor`, *optional*):
    Position embeddings that are added to the queries and keys in the self-attention layer.
  reference_points (`torch.FloatTensor`, *optional*):
    Reference points.
  spatial_shapes (`torch.LongTensor`, *optional*):
    Spatial shapes.
  level_start_index (`torch.LongTensor`, *optional*):
    Level start index.
  encoder_hidden_states (`torch.FloatTensor`):
    Cross attention input to the layer of shape `(seq_len, batch, embed_dim)`.
  encoder_attention_mask (`torch.FloatTensor`):
    Encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated
    by very large negative values.
  output_attentions (`bool`, *optional*):
    Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
    tensors for more detail.
github-repos
def update(self, data=None, timeout=-1, force=''):
    uri = self.data['uri']

    resource = deepcopy(self.data)
    resource.update(data)

    if resource.get('serverHardwareUri') is None:
        resource.pop('enclosureBay', None)
        resource.pop('enclosureUri', None)

    self.data = self._helper.update(resource, uri, force, timeout)

    return self

Updates server profile template.

Args:
  data: Data to update the resource.
  timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
    in OneView; it just stops waiting for its completion.
  force: Force the update operation.

Returns:
  The updated resource object (self), with its data refreshed from the update response.
juraj-google-style
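A hypothetical sketch of the call; the field being changed and the timeout value are illustrative only.

# Rename the template; update() refreshes self.data and returns the resource object.
template = template.update(data={'name': 'web-server-template-v2'}, timeout=600)
print(template.data['name'])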
def ConsumeString(self):
    the_bytes = self.ConsumeByteString()
    try:
        return six.text_type(the_bytes, 'utf-8')
    except UnicodeDecodeError as e:
        raise self._StringParseError(e)

Consumes a string value.

Returns:
  The string parsed.

Raises:
  ParseError: If a string value couldn't be consumed.
codesearchnet
def check_query(query):
    q = query.lower()

    if ('select ' not in q):
        raise InvalidQuery('SELECT word not found in the query: {0}'.format(query))

    if (' from ' not in q):
        raise InvalidQuery('FROM word not found in the query: {0}'.format(query))

Check query sanity.

Args:
  query: query string

Returns:
  None

Raises:
  InvalidQuery: If the query does not contain a SELECT or FROM keyword.
codesearchnet
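A minimal usage sketch, assuming InvalidQuery is importable from the same module as check_query.

check_query('SELECT id FROM users')  # passes silently

try:
    check_query('DELETE FROM users')
except InvalidQuery as error:
    print(error)  # SELECT word not found in the query: DELETE FROM users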
def start_test(self, idempotence_key=None, base_path='', **kwargs):
    if not idempotence_key:
        idempotence_key = uuid.uuid4().hex
    pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
    context = _PipelineContext('', 'default', base_path)
    future = PipelineFuture(self.output_names, force_strict=True)
    self._set_values_internal(
        context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
    context.start_test(self)

Starts this pipeline in test fashion.

Args:
  idempotence_key: Dummy idempotence_key to use for this root pipeline.
  base_path: Dummy base URL path to use for this root pipeline.
  kwargs: Ignored keyword arguments usually passed to start().
juraj-google-style
def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):
    dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)
    aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1)
    return -tf.math.log(aggregation_ops_total_mass) * aggregate_mask

Calculates aggregation loss in the case of answer supervision.

Args:
  logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
    Logits per aggregation operation.
  aggregate_mask (`tf.Tensor` of shape `(batch_size, )`):
    A mask set to 1 for examples that should use aggregation functions.

Returns:
  aggregation_loss_unknown (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (in case of answer
  supervision) per example.
github-repos
def process_exists(self, task_type, task_id):
    return self.get_process_exit_code(task_type, task_id) is None

Returns whether the subprocess still exists given the task type and id.

Args:
  task_type: The task type.
  task_id: The task id.

Returns:
  Boolean; whether the subprocess still exists. If the subprocess has exited, this returns False.
github-repos
def forward(self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:
    x = self.dense(hidden_states).squeeze(-1)

    if p_mask is not None:
        if get_parameter_dtype(self) == torch.float16:
            x = x * (1 - p_mask) - 65500 * p_mask
        else:
            x = x * (1 - p_mask) - 1e+30 * p_mask

    return x

Args:
  hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
    The final hidden states of the model.
  p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
    Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
    should be masked.

Returns:
  `torch.FloatTensor`: The start logits for SQuAD.
github-repos
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, generator, max_subtoken_length=None, reserved_tokens=None):
    if (data_dir and vocab_filename):
        vocab_filepath = os.path.join(data_dir, vocab_filename)
        if tf.gfile.Exists(vocab_filepath):
            tf.logging.info('Found vocab file: %s', vocab_filepath)
            return text_encoder.SubwordTextEncoder(vocab_filepath)
    else:
        vocab_filepath = None

    tf.logging.info('Generating vocab file: %s', vocab_filepath)
    vocab = text_encoder.SubwordTextEncoder.build_from_generator(
        generator, vocab_size, max_subtoken_length=max_subtoken_length,
        reserved_tokens=reserved_tokens)

    if vocab_filepath:
        tf.gfile.MakeDirs(data_dir)
        vocab.store_to_file(vocab_filepath)

    return vocab

Inner implementation for vocab generators.

Args:
  data_dir: The base directory where data and vocab files are stored. If None, then do not save the vocab
    even if it doesn't exist.
  vocab_filename: relative filename where vocab file is stored
  vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
  generator: a generator that produces tokens from the vocabulary
  max_subtoken_length: an optional integer. Set this to a finite value to avoid quadratic costs during vocab
    building.
  reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS` should be a prefix of
    `reserved_tokens`. If `None`, defaults to `RESERVED_TOKENS`.

Returns:
  A SubwordTextEncoder vocabulary object.
codesearchnet
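A hedged usage sketch; the output directory, filename, and toy corpus are hypothetical, and the generator simply yields raw text for the subword builder to tokenize.

def text_generator():
    # Toy corpus; in practice this would stream lines from the real training data.
    yield 'the quick brown fox jumps over the lazy dog'
    yield 'pack my box with five dozen liquor jugs'

vocab = get_or_generate_vocab_inner(
    data_dir='/tmp/t2t_data',          # hypothetical output directory
    vocab_filename='vocab.subwords',   # hypothetical filename
    vocab_size=2 ** 10,
    generator=text_generator())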
def from_esri_code(code):
    code = str(code)
    proj4 = utils.crscode_to_string("esri", code, "proj4")
    crs = from_proj4(proj4)
    return crs

Load a CRS object from an ESRI code, via spatialreference.org.

Parses based on the proj4 representation.

Arguments:
  - *code*: The ESRI code as an integer.

Returns:
  - A CS instance of the indicated type.
juraj-google-style
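A small usage sketch; it assumes network access, since the lookup goes through spatialreference.org.

# ESRI:54009 is World Mollweide; the returned object is a CS instance built from
# the proj4 string fetched for that code.
crs = from_esri_code(54009)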