Dataset columns: code (string, 20–4.93k chars) · docstring (string, 33–1.27k chars) · source (string, 3 classes)
def _write_module_descriptor_file(handle, module_dir):
    readme = _module_descriptor_file(module_dir)
    readme_content = (
        "Module: %s\nDownload Time: %s\nDownloader Hostname: %s (PID:%d)" %
        (handle, str(datetime.datetime.today()), socket.gethostname(),
         os.getpid()))
    tf_utils.atomic_write_string_to_file(readme, readme_content, overwrite=True)

Writes a descriptor file about the directory containing a module.

Args:
    handle: Module name/handle.
    module_dir: Directory where a module was downloaded.
juraj-google-style
def update(self, **kwargs):
    return self.client.api.update_container(self.id, **kwargs)

Update resource configuration of the containers.

Args:
    blkio_weight (int): Block IO (relative weight), between 10 and 1000
    cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
    cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
    cpu_shares (int): CPU shares (relative weight)
    cpuset_cpus (str): CPUs in which to allow execution
    cpuset_mems (str): MEMs in which to allow execution
    mem_limit (int or str): Memory limit
    mem_reservation (int or str): Memory soft limit
    memswap_limit (int or str): Total memory (memory + swap), -1 to disable swap
    kernel_memory (int or str): Kernel memory limit
    restart_policy (dict): Restart policy dictionary

Returns:
    (dict): Dictionary containing a ``Warnings`` key.

Raises:
    :py:class:`docker.errors.APIError`: If the server returns an error.
codesearchnet
def register_intent_parser(self, intent_parser):
    if hasattr(intent_parser, 'validate') and callable(intent_parser.validate):
        self.intent_parsers.append(intent_parser)
    else:
        raise ValueError('%s is not an intent parser' % str(intent_parser))

"Enforce" the intent parser interface at registration time.

Args:
    intent_parser (intent): Intent to be registered.

Raises:
    ValueError: on invalid intent
codesearchnet
def __init__(self, connection_param, queue, output_exchange, output_key):
    super(PikaDaemon, self).__init__(queue)
    self.connection_param = connection_param
    self.queue = queue
    self.output_exchange = output_exchange
    self.content_type = "application/json"
    self.output_key = output_key

Pika and Daemon wrapper for handling AMQP connections.

Args:
    connection_param (pika.ConnectionParameters): object setting the connection
    queue (str): name of queue where the daemon should listen
    output_exchange (str): name of exchange where the daemon should put responses
    output_key (str): routing key for output exchange
juraj-google-style
def all_elements_equal(value):
    if is_scalar(value):
        return True
    return np.array(value == value.flatten()[0]).all()

Checks if all elements in the given value are equal to each other. If the
input is a single value the result is trivial. If not, we compare all the
values to see if they are exactly the same.

Args:
    value (ndarray or number): a numpy array or a single number.

Returns:
    bool: true if all elements are equal to each other, false otherwise
juraj-google-style
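A minimal usage sketch for the function above; `is_scalar` is a module-level helper not shown here, so a NumPy-based stand-in is assumed:

import numpy as np

def is_scalar(value):
    # stand-in for the module's own helper; treats plain numbers as scalars
    return np.isscalar(value)

print(all_elements_equal(3.14))              # True (scalar case is trivial)
print(all_elements_equal(np.ones((2, 3))))   # True, every element is 1.0
print(all_elements_equal(np.array([1, 2])))  # False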
def filter_by_hoys(self, hoys):
    _moys = tuple(int(hour * 60) for hour in hoys)
    return self.filter_by_moys(_moys)

Filter the Data Collection based on a list of hours of the year.

Args:
    hoys: A list of hours of the year 0..8759

Return:
    A new Data Collection with filtered data
juraj-google-style
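For illustration, the hour-to-minute conversion this method performs (values chosen arbitrarily):

hoys = [0, 12.5, 8759]
moys = tuple(int(hour * 60) for hour in hoys)
print(moys)  # (0, 750, 525540) -- minutes of the year passed to filter_by_moys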
def qubits_tab(backend):
    props = backend.properties().to_dict()
    header_html = "<div><font style='font-weight:bold'>{key}</font>: {value}</div>"
    header_html = header_html.format(key='last_update_date',
                                     value=props['last_update_date'])
    update_date_widget = widgets.HTML(value=header_html)
    qubit_html = "<table>"
    qubit_html += "<tr><th></th><th>Frequency</th><th>T1</th><th>T2</th>"
    qubit_html += "<th>U1 gate error</th><th>U2 gate error</th><th>U3 gate error</th>"
    qubit_html += "<th>Readout error</th></tr>"
    qubit_footer = "</table>"
    for qub in range(len(props['qubits'])):
        name = 'Q%s' % qub
        qubit_data = props['qubits'][qub]
        gate_data = props['gates'][3 * qub:3 * qub + 3]
        t1_info = qubit_data[0]
        t2_info = qubit_data[1]
        freq_info = qubit_data[2]
        readout_info = qubit_data[3]
        freq = str(round(freq_info['value'], 5)) + ' ' + freq_info['unit']
        T1 = str(round(t1_info['value'], 5)) + ' ' + t1_info['unit']
        T2 = str(round(t2_info['value'], 5)) + ' ' + t2_info['unit']
        U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))
        U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))
        U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))
        readout_error = round(readout_info['value'], 5)
        qubit_html += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td>"
        qubit_html += "<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
        # substitutes only the row just appended; earlier rows no longer contain %s
        qubit_html = qubit_html % (name, freq, T1, T2, U1, U2, U3, readout_error)
    qubit_html += qubit_footer
    qubit_widget = widgets.HTML(value=qubit_html)
    out = widgets.VBox([update_date_widget, qubit_widget])
    return out

The qubits properties widget.

Args:
    backend (IBMQbackend): The backend.

Returns:
    VBox: A VBox widget.
juraj-google-style
def segment_max(data, segment_ids, num_segments=None, sorted=False):
    _segment_reduce_validation(data, segment_ids)
    if any_symbolic_tensors((data,)):
        return SegmentMax(num_segments, sorted).symbolic_call(data, segment_ids)
    return backend.math.segment_max(
        data, segment_ids, num_segments=num_segments, sorted=sorted)

Computes the max of segments in a tensor.

Args:
    data: Input tensor.
    segment_ids: A N-D tensor containing segment indices for each element in
        `data`. `data.shape[:len(segment_ids.shape)]` should match.
    num_segments: An integer representing the total number of segments. If
        not specified, it is inferred from the maximum value in `segment_ids`.
    sorted: A boolean indicating whether `segment_ids` is sorted. Defaults to
        `False`.

Returns:
    A tensor containing the max of segments, where each element represents
    the max of the corresponding segment in `data`.

Example:

>>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200])
>>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2])
>>> num_segments = 3
>>> keras.ops.segment_max(data, segment_ids, num_segments)
array([2, 20, 200], dtype=int32)
github-repos
def __init__(self, url=None, extract_method=None, path=None):
    self.url = url
    self.path = path
    self._extract_method = extract_method

Resource constructor.

Args:
    url: `str`, the URL at which to download the resource.
    extract_method: `ExtractMethod` to be used to extract resource. If not
        set, will be guessed from downloaded file name `original_fname`.
    path: `str`, path of resource on local disk. Can be None if resource has
        not been downloaded yet. In such case, `url` must be set.
juraj-google-style
def nb_ll(data, P, R):
    genes, cells = data.shape
    clusters = P.shape[1]
    lls = np.zeros((cells, clusters))
    for c in range(clusters):
        P_c = P[:, c].reshape((genes, 1))
        R_c = R[:, c].reshape((genes, 1))
        ll = gammaln(R_c + data) - gammaln(R_c)
        ll += data * np.log(P_c) + xlog1py(R_c, -P_c)
        lls[:, c] = ll.sum(0)
    return lls

Returns the negative binomial log-likelihood of the data.

Args:
    data (array): genes x cells
    P (array): NB success probability param - genes x clusters
    R (array): NB stopping param - genes x clusters

Returns:
    cells x clusters array of log-likelihoods
codesearchnet
def _create_outbound_stream(self, config=None):
    if config is None:
        raise ValueError('No stream config to create stream from.')
    name = self._get_stream_name(config)
    stream_handlers = self._get_stream_handlers(config, name)
    stream_input = config.get('input', None)
    stream_output = config.get('output', None)
    if type(stream_output) is int:
        return PortOutputStream(
            name, stream_input, stream_output, stream_handlers,
            zmq_args={'zmq_context': self.broker.context,
                      'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                      'zmq_proxy_xpub_url': self.broker.XPUB_URL})
    if stream_output is not None:
        log.warn('Output of stream {} is not an integer port. '
                 'Stream outputs can only be ports.'.format(name))
    return ZMQStream(
        name, stream_input, stream_handlers,
        zmq_args={'zmq_context': self.broker.context,
                  'zmq_proxy_xsub_url': self.broker.XSUB_URL,
                  'zmq_proxy_xpub_url': self.broker.XPUB_URL})

Creates an outbound stream from its config.

Params:
    config: stream configuration as read by ait.config

Returns:
    stream: a Stream

Raises:
    ValueError: if any of the required config values are missing
codesearchnet
def translate_item_ids(self, item_ids, language, is_nested=None):
    if is_nested is None:
        def is_nested_fun(x):
            return True
    elif isinstance(is_nested, bool):
        def is_nested_fun(x):
            return is_nested
    else:
        is_nested_fun = is_nested
    all_item_type_ids = ItemType.objects.get_all_item_type_ids()
    grouped = proso.list.group_by(
        item_ids, by=lambda item_id: all_item_type_ids[item_id])
    result = {}
    for item_type_id, items in grouped.items():
        with timeit('translating item type {}'.format(item_type_id)):
            item_type = ItemType.objects.get_all_types()[item_type_id]
            model = ItemType.objects.get_model(item_type_id)
            kwargs = {'{}__in'.format(item_type['foreign_key']): items}
            if 'language' in item_type:
                kwargs[item_type['language']] = language
            if (any(not is_nested_fun(item_id) for item_id in items)
                    and hasattr(model.objects, 'prepare_related')):
                objs = model.objects.prepare_related()
            elif hasattr(model.objects, 'prepare'):
                objs = model.objects.prepare()
            else:
                objs = model.objects
            for obj in objs.filter(**kwargs):
                item_id = getattr(obj, item_type['foreign_key'])
                result[item_id] = obj.to_json(nested=is_nested_fun(item_id))
    return result

Translate a list of item ids to JSON objects which reference them.

Args:
    item_ids (list[int]): item ids
    language (str): language used for further filtering (some objects for
        different languages share the same item)
    is_nested (function): mapping from item ids to booleans, where the
        boolean value indicates whether the item is nested

Returns:
    dict: item id -> JSON object
codesearchnet
def get_pkg_module_names(package_path):
    module_names = set()
    for fobj, modname, _ in pkgutil.iter_modules(path=[package_path]):
        filename = os.path.join(fobj.path, '%s.py' % modname)
        if os.path.exists(filename):
            module_names.add(os.path.abspath(filename))
    return module_names

Returns module filenames from package.

Args:
    package_path: Path to Python package.

Returns:
    A set of module filenames.
juraj-google-style
def get_attributes(path):
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))
    attributes = {}
    intAttributes = win32file.GetFileAttributes(path)
    # masks correspond to the Windows FILE_ATTRIBUTE_* constants
    attributes['archive'] = (intAttributes & 32) == 32
    attributes['reparsePoint'] = (intAttributes & 1024) == 1024
    attributes['compressed'] = (intAttributes & 2048) == 2048
    attributes['directory'] = (intAttributes & 16) == 16
    attributes['encrypted'] = (intAttributes & 16384) == 16384
    attributes['hidden'] = (intAttributes & 2) == 2
    attributes['normal'] = (intAttributes & 128) == 128
    attributes['notIndexed'] = (intAttributes & 8192) == 8192
    attributes['offline'] = (intAttributes & 4096) == 4096
    attributes['readonly'] = (intAttributes & 1) == 1
    attributes['system'] = (intAttributes & 4) == 4
    attributes['temporary'] = (intAttributes & 256) == 256
    attributes['mountedVolume'] = False
    if attributes['reparsePoint'] is True and attributes['directory'] is True:
        fileIterator = win32file.FindFilesIterator(path)
        findDataTuple = next(fileIterator)
        # 2684354563 == IO_REPARSE_TAG_MOUNT_POINT (0xA0000003)
        if findDataTuple[6] == 2684354563:
            attributes['mountedVolume'] = True
    attributes['symbolicLink'] = False
    if attributes['reparsePoint'] is True:
        fileIterator = win32file.FindFilesIterator(path)
        findDataTuple = next(fileIterator)
        # 2684354572 == IO_REPARSE_TAG_SYMLINK (0xA000000C)
        if findDataTuple[6] == 2684354572:
            attributes['symbolicLink'] = True
    return attributes

Return a dictionary object with the Windows file attributes for a file.

Args:
    path (str): The path to the file or directory

Returns:
    dict: A dictionary of file attributes

CLI Example:

.. code-block:: bash

    salt '*' file.get_attributes c:\\temp\\a.txt
codesearchnet
def Matches(self, file_entry, search_depth):
    if self._location_segments is None:
        location_match = None
    else:
        location_match = self._CheckLocation(file_entry, search_depth)
        if not location_match:
            return False, location_match
        if search_depth != self._number_of_location_segments:
            return False, location_match
    match = self._CheckFileEntryType(file_entry)
    if match is not None and not match:
        return False, location_match
    match = self._CheckIsAllocated(file_entry)
    if match is not None and not match:
        return False, location_match
    return True, location_match

Determines if the file entry matches the find specification.

Args:
    file_entry (FileEntry): file entry.
    search_depth (int): number of location path segments to compare.

Returns:
    tuple: contains:
        bool: True if the file entry matches the find specification, False
            otherwise.
        bool: True if the location matches, False if not or None if no
            location specified.
juraj-google-style
def __init__(self, profile_datum):
    self.total_op_time = profile_datum.op_time
    self.total_exec_time = profile_datum.exec_time
    device_and_node = '%s:%s' % (profile_datum.device_name,
                                 profile_datum.node_exec_stats.node_name)
    self._node_to_exec_count = {device_and_node: 1}

Constructor.

Args:
    profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to
        initialize this object with.
github-repos
def load_gene(ensembl, gene_id, de_novos=[]):
    transcripts = minimise_transcripts(ensembl, gene_id, de_novos)
    genes = []
    for transcript_id in transcripts:
        gene = construct_gene_object(ensembl, transcript_id)
        genes.append(gene)
    if len(genes) == 0:
        raise IndexError("{0}: no suitable transcripts".format(gene_id))
    return genes

Sort out all the necessary sequences and positions for a gene.

Args:
    ensembl: EnsemblRequest object to request data from ensembl
    gene_id: HGNC symbol for gene
    de_novos: list of de novo positions, so we can check they all fit in the
        gene transcript

Returns:
    list of Transcript objects for gene, including genomic ranges and
    sequences
juraj-google-style
def mkdir(path, mode=511, dir_fd=None):
    system = get_instance(path)
    relative = system.relpath(path)
    parent_dir = dirname(relative.rstrip('/'))
    if parent_dir:
        parent = path.rsplit(relative, 1)[0] + parent_dir + '/'
        if not system.isdir(parent):
            raise ObjectNotFoundError(
                "No such file or directory: '%s'" % parent)
    if system.isdir(system.ensure_dir_path(path)):
        raise ObjectExistsError("File exists: '%s'" % path)
    system.make_dir(relative, relative=True)

Create a directory named path with numeric mode mode.

Equivalent to "os.mkdir".

Args:
    path (path-like object): Path or URL.
    mode (int): The mode parameter is passed to os.mkdir(); see the
        os.mkdir() description for how it is interpreted. Not supported on
        cloud storage objects.
    dir_fd: directory descriptors; see the os.remove() description for how it
        is interpreted. Not supported on cloud storage objects.

Raises:
    FileExistsError: Directory already exists.
    FileNotFoundError: Parent directory does not exist.
codesearchnet
def remove_backup(name):
    if name not in list_backups():
        log.debug('Backup already removed: %s', name)
        return True
    ps_cmd = ['Remove-WebConfigurationBackup', '-Name', "'{0}'".format(name)]
    cmd_ret = _srvmgr(ps_cmd)
    if cmd_ret['retcode'] != 0:
        msg = 'Unable to remove web configuration: {0}\nError: {1}'.format(
            name, cmd_ret['stderr'])
        raise CommandExecutionError(msg)
    return name not in list_backups()

Remove an IIS Configuration backup from the System.

.. versionadded:: 2017.7.0

Args:
    name (str): The name of the backup to remove

Returns:
    bool: True if successful, otherwise False

CLI Example:

.. code-block:: bash

    salt '*' win_iis.remove_backup backup_20170209
codesearchnet
def get_password(request, mapping) -> None:
    LOGGER.debug('Received request "%s"', request)
    if 'host' not in request:
        LOGGER.error('host= entry missing in request. '
                     'Cannot query without a host')
        return
    host = request['host']
    if 'path' in request:
        host = '/'.join([host, request['path']])

    def skip(line, skip):
        return line[skip:]

    LOGGER.debug('Iterating mapping to match against host "%s"', host)
    for section in mapping.sections():
        if fnmatch.fnmatch(host, section):
            LOGGER.debug('Section "%s" matches requested host "%s"',
                         section, host)
            pass_target = mapping.get(section, 'target').replace(
                "${host}", request['host'])
            password_extractor = SpecificLineExtractor(
                0, 0, option_suffix='_password')
            password_extractor.configure(mapping[section])
            username_extractor = _username_extractors[mapping[section].get(
                'username_extractor', fallback=_line_extractor_name)]
            username_extractor.configure(mapping[section])
            LOGGER.debug('Requesting entry "%s" from pass', pass_target)
            output = subprocess.check_output(
                ['pass', 'show', pass_target]).decode('utf-8')
            lines = output.splitlines()
            password = password_extractor.get_value(pass_target, lines)
            username = username_extractor.get_value(pass_target, lines)
            if password:
                print('password={password}'.format(password=password))
            if 'username' not in request and username:
                print('username={username}'.format(username=username))
            return
    LOGGER.warning('No mapping matched')
    sys.exit(1)

Resolve the given credential request in the provided mapping definition.

The result is printed automatically.

Args:
    request: The credential request specified as a dict of key-value pairs.
    mapping: The mapping configuration as a ConfigParser instance.
juraj-google-style
def _build_system_message(self, error: str) -> LogMessage:
    return self._base_log.copy() | LogMessage(log_type=LogType.SYSTEM.value,
                                              error=error)

Adds system error information to base log message.

Args:
    * error: error that occurred

Returns:
    * Log: dictionary containing log data
github-repos
def find_runner(program):
    if os.path.isfile(program) and not os.access(program, os.X_OK):
        try:
            opened = open(program)
        except PermissionError:
            return None
        first_line = opened.readline().strip()
        if first_line.startswith('#!'):
            return shlex.split(first_line[2:])
    if program.endswith('.py'):
        return [sys.executable]
    return None

Return a command that will run program.

Args:
    program: The string name of the program to try to run.

Returns:
    commandline list of strings to run the program (eg. with
    subprocess.call()) or None
codesearchnet
def purity(labels, true_labels):
    purity = 0.0
    for i in set(labels):
        indices = (labels == i)
        true_clusters = true_labels[indices]
        if len(true_clusters) == 0:
            continue
        counts = Counter(true_clusters)
        lab, count = counts.most_common()[0]
        purity += count
    return float(purity) / len(labels)

Calculates the purity score for the given labels.

Args:
    labels (array): 1D array of integers
    true_labels (array): 1D array of integers - true labels

Returns:
    purity score - a float between 0 and 1. Closer to 1 is better.
codesearchnet
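A small self-contained check of the score, with the function above in scope (labels chosen arbitrarily):

import numpy as np
from collections import Counter

labels = np.array([0, 0, 1, 1])
true_labels = np.array([0, 0, 1, 0])
# cluster 0 -> majority count 2; cluster 1 -> majority count 1
print(purity(labels, true_labels))  # 0.75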
def cut_sphere(self, radius=15., origin=None, outside_sliced=True,
               preserve_bonds=False):
    if origin is None:
        origin = np.zeros(3)
    elif pd.api.types.is_list_like(origin):
        origin = np.array(origin, dtype='f8')
    else:
        origin = self.loc[origin, ['x', 'y', 'z']]
    molecule = self.get_distance_to(origin)
    if outside_sliced:
        molecule = molecule[molecule['distance'] < radius]
    else:
        molecule = molecule[molecule['distance'] > radius]
    if preserve_bonds:
        molecule = self._preserve_bonds(molecule)
    return molecule

Cut a sphere specified by origin and radius.

Args:
    radius (float):
    origin (list): Please note that you can also pass an integer. In this
        case it is interpreted as the index of the atom which is taken as
        origin.
    outside_sliced (bool): Atoms outside/inside the sphere are cut out.
    preserve_bonds (bool): Do not cut covalent bonds.

Returns:
    Cartesian:
juraj-google-style
def to(self, jid: str):
    if jid is not None and not isinstance(jid, str):
        raise TypeError("'to' MUST be a string")
    self._to = aioxmpp.JID.fromstr(jid) if jid is not None else None

Set jid of the receiver.

Args:
    jid (str): the jid of the receiver.
codesearchnet
def _setup_logger(self, logging_level: int, log_to_console: bool):
    self.logger = logging.getLogger('discord')
    self.logger.handlers = []
    self.logger.setLevel(logging_level)
    formatter = logging.Formatter(style='{',
                                  fmt='{asctime} [{levelname}] {message}',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    file_handler = logging.FileHandler('pycord.log')
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging_level)
    self.logger.addHandler(file_handler)
    if log_to_console:
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(formatter)
        stream_handler.setLevel(logging_level)
        self.logger.addHandler(stream_handler)

Sets up the internal logger.

Args:
    logging_level: what logging level to use
    log_to_console: whether or not to log to the console
juraj-google-style
def _frame_advance(self, action):
    self.controllers[0][:] = action
    _LIB.Step(self._env)

Advance a frame in the emulator with an action.

Args:
    action (byte): the action to press on the joy-pad

Returns:
    None
codesearchnet
def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):
    if range_min == range_max:
        return tensor
    float_tensor = tf.to_float(tensor)
    scaled_tensor = tf.divide(
        tf.subtract(float_tensor, range_min) *
        tf.constant(float(scale_max - scale_min)),
        tf.constant(float(range_max - range_min)))
    shifted_tensor = scaled_tensor + tf.constant(float(scale_min))
    return shifted_tensor

Scale a tensor to scale_min to scale_max.

Args:
    tensor: input tensor. Should be a numerical tensor.
    range_min: min expected value for this feature/tensor.
    range_max: max expected value.
    scale_min: new expected min value.
    scale_max: new expected max value.

Returns:
    scaled tensor.
juraj-google-style
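The same linear rescaling in plain NumPy, as a sanity check (values are illustrative):

import numpy as np

x = np.array([0.0, 5.0, 10.0])
range_min, range_max = 0.0, 10.0
scale_min, scale_max = -1.0, 1.0
scaled = (x - range_min) * (scale_max - scale_min) / (range_max - range_min) + scale_min
print(scaled)  # [-1.  0.  1.]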
def __init__(self, names=None):
    if not names:
        raise errors.FormatError('Missing names value.')
    super(ArtifactGroupSourceType, self).__init__()
    self.names = names

Initializes a source type.

Args:
    names (Optional[str]): artifact definition names.

Raises:
    FormatError: when artifact names is not set.
juraj-google-style
def __call__(self, match_quality_matrix):
    assert match_quality_matrix.dim() == 2
    if match_quality_matrix.numel() == 0:
        default_matches = match_quality_matrix.new_full(
            (match_quality_matrix.size(1),), 0, dtype=torch.int64)
        default_match_labels = match_quality_matrix.new_full(
            (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8)
        return (default_matches, default_match_labels)
    assert torch.all(match_quality_matrix >= 0)
    matched_vals, matches = match_quality_matrix.max(dim=0)
    match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
    for l, low, high in zip(self.labels, self.thresholds[:-1],
                            self.thresholds[1:]):
        low_high = (matched_vals >= low) & (matched_vals < high)
        match_labels[low_high] = l
    if self.allow_low_quality_matches:
        self.set_low_quality_matches_(match_labels, match_quality_matrix)
    return (matches, match_labels)

Args:
    match_quality_matrix (Tensor[float]): an MxN tensor, containing the
        pairwise quality between M ground-truth elements and N predicted
        elements. All elements must be >= 0 (due to the use of
        `torch.nonzero` for selecting indices in `set_low_quality_matches_`).

Returns:
    matches (Tensor[int64]): a vector of length N, where matches[i] is a
        matched ground-truth index in [0, M)
    match_labels (Tensor[int8]): a vector of length N, where pred_labels[i]
        indicates whether a prediction is a true or false positive or ignored
github-repos
def case_mme_delete(self, case_obj, user_obj):
    institute_obj = self.institute(case_obj['owner'])
    for individual in case_obj['individuals']:
        if individual['phenotype'] == 2:
            self.create_event(institute=institute_obj, case=case_obj,
                              user=user_obj, link='', category='case',
                              verb='mme_remove',
                              subject=individual['display_name'],
                              level='specific')
    case_obj['mme_submission'] = None
    updated_case = self.update_case(case_obj)
    return updated_case

Delete a MatchMaker submission from a case record and create the related
event.

Args:
    case_obj(dict): a scout case object
    user_obj(dict): a scout user object

Returns:
    updated_case(dict): the updated scout case
juraj-google-style
def _GetMaps(name, commands, default_options):
    global_options = copy.copy(default_options)
    options_map = collections.defaultdict(lambda: copy.copy(default_options))
    subcommands_map = collections.defaultdict(set)
    for command in commands:
        if len(command) == 1:
            if _IsOption(command[0]):
                global_options.add(command[0])
            else:
                subcommands_map[name].add(command[0])
        elif command:
            subcommand = command[-2]
            arg = _FormatForCommand(command[-1])
            if _IsOption(arg):
                args_map = options_map
            else:
                args_map = subcommands_map
            args_map[subcommand].add(arg)
            args_map[subcommand.replace('_', '-')].add(arg)
    return (global_options, options_map, subcommands_map)

Returns sets of subcommands and options for each command.

Args:
    name: The first token in the commands, also the name of the command.
    commands: A list of all possible commands that tab completion can
        complete to. Each command is a list or tuple of the string tokens
        that make up that command.
    default_options: A dict of options that can be used with any command. Use
        this if there are flags that can always be appended to a command.

Returns:
    global_options: A set of all options of the first token of the command.
    options_map: A dict storing set of options for each subcommand.
    subcommands_map: A dict storing set of subcommands for each
        command/subcommand.
github-repos
def get_content_type(content_type):
    m = email.message.Message()
    m['Content-Type'] = content_type
    return m.get_content_type()

Extract the MIME type value from a content type string.

Removes any subtype and parameter values that may be present in the string.

Args:
    content_type: str
        String with content type and optional subtype and parameter fields.

Returns:
    str: String with only content type

Example::

    Input: multipart/form-data; boundary=aBoundaryString
    Returns: multipart/form-data
codesearchnet
def from_data(cls, data):
    obj = cls()
    with contextlib.closing(BytesIO(data)) as file_handle:
        obj.load_file(file_handle)
    return obj

Load an FCS file from a bytes-like object.

Args:
    data: buffer containing contents of an FCS file.

Returns:
    FCSParser instance with data loaded
juraj-google-style
def update_one_time_key_counts(self, counts):
    self.one_time_keys_manager.server_counts = counts
    if self.one_time_keys_manager.should_upload():
        logger.info('Uploading new one-time keys.')
        self.upload_one_time_keys()

Update data on one-time keys count and upload new ones if necessary.

Args:
    counts (dict): Counts of keys currently on the HS for each key type.
codesearchnet
def __init__(self, batch_size: int, ngram_len: int,
             context_history_size: int, device: torch.device):
    self.context = torch.zeros((batch_size, ngram_len - 1),
                               dtype=torch.int64, device=device)
    self.context_history = torch.zeros((batch_size, context_history_size),
                                       dtype=torch.int64, device=device)
    self.num_calls = 0

Initializes the state.

Args:
    batch_size (`int`): Batch size.
    ngram_len (`int`): Ngram length.
    context_history_size (`int`): Size of the tensor to keep track of seen
        contexts.
    device (`torch.device`): Device to use.
github-repos
def plot_correlation(self, freq=None, title=None, figsize=(12, 6), **kwargs):
    if title is None:
        title = self._get_default_plot_title(freq,
                                             'Return Correlation Matrix')
    rets = self._get_series(freq).to_returns().dropna()
    return rets.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)

Utility function to plot correlations.

Args:
    * freq (str): Pandas data frequency alias string
    * title (str): Plot title
    * figsize (tuple (x,y)): figure size
    * kwargs: passed to Pandas' plot_corr_heatmap function
juraj-google-style
def _greedy_infer(self, features, decode_length, use_tpu=False):
    if use_tpu:
        return self._slow_greedy_infer_tpu(features, decode_length)
    return self._slow_greedy_infer(features, decode_length)

A greedy inference method.

Models should ideally implement a more efficient version of this function.

Args:
    features: a map of string to `Tensor`
    decode_length: an integer. How many additional timesteps to decode.
    use_tpu: A bool, whether to build the inference graph for TPU.

Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if beam_size == 1 or
            [batch_size, top_beams, <= decode_length]
        "scores": None
        "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
        "losses": a dictionary: {loss-name (string): floating point `Scalar`}
    }
codesearchnet
def test_moment_matching(samples, number_moments, dist, stride=0):
    sample_moments = []
    expected_moments = []
    variance_sample_moments = []
    for i in range(1, number_moments + 1):
        if len(samples.shape) == 2:
            strided_range = samples.flat[::(i - 1) * stride + 1]
        else:
            strided_range = samples[::(i - 1) * stride + 1, ...]
        sample_moments.append(np.mean(strided_range ** i, axis=0))
        expected_moments.append(dist.moment(i))
        variance_sample_moments.append(
            (dist.moment(2 * i) - dist.moment(i) ** 2) / len(strided_range))
    z_test_scores = []
    for i in range(1, number_moments + 1):
        total_variance = (variance_sample_moments[i - 1] +
                          i * np.finfo(samples.dtype).eps)
        tiny = np.finfo(samples.dtype).tiny
        assert np.all(total_variance > 0)
        total_variance = np.where(total_variance < tiny, tiny, total_variance)
        z_test_scores.append(abs(
            (sample_moments[i - 1] - expected_moments[i - 1]) /
            np.sqrt(total_variance)))
    return z_test_scores

Return z-test scores for sample moments to match analytic moments.

Given `samples`, check that the first sample `number_moments` match the given
`dist` moments by doing a z-test.

Args:
    samples: Samples from target distribution.
    number_moments: Python `int` describing how many sample moments to check.
    dist: SciPy distribution object that provides analytic moments.
    stride: Distance between samples to check for statistical properties. A
        stride of 0 means to use all samples, while other strides test for
        spatial correlation.

Returns:
    Array of z_test scores.
github-repos
def ListAttrs(cls):
    precondition.AssertType(cls, type)
    if PY2:
        return [item.decode('ascii') for item in dir(cls)]
    else:
        return dir(cls)

A compatibility wrapper for listing class attributes.

This method solves similar Python 2 compatibility issues for the `dir`
function as `GetName` does for `__name__` invocations. See documentation for
`GetName` for more details.

Once support for Python 2 is dropped all invocations of this function should
be replaced with ordinary `dir` calls.

Args:
    cls: A class object to list the attributes for.

Returns:
    A list of attribute names as unicode strings.
codesearchnet
def parse_statement(self, statement, orig_contents):
    children = []
    is_block = False
    name = statement.getName()
    if name == 'block':
        children_statements = statement[1]
        for child in children_statements:
            parsed = self.parse_statement(child, orig_contents=orig_contents)
            children.append(parsed)
        locn = statement[0]['location']
        statement = statement[0][1]
        name = statement.getName()
        is_block = True
    else:
        stmt_language = get_statement()
        locn = statement['location']
        statement = statement['match']
        statement_string = str(u''.join(statement.asList()))
        try:
            statement = stmt_language.parseString(statement_string)[0]
        except (pyparsing.ParseException,
                pyparsing.ParseSyntaxException) as exc:
            raise SensorGraphSyntaxError(
                'Error parsing statement in sensor graph file',
                message=exc.msg,
                line=pyparsing.line(locn, orig_contents).strip(),
                line_number=pyparsing.lineno(locn, orig_contents),
                column=pyparsing.col(locn, orig_contents))
        except SensorGraphSemanticError as exc:
            raise SensorGraphSemanticError(
                exc.msg,
                line=pyparsing.line(locn, orig_contents).strip(),
                line_number=pyparsing.lineno(locn, orig_contents),
                **exc.params)
        name = statement.getName()
    if name not in statement_map:
        raise ArgumentError('Unknown statement in sensor graph file',
                            parsed_statement=statement, name=name)
    line = pyparsing.line(locn, orig_contents).strip()
    line_number = pyparsing.lineno(locn, orig_contents)
    column = pyparsing.col(locn, orig_contents)
    location_info = LocationInfo(line, line_number, column)
    if is_block:
        return statement_map[name](statement, children=children,
                                   location=location_info)
    return statement_map[name](statement, location_info)

Parse a statement, possibly called recursively.

Args:
    statement (int, ParseResult): The pyparsing parse result that contains
        one statement prepended with the match location
    orig_contents (str): The original contents of the file that we're parsing
        in case we need to convert an index into a line, column pair.

Returns:
    SensorGraphStatement: The parsed statement.
codesearchnet
def bit_flip(p: Optional[float] = None) -> Union[common_gates.XPowGate, BitFlipChannel]:
    if p is None:
        return pauli_gates.X
    return _bit_flip(p)

Construct a BitFlipChannel that flips a qubit state with probability of a
flip given by p. If p is None, return a guaranteed flip in the form of an X
operation.

This channel evolves a density matrix via

$$
\rho \rightarrow M_0 \rho M_0^\dagger + M_1 \rho M_1^\dagger
$$

with

$$
\begin{aligned}
M_0 =& \sqrt{1-p} \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \\
M_1 =& \sqrt{p} \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}
\end{aligned}
$$

Args:
    p: the probability of a bit flip.

Raises:
    ValueError: if p is not a valid probability.
codesearchnet
def _parse_graph(self):
    if self.exists:
        self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data,
                                                         self.headers)
    else:
        self.rdf.graph = rdflib.Graph()
    self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(
        self.rdf.graph)
    for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
        self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)
    for ns_prefix, ns_uri in self.rdf.graph.namespaces():
        setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
        setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)
    self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)
    self.parse_object_like_triples()

Use Content-Type from headers to determine parsing method.

Args:
    None

Returns:
    None: sets self.rdf by parsing data from GET request, or setting a blank
    graph if the resource does not yet exist
codesearchnet
def parse_panel_app_gene(app_gene, hgnc_map):
    gene_info = {}
    confidence_level = app_gene['LevelOfConfidence']
    if not confidence_level == 'HighEvidence':
        return gene_info
    hgnc_symbol = app_gene['GeneSymbol']
    hgnc_ids = get_correct_ids(hgnc_symbol, hgnc_map)
    if not hgnc_ids:
        LOG.warning("Gene %s does not exist in database. Skipping gene...",
                    hgnc_symbol)
        return gene_info
    if len(hgnc_ids) > 1:
        LOG.warning("Gene %s has unclear identifier. Choose random id",
                    hgnc_symbol)
    gene_info['hgnc_symbol'] = hgnc_symbol
    for hgnc_id in hgnc_ids:
        gene_info['hgnc_id'] = hgnc_id
    gene_info['reduced_penetrance'] = INCOMPLETE_PENETRANCE_MAP.get(
        app_gene['Penetrance'])
    inheritance_models = []
    for model in MODELS_MAP.get(app_gene['ModeOfInheritance'], []):
        inheritance_models.append(model)
    gene_info['inheritance_models'] = inheritance_models
    return gene_info

Parse a PanelApp-formatted gene.

Args:
    app_gene(dict): Dict with panel app info
    hgnc_map(dict): Map from hgnc_symbol to hgnc_id

Returns:
    gene_info(dict): Scout information
juraj-google-style
def index_normalize(index_val):
    index_val = index_val.lower().strip()
    index_val = re.sub('^\\W*', '', index_val)
    index_val = re.sub('\\W*$', '', index_val)
    index_val = re.sub('\\W+', '_', index_val)
    index_val = re.sub('_+', '_', index_val)
    return index_val

Normalize a dictionary calculated key.

When parsing, keys within a dictionary may come from the input text. To
ensure there is no space or other special characters, one should use this
function. This is useful because DictExt dictionaries can be accessed with a
dotted notation that only supports ``A-Za-z0-9_`` chars.

Args:
    index_val (str): The candidate string to a dictionary key.

Returns:
    str: A normalized string with only ``A-Za-z0-9_`` chars

Examples:
    >>> index_normalize('this my key')
    'this_my_key'
    >>> index_normalize('this -my- %key%')
    'this_my_key'
codesearchnet
def decode_csv(csv_string, column_names):
    import csv
    r = next(csv.reader([csv_string]))
    if len(r) != len(column_names):
        raise ValueError('csv line %s does not have %d columns' %
                         (csv_string, len(column_names)))
    return {k: v for k, v in zip(column_names, r)}

Parse a csv line into a dict.

Args:
    csv_string: a csv string. May contain missing values "a,,c"
    column_names: list of column names

Returns:
    Dict of {column_name, value_from_csv}. If there are missing values,
    value_from_csv will be ''.
codesearchnet
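Example round trip, with the function above in scope (column names are made up):

row = decode_csv('a,,c', ['x', 'y', 'z'])
print(row)  # {'x': 'a', 'y': '', 'z': 'c'} -- the missing value comes back as ''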
def assignee(self, main_type, sub_type, unique_id, assignee_id, action='ADD',
             params=None):
    params = params or {}
    url = '/v2/{}/{}/{}/assignees/{}'.format(main_type, sub_type, unique_id,
                                             assignee_id)
    if action == 'GET':
        return self.tcex.session.get(url, params=params)
    if action == 'DELETE':
        return self.tcex.session.delete(url)
    if action == 'ADD':
        return self.tcex.session.post(url)
    return None

Args:
    main_type:
    sub_type:
    unique_id:
    assignee_id:
    action:
    params:

Return:
juraj-google-style
def read_from_text(path: str):
    return beam_io.ReadFromText(path) | beam.Map(lambda s: beam.Row(line=s))

Reads lines from text files.

The resulting PCollection consists of rows with a single string field named
"line".

Args:
    path (str): The file path to read from. The path can contain glob
        characters such as ``*`` and ``?``.
github-repos
def _get_query_params(self):
    result = {}
    if self.next_page_token is not None:
        result[self._PAGE_TOKEN] = self.next_page_token
    if self.max_results is not None:
        result[self._MAX_RESULTS] = self.max_results - self.num_results
    result.update(self.extra_params)
    return result

Getter for query parameters for the next request.

Returns:
    dict: A dictionary of query parameters.
codesearchnet
def _PathStripPrefix(self, path):
    if path.startswith('\\\\.\\') or path.startswith('\\\\?\\'):
        if len(path) < 7 or path[5] != ':' or path[6] != self._PATH_SEPARATOR:
            return None
        path = path[7:]
    elif path.startswith('\\\\'):
        return None
    elif len(path) >= 3 and path[1] == ':':
        if path[2] != self._PATH_SEPARATOR:
            return None
        path = path[3:]
    elif path.startswith('\\'):
        path = path[1:]
    else:
        return None
    return path

Strips the prefix from a path.

Args:
    path (str): Windows path to strip the prefix from.

Returns:
    str: path without the prefix or None if the path is not supported.
juraj-google-style
def from_directory(input_dir, optional_files=None):
    sub_d = {}
    for fname, ftype in [('INCAR', Incar), ('KPOINTS', Kpoints),
                         ('POSCAR', Poscar), ('POTCAR', Potcar)]:
        fullzpath = zpath(os.path.join(input_dir, fname))
        sub_d[fname.lower()] = ftype.from_file(fullzpath)
    sub_d['optional_files'] = {}
    if optional_files is not None:
        for fname, ftype in optional_files.items():
            sub_d['optional_files'][fname] = ftype.from_file(
                os.path.join(input_dir, fname))
    return VaspInput(**sub_d)

Read in a set of VASP input from a directory. Note that only the standard
INCAR, POSCAR, POTCAR and KPOINTS files are read unless optional_files is
specified.

Args:
    input_dir (str): Directory to read VASP input from.
    optional_files (dict): Optional files to read in as well as a dict of
        {filename: Object type}. Object type must have a static method
        from_file.
codesearchnet
def convert(self):
    if not self._has_valid_tensors():
        if not self._input_arrays_with_shape or not (
                self._output_arrays or self._control_output_arrays):
            raise ValueError(
                'If input_tensors and output_tensors are None, both '
                'input_arrays_with_shape and '
                'output_arrays|control_output_arrays must be defined.')
    return super(TFLiteFrozenGraphConverter, self).convert()

Converts a TensorFlow GraphDef based on instance variables.

Returns:
    The converted data in serialized format, either a TFLite Flatbuffer or a
    Graphviz graph depending on value in `output_format`.

Raises:
    ValueError: Input shape is not specified. None value for dimension in
        input_tensor.
github-repos
def add_tensor_summary(x, types, name=None, collections=None,
                       main_tower_only=True):
    types = set(types)
    if name is None:
        name = x.op.name
    ctx = get_current_tower_context()
    if main_tower_only and ctx is not None and not ctx.is_main_training_tower:
        return
    SUMMARY_TYPES_DIC = {
        'scalar': lambda: tf.summary.scalar(name + '-summary', x,
                                            collections=collections),
        'histogram': lambda: tf.summary.histogram(name + '-histogram', x,
                                                  collections=collections),
        'sparsity': lambda: tf.summary.scalar(
            name + '-sparsity', tf.nn.zero_fraction(x),
            collections=collections),
        'mean': lambda: tf.summary.scalar(name + '-mean', tf.reduce_mean(x),
                                          collections=collections),
        'rms': lambda: tf.summary.scalar(name + '-rms', rms(x),
                                         collections=collections),
    }
    for typ in types:
        SUMMARY_TYPES_DIC[typ]()

Summarize a tensor by different methods.

Args:
    x (tf.Tensor): a tensor to summarize
    types (list[str]): summary types, can be
        scalar/histogram/sparsity/mean/rms
    name (str): summary name. Defaults to be the op name.
    collections (list[str]): collections of the summary ops.
    main_tower_only (bool): Only run under main training tower. If set to
        True, calling this function under other TowerContext has no effect.

Example:

.. code-block:: python

    with tf.name_scope('mysummaries'):  # to not mess up tensorboard
        add_tensor_summary(
            tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')
codesearchnet
def GetEventTaggingRules(self):
    tagging_rules = {}
    label_name = None
    with io.open(self._path, 'r', encoding='utf-8') as tagging_file:
        for line in tagging_file.readlines():
            line = line.rstrip()
            stripped_line = line.lstrip()
            # skip empty lines and comment lines
            if not stripped_line or stripped_line[0] == '#':
                continue
            if not line[0].isspace():
                label_name = line
                tagging_rules[label_name] = []
                continue
            if not label_name:
                continue
            filter_object = event_filter.EventObjectFilter()
            try:
                filter_object.CompileFilter(stripped_line)
            except errors.ParseError as exception:
                raise errors.TaggingFileError(
                    'Unable to compile filter for label: {0:s} with error: '
                    '{1!s}'.format(label_name, exception))
            if filter_object not in tagging_rules[label_name]:
                tagging_rules[label_name].append(filter_object)
    return tagging_rules

Retrieves the event tagging rules from the tagging file.

Returns:
    dict[str, FilterObject]: tagging rules, that consists of one or more
        filter objects per label.

Raises:
    TaggingFileError: if a filter expression cannot be compiled.
codesearchnet
def parse_query_param(url, param):
    try:
        return parse.parse_qs(parse.urlparse(url).query)[param][0]
    except:
        return None

Parses the query string of a URL and returns the value of a parameter.

Args:
    url: A URL.
    param: A string representing the name of the parameter.

Returns:
    The value of the parameter.
juraj-google-style
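Usage sketch for the function above (it relies on `urllib.parse` being imported as `parse` in its module; the URL is made up):

from urllib import parse

print(parse_query_param('https://example.com/watch?v=abc123&t=42', 'v'))  # 'abc123'
print(parse_query_param('https://example.com/watch', 'v'))                # None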
def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None,
            newfname=None, ensure=False, prefix=None, suffix=None):
    if prefix is not None:
        augpref = prefix
    if suffix is not None:
        augsuf = suffix
    dpath, fname = split(path)
    fname_noext, ext = splitext(fname)
    if newfname is not None:
        fname_noext = newfname
    if newext is None:
        newext = ext
    new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext))
    if augdir is not None:
        new_dpath = join(dpath, augdir)
        if ensure:
            ensuredir(new_dpath)
    else:
        new_dpath = dpath
    newpath = join(new_dpath, new_fname)
    return newpath

Augments the end of a path before the extension.

Args:
    path (str):
    augsuf (str): augment filename before extension

Returns:
    str: newpath

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_path import *  # NOQA
    >>> path = 'somefile.txt'
    >>> augsuf = '_aug'
    >>> newpath = augpath(path, augsuf)
    >>> result = str(newpath)
    >>> print(result)
    somefile_aug.txt

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_path import *  # NOQA
    >>> path = 'somefile.txt'
    >>> augsuf = '_aug2'
    >>> newext = '.bak'
    >>> augdir = 'backup'
    >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir)
    >>> result = str(newpath)
    >>> print(result)
    backup/somefile_aug2.bak
codesearchnet
def add_option(self, section, name, value):
    if self._is_live():
        raise RuntimeError('Submitted units cannot update their options')
    option = {
        'section': section,
        'name': name,
        'value': value,
    }
    self._data['options'].append(option)
    return True

Add an option to a section of the unit file.

Args:
    section (str): The name of the section. If it doesn't exist it will be
        created.
    name (str): The name of the option to add.
    value (str): The value of the option.

Returns:
    True: The item was added.
juraj-google-style
def get_metadata(self, entity_type, entity_id):
    if not is_valid_uuid(entity_id):
        raise StorageArgumentException(
            'Invalid UUID for entity_id: {0}'.format(entity_id))
    return self._authenticated_request.to_endpoint(
        '{}/{}/metadata/'.format(entity_type, entity_id)).return_body().get()

Get metadata of an entity.

Args:
    entity_type (str): Type of the entity. Admitted values: ['project',
        'folder', 'file'].
    entity_id (str): The UUID of the entity to be modified.

Returns:
    A dictionary of the metadata::

        {
            u'bar': u'200',
            u'foo': u'100'
        }

Raises:
    StorageArgumentException: Invalid arguments
    StorageForbiddenException: Server response code 403
    StorageNotFoundException: Server response code 404
    StorageException: other 400-600 error codes
codesearchnet
def infer_annotation(type_comments):
    assert type_comments
    args = {}
    returns = set()
    for comment in type_comments:
        arg_types, return_type = parse_type_comment(comment)
        for i, arg_type in enumerate(arg_types):
            args.setdefault(i, set()).add(arg_type)
        returns.add(return_type)
    combined_args = []
    for i in sorted(args):
        arg_infos = list(args[i])
        kind = argument_kind(arg_infos)
        if kind is None:
            raise InferError('Ambiguous argument kinds:\n' +
                             '\n'.join(type_comments))
        types = [arg.type for arg in arg_infos]
        combined = combine_types(types)
        if str(combined) == 'None':
            combined = UnionType([ClassType('None'), AnyType()])
        if kind != ARG_POS and (len(str(combined)) > 120 or
                                isinstance(combined, UnionType)):
            combined = AnyType()
        combined_args.append(Argument(combined, kind))
    combined_return = combine_types(returns)
    return (combined_args, combined_return)

Given some type comments, return a single inferred signature.

Args:
    type_comments: Strings of form '(arg1, ... argN) -> ret'

Returns:
    Tuple of (argument types and kinds, return type).
codesearchnet
def sagemaker_auth(overrides={}, path="."):
    api_key = overrides.get(env.API_KEY, Api().api_key)
    if api_key is None:
        raise ValueError(
            "Can't find W&B ApiKey, set the WANDB_API_KEY env variable "
            "or run `wandb login`")
    overrides[env.API_KEY] = api_key
    with open(os.path.join(path, "secrets.env"), "w") as file:
        for k, v in six.iteritems(overrides):
            file.write("{}={}\n".format(k, v))

Write a secrets.env file with the W&B ApiKey and any additional secrets
passed.

Args:
    overrides (dict, optional): Additional environment variables to write to
        secrets.env
    path (str, optional): The path to write the secrets file.
juraj-google-style
def set_expiration(self, key, ignore_missing=False, additional_seconds=None,
                   seconds=None):
    if key not in self.time_dict and ignore_missing:
        return
    elif key not in self.time_dict and not ignore_missing:
        raise Exception('Key missing from `TimedDict` and `ignore_missing` '
                        'is False.')
    if additional_seconds is not None:
        self.time_dict[key] += additional_seconds
    elif seconds is not None:
        self.time_dict[key] = time.time() + seconds

Alters the expiration time for a key.

If the key is not present, then raise an Exception unless `ignore_missing` is
set to `True`.

Args:
    key: The key whose expiration we are changing.
    ignore_missing (bool): If set, then return silently if the key does not
        exist. Default is `False`.
    additional_seconds (int): Add this many seconds to the current expiration
        time.
    seconds (int): Expire the key this many seconds from now.
codesearchnet
def create_position_ids_from_input_ids(input_ids, padding_idx,
                                       past_key_values_length=0):
    mask = tf.cast(input_ids != padding_idx, tf.int64)
    incremental_indices = (tf.cumsum(mask, axis=1) +
                           past_key_values_length) * mask
    return incremental_indices + padding_idx

Replace non-padding symbols with their position numbers. Position numbers
begin at padding_idx+1. Padding symbols are ignored. This is modified from
fairseq's `utils.make_positions`.

Args:
    input_ids: tf.Tensor
    padding_idx: int
    past_key_values_length: int

Returns:
    tf.Tensor
github-repos
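A worked example with a right-padded batch, with the function above in scope (padding_idx of 1 is assumed here, as in RoBERTa-style vocabularies):

import tensorflow as tf

input_ids = tf.constant([[5, 6, 7, 1, 1]])  # 1 is the padding token
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# [[2 3 4 1 1]] -- real tokens count up from padding_idx + 1, pads stay at padding_idx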
def _get_head_block(self, request):
    if request.head_id:
        if self._id_regex.fullmatch(request.head_id) is None:
            LOGGER.debug('Invalid head id requested: %s', request.head_id)
            raise _ResponseFailed(self._status.NO_ROOT)
        try:
            return self._block_store[request.head_id]
        except KeyError as e:
            LOGGER.debug('Unable to find block "%s" in store', e)
            raise _ResponseFailed(self._status.NO_ROOT)
    else:
        return self._get_chain_head()

Fetches the request specified head block, or the chain head.

Note:
    This method will fail if `_block_store` has not been set.

Args:
    request (object): The parsed protobuf request object

Returns:
    Block: the block object at the head of the requested chain

Raises:
    ResponseFailed: Failed to retrieve a head block
juraj-google-style
def atomic_observe(self, states, actions, internals, reward, terminal):
    self.current_terminal = terminal
    self.current_reward = reward
    if self.unique_state:
        states = dict(state=states)
    if self.unique_action:
        actions = dict(action=actions)
    self.episode = self.model.atomic_observe(
        states=states, actions=actions, internals=internals,
        terminal=self.current_terminal, reward=self.current_reward)

Utility method for unbuffered observing where each tuple is inserted into
TensorFlow via a single session call, thus avoiding race conditions in
multi-threaded mode.

Observe a full experience tuple from the environment to learn from.
Optionally pre-processes rewards. Child classes should call super to get the
processed reward, e.g. terminal, reward = super()...

Args:
    states (any): One state (usually a value tuple) or dict of states if
        multiple states are expected.
    actions (any): One action (usually a value tuple) or dict of actions if
        multiple actions are expected.
    internals (any): Internal list.
    terminal (bool): boolean indicating if the episode terminated after the
        observation.
    reward (float): scalar reward that resulted from executing the action.
codesearchnet
def _process_debug_graph_node(self, node):
    if is_debug_node(node.name):
        return
    if node.name in self._node_inputs:
        raise ValueError("Duplicate node name on device %s: '%s'" %
                         (self._device_name, node.name))
    self._node_attributes[node.name] = node.attr
    self._node_inputs[node.name] = []
    self._node_ctrl_inputs[node.name] = []
    self._node_recipients[node.name] = []
    self._node_ctrl_recipients[node.name] = []
    if node.name not in self._node_devices:
        self._node_devices[node.name] = set()
    self._node_devices[node.name].add(
        node.device if node.device else self._device_name)
    self._node_op_types[node.name] = node.op
    self._ref_args[node.name] = self._get_ref_args(node)
    for inp in node.input:
        if is_copy_node(inp) and (node.op == '_Send' or node.op == '_Retval'):
            self._copy_send_nodes.append(node.name)
        if inp.startswith('^'):
            cinp = inp[1:]
            self._node_ctrl_inputs[node.name].append(cinp)
        else:
            self._node_inputs[node.name].append(inp)

Process a node from the debug GraphDef.

Args:
    node: (NodeDef) A partition-graph node to be processed.

Raises:
    ValueError: If duplicate node names are encountered.
github-repos
def get(self, id):
    request_url = self._client.base_api_url + self.detail_url.format(id=id)
    response = self._client.session.get(request_url)
    self.validate_request_success(
        response_text=response.text,
        request_url=request_url,
        status_code=response.status_code,
        expected_status_code=HTTP_200_OK,
    )
    return self.response_data_to_model_instance(response.json())

Get the model instance with a given id.

Args:
    id (int or str): The primary identifier (e.g., pk or UUID) for the task
        instance to get.

Returns:
    :class:`saltant.models.resource.Model`: A
        :class:`saltant.models.resource.Model` subclass instance representing
        the resource requested.
juraj-google-style
def merge_transformers_sharded_states(path, num_checkpoints):
    state_dict = {}
    for i in range(1, num_checkpoints + 1):
        checkpoint_path = os.path.join(
            path, f'pytorch_model-{i:05d}-of-{num_checkpoints:05d}.bin')
        check_torch_load_is_safe()
        current_chunk = torch.load(checkpoint_path, map_location='cpu',
                                   weights_only=True)
        state_dict.update(current_chunk)
    return state_dict

Merge sharded checkpoints from transformers into a single checkpoint.

Args:
    path (str): the path to the sharded checkpoints
    num_checkpoints (int): the number of checkpoints to merge
github-repos
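Usage sketch for the function above (the directory path is illustrative):

state_dict = merge_transformers_sharded_states('/ckpts/my_model', num_checkpoints=3)
# expects /ckpts/my_model/pytorch_model-00001-of-00003.bin,
#         .../pytorch_model-00002-of-00003.bin and .../pytorch_model-00003-of-00003.bin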
def get(self, key, default=None):
    index = self._xxx_field_to_index.get(key)
    if index is None:
        return default
    return self._xxx_values[index]

Return a value for key, with a default value if it does not exist.

Args:
    key (str): The key of the column to access
    default (object): The default value to use if the key does not exist.
        (Defaults to :data:`None`.)

Returns:
    object: The value associated with the provided key, or a default value.

Examples:
    When the key exists, the value associated with it is returned.

    >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')
    'a'

    The default value is :data:`None` when the key does not exist.

    >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')
    None

    The default value can be overridden with the ``default`` parameter.

    >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')
    ''

    >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default='')
    ''
codesearchnet
def list_files_in_directory(full_directory_path):
    files = list()
    for file_name in __os.listdir(full_directory_path):
        if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
            files.append(file_name)
    return files

List the files in a specified directory.

Args:
    full_directory_path: The full directory path to check, derived from the
        os module.

Returns:
    returns a list of files
juraj-google-style
def cast_losses_to_common_dtype(losses):
    highest_float = None
    for loss in losses:
        if loss.dtype.is_floating:
            if highest_float is None or loss.dtype.size > highest_float.size:
                highest_float = loss.dtype
            elif {loss.dtype, highest_float} == {'bfloat16', 'float16'}:
                highest_float = 'float32'
        if loss.dtype.is_complex:
            return losses
    if highest_float:
        losses = [math_ops.cast(loss, highest_float) for loss in losses]
    return losses

Cast a list of losses to a common dtype.

If any loss is floating-point, they will all be casted to the most-precise
floating-point loss. Otherwise the losses are not casted. We also skip
casting losses if there are any complex losses.

Args:
    losses: A list of losses.

Returns:
    `losses`, but they have been casted to a common dtype.
github-repos
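A behaviour sketch, with the function above in scope (it depends on TensorFlow's internal `math_ops` module, so it must be called from within that codebase or with that import wired up):

import tensorflow as tf

losses = [tf.constant(1.0, dtype=tf.float16), tf.constant(2.0, dtype=tf.float32)]
print([l.dtype for l in cast_losses_to_common_dtype(losses)])
# [tf.float32, tf.float32] -- everything is promoted to the widest float present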
def add_device(self, path):
    hdevice = self._libinput.libinput_path_add_device(self._li,
                                                      path.encode())
    if hdevice:
        return Device(hdevice, self._libinput)
    return None

Add a device to a libinput context.

If successful, the device will be added to the internal list and re-opened
on :meth:`~libinput.LibInput.resume`. The device can be removed with
:meth:`remove_device`. If the device was successfully initialized, it is
returned.

Args:
    path (str): Path to an input device.

Returns:
    ~libinput.define.Device: A device object or :obj:`None`.
codesearchnet
def IsTemplateParameterList(clean_lines, linenum, column):
    (_, startline, startpos) = ReverseCloseExpression(
        clean_lines, linenum, column)
    if (startpos > -1 and
            Search(r'\btemplate\s*$',
                   clean_lines.elided[startline][0:startpos])):
        return True
    return False

Check if the token ending on (linenum, column) is the end of template<>.

Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.

Returns:
    True if this token is end of a template parameter list, False otherwise.
juraj-google-style
def get_inverse_sqrt_schedule(optimizer: Optimizer, num_warmup_steps: int,
                              timescale: Optional[int] = None,
                              last_epoch: int = -1):
    if timescale is None:
        timescale = num_warmup_steps or 10000
    lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda,
                        num_warmup_steps=num_warmup_steps,
                        timescale=timescale)
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)

Create a schedule with an inverse square-root learning rate, from the initial
lr set in the optimizer, after a warmup period which increases lr linearly
from 0 to the initial lr set in the optimizer.

Args:
    optimizer ([`~torch.optim.Optimizer`]):
        The optimizer for which to schedule the learning rate.
    num_warmup_steps (`int`):
        The number of steps for the warmup phase.
    timescale (`int`, *optional*, defaults to `num_warmup_steps`):
        Time scale.
    last_epoch (`int`, *optional*, defaults to -1):
        The index of the last epoch when resuming training.

Return:
    `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
github-repos
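Usage sketch with a toy model (hyperparameters are arbitrary); the scheduler is stepped once per optimizer step:

import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_inverse_sqrt_schedule(optimizer, num_warmup_steps=100)
for step in range(1000):
    optimizer.step()   # lr ramps up linearly for 100 steps, then decays ~1/sqrt(step)
    scheduler.step()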
def _ParseExample(self, example_features, example_feature_lists, entries,
                  index):
    features_seen = set()
    for feature_list, is_feature in zip(
            [example_features, example_feature_lists], [True, False]):
        sequence_length = None
        for feature_name in feature_list:
            if feature_name not in entries:
                entries[feature_name] = {'vals': [], 'counts': [],
                                         'feat_lens': [], 'missing': index}
            feature_entry = entries[feature_name]
            feature = feature_list[feature_name]
            value_type = None
            value_list = []
            if is_feature:
                if feature.HasField('float_list'):
                    value_list = feature.float_list.value
                    value_type = self.fs_proto.FLOAT
                elif feature.HasField('bytes_list'):
                    value_list = feature.bytes_list.value
                    value_type = self.fs_proto.STRING
                elif feature.HasField('int64_list'):
                    value_list = feature.int64_list.value
                    value_type = self.fs_proto.INT
            else:
                sequence_length = len(feature.feature)
                if (sequence_length != 0 and
                        feature.feature[0].HasField('float_list')):
                    for feat in feature.feature:
                        for value in feat.float_list.value:
                            value_list.append(value)
                    value_type = self.fs_proto.FLOAT
                elif (sequence_length != 0 and
                      feature.feature[0].HasField('bytes_list')):
                    for feat in feature.feature:
                        for value in feat.bytes_list.value:
                            value_list.append(value)
                    value_type = self.fs_proto.STRING
                elif (sequence_length != 0 and
                      feature.feature[0].HasField('int64_list')):
                    for feat in feature.feature:
                        for value in feat.int64_list.value:
                            value_list.append(value)
                    value_type = self.fs_proto.INT
            if value_type is not None:
                if 'type' not in feature_entry:
                    feature_entry['type'] = value_type
                elif feature_entry['type'] != value_type:
                    raise TypeError('type mismatch for feature ' +
                                    feature_name)
            feature_entry['counts'].append(len(value_list))
            feature_entry['vals'].extend(value_list)
            if sequence_length is not None:
                feature_entry['feat_lens'].append(sequence_length)
            if value_list:
                features_seen.add(feature_name)
    for f in entries:
        fv = entries[f]
        if f not in features_seen:
            fv['missing'] += 1

Parses data from an example, populating a dictionary of feature values.

Args:
    example_features: A map of strings to tf.Features from the example.
    example_feature_lists: A map of strings to tf.FeatureLists from the
        example.
    entries: A dictionary of all features parsed thus far and arrays of their
        values. This is mutated by the function.
    index: The index of the example to parse from a list of examples.

Raises:
    TypeError: Raises an exception when a feature has inconsistent types
        across examples.
codesearchnet
def invoice(request, invoice_id, access_code=None): current_invoice = InvoiceController.for_id_or_404(invoice_id) if (not current_invoice.can_view(user=request.user, access_code=access_code)): raise Http404() data = {'invoice': current_invoice.invoice} return render(request, 'registrasion/invoice.html', data)
Displays an invoice. This view is not authenticated, but it will only allow access to one of the following: the user the invoice belongs to; staff; or a request made with the correct access code. Arguments: invoice_id (castable to int): The invoice_id for the invoice you want to view. access_code (Optional[str]): The access code for the user who owns this invoice. Returns: render: Renders ``registrasion/invoice.html``, with the following data:: { "invoice": models.commerce.Invoice(), } Raises: Http404: if the current user cannot view this invoice and the correct access_code is not provided.
codesearchnet
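A hypothetical URLconf wiring for the invoice view above; the path names and module layout are assumptions, not part of the original project:
from django.urls import path
from registrasion import views

urlpatterns = [
    path("invoice/<int:invoice_id>/", views.invoice, name="invoice"),
    path("invoice/<int:invoice_id>/<str:access_code>/", views.invoice,
         name="invoice_access"),
]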
def _InitSSLContext(self, cafile=None, disable_ssl_certificate_validation=False):
    try:
        if disable_ssl_certificate_validation:
            # Monkeypatches the process-wide default so stdlib HTTPS clients
            # also skip verification; the context returned below is still the
            # one produced by create_default_context().
            ssl._create_default_https_context = ssl._create_unverified_context
            ssl_context = ssl.create_default_context()
        else:
            ssl_context = ssl.create_default_context(cafile=cafile)
    except AttributeError:
        # Older Pythons (pre-2.7.9) lack these ssl helpers.
        return None
    return ssl_context
Creates a ssl.SSLContext with the given settings. Args: cafile: A str identifying the resolved path to the cafile. If not set, this will use the system default cafile. disable_ssl_certificate_validation: A boolean indicating whether certificate verification is disabled. For security purposes, it is highly recommended that certificate verification remain enabled. Returns: An ssl.SSLContext instance, or None if the version of Python being used doesn't support it.
codesearchnet
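A usage sketch, assuming the method lives on an already-constructed client object; the client name and CA path are placeholders:
ctx = client._InitSSLContext()   # verified context, system default CAs
pinned = client._InitSSLContext(cafile="/etc/ssl/certs/ca-bundle.crt")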
def _benchmarkFeed(self, name, target, size, iters): feed_val = np.random.rand(size).astype(np.float32) times = [] with ops.Graph().as_default(): p = array_ops.placeholder(dtypes.float32, shape=[size]) no_op = array_ops.identity(p).op with session.Session(target) as sess: sess.run(no_op, feed_dict={p: feed_val}) for _ in range(iters): start_time = time.time() sess.run(no_op, feed_dict={p: feed_val}) end_time = time.time() times.append(end_time - start_time) print('%s %d %f' % (name, size, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
Runs a microbenchmark to measure the cost of feeding a tensor. Reports the median cost of feeding a tensor of `size` * `sizeof(float)` bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be feed. iters: The number of iterations to perform.
github-repos
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None): with tf.variable_scope("sigmoid_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.sigmoid_cross_entropy( multi_class_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights
juraj-google-style
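A shape-level usage sketch; tf.losses and tf.variable_scope are TF1-style APIs, so under TF2 this assumes compat.v1 graph mode:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

logits = tf.zeros([32, 1, 1, 10])
labels = tf.ones([32, 1, 1, 10])
loss, weight = sigmoid_cross_entropy_one_hot(logits, labels)
# loss is a scalar tensor; weight is the constant 1.0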
def activate_nsxcontroller(self, **kwargs): name = kwargs.pop('name') name_args = dict(name=name) method_name = 'nsx_controller_activate' method_class = self._brocade_tunnels nsxcontroller_attr = getattr(method_class, method_name) config = nsxcontroller_attr(**name_args) output = self._callback(config) return output
Activate NSX Controller Args: name (str): nsxcontroller name callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
juraj-google-style
def set(self, key, data): self.raise_error_if_not_open() if key in self._file: del self._file[key] self._file.create_dataset(key, data=data)
Store the given data in the container under the given key. Any existing data for the given key is discarded/overwritten. Args: key (str): A key to store the data for. data (numpy.ndarray): Array-like data. Note: The container has to be opened in advance.
juraj-google-style
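A small sketch of storing an array under a key, assuming an HDF5-backed container instance that has already been opened (the variable and key names are placeholders):
import numpy as np

container.set("utt-0001", np.random.rand(13, 100))   # replaces any prior data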
def find(entity, **kwargs): try: typedfields = entity.typed_fields() except AttributeError: typedfields = iterfields(entity.__class__) matching = [x for x in typedfields if _matches(x, kwargs)] return matching
Return all TypedFields found on the input `Entity` that were initialized with the input **kwargs. Example: >>> find(myentity, multiple=True, type_=Foo) Note: TypedFields.__init__() can accept a string or a class as a type_ argument, but this method expects a class. Args: **kwargs: TypedField __init__ **kwargs to search on. Returns: A list of TypedFields with matching **kwarg values.
juraj-google-style
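A hedged sketch built around the docstring's own example; myentity and Foo are hypothetical stand-ins for an Entity instance and a field type:
matching = find(myentity, multiple=True, type_=Foo)
for field in matching:
    print(field)   # each item is a TypedField whose init kwargs matched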
def preprocess_data(dataset_path: str, training_set_path: str, labels_path: str, test_set_path: str): df = pandas.read_csv(dataset_path) df['Grade'].replace(['low', 'medium', 'high'], [0, 1, 2], inplace=True) x = df.drop(columns=['Grade']) y = df['Grade'] x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.6, random_state=99) x_train.to_csv(training_set_path, index=False) y_train.to_csv(labels_path, index=False) x_test.to_csv(test_set_path, index=False)
Helper function to split the dataset into a training set and its labels and a test set. The training set and its labels are used to train a lightweight model. The test set is used to create a test streaming pipeline. Args: dataset_path: path to csv file containing the Kaggle milk quality dataset training_set_path: path to output the training samples labels_path: path to output the labels for the training set test_set_path: path to output the test samples
github-repos
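A direct invocation sketch; the file paths are placeholders for the Kaggle milk-quality CSV and the desired output files:
preprocess_data(
    dataset_path="milknew.csv",
    training_set_path="train_features.csv",
    labels_path="train_labels.csv",
    test_set_path="test_features.csv",
)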
def crc(msg, encode=False): generator = np.array([1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,1,0,0,1]) ng = len(generator) msgnpbin = bin2np(hex2bin(msg)) if encode: msgnpbin[-24:] = [0] * 24 for i in range(len(msgnpbin)-24): if msgnpbin[i] == 0: continue msgnpbin[i:i+ng] = np.bitwise_xor(msgnpbin[i:i+ng], generator) reminder = np2bin(msgnpbin[-24:]) return reminder
Mode-S Cyclic Redundancy Check. Detect whether a bit error occurred in the Mode-S message. Args: msg (string): 28-character hexadecimal message string encode (bool): True to encode the data only and return the checksum Returns: string: message checksum, or parity bits (encoder)
juraj-google-style
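A quick check sketch; the message is a commonly cited ADS-B example, and an all-zero 24-bit remainder is taken to mean no detected bit error:
msg = "8D406B902015A678D4D220AA4BDA"
remainder = crc(msg)                 # 24-character '0'/'1' string
print(remainder == "0" * 24)         # True when the parity check passes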
def __init__(self, element_shape=None, dtype=dtypes.float32, dynamic_size=False, infer_shape=True): self._element_shape = tensor_shape.as_shape(element_shape) self._dtype = dtypes.as_dtype(dtype) self._dynamic_size = dynamic_size self._infer_shape = infer_shape
Constructs a type specification for a `tf.TensorArray`. Args: element_shape: The shape of each element in the `TensorArray`. dtype: Data type of the `TensorArray`. dynamic_size: Whether the `TensorArray` can grow past its initial size. infer_shape: Whether shape inference is enabled.
github-repos
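A construction sketch, assuming this constructor is the one behind the public tf.TensorArraySpec used in tf.function input signatures:
import tensorflow as tf

spec = tf.TensorArraySpec(element_shape=[3], dtype=tf.float32,
                          dynamic_size=True, infer_shape=True)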
def analyze_async(output_dir, dataset, cloud=False, project_id=None): import google.datalab.utils as du with warnings.catch_warnings(): warnings.simplefilter('ignore') fn = (lambda : _analyze(output_dir, dataset, cloud, project_id)) return du.LambdaJob(fn, job_id=None)
Analyze data locally or in the cloud with BigQuery. Produce analysis used by training. This can take a while, even for small datasets. For small datasets, it may be faster to use local_analysis. Args: output_dir: The output directory to use. dataset: only CsvDataSet is supported currently. cloud: If False, runs analysis locally with Pandas. If True, runs analysis in the cloud with BigQuery. project_id: Uses BigQuery with this project id. Default is datalab's default project id. Returns: A google.datalab.utils.Job object that can be used to query state from or wait.
codesearchnet
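A usage sketch, on the assumption that dataset is a CsvDataSet as the docstring requires and that the returned LambdaJob exposes a blocking wait():
job = analyze_async("./analysis_out", dataset, cloud=False)
job.wait()   # assumed blocking call until the Pandas analysis finishes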
def send_data(data):
    datalength = len(data)
    csm1 = checksum1(data, datalength)
    csm2 = checksum2(csm1)
    # Packet layout: 0xFF 0xFF header first, then the two checksum bytes
    # spliced in at positions 5 and 6 (i.e. after the command byte).
    data.insert(0, 0xFF)
    data.insert(1, 0xFF)
    data.insert(5, csm1)
    data.insert(6, csm2)
    try:
        # data is a list of ints in 0..255, so it converts directly to bytes
        # (the original built an escaped string and used the Python 2-only
        # 'string-escape' codec).
        SERPORT.write(bytes(data))
    except Exception as error:
        raise HerkulexError("could not communicate with motors") from error
Send data to herkulex. Packetize and write the packet to the serial port. Args: data (list): the data to be sent Raises: HerkulexError: occurs when the serial port cannot be written to
juraj-google-style
def Patch(self, request, global_params=None): config = self.GetMethodConfig('Patch') return self._RunMethod(config, request, global_params=global_params)
Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics. Args: request: (BigqueryTablesPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Table) The response message.
github-repos
def _post_check(date_info: dict) -> bool:
    if date_info['pattern']:
        return True
    # The four-digit-year pattern must be a raw string: in a plain string
    # '\b' is a backspace character, so the original check never matched.
    if date_info['value'] == 'may' or re.match(illegal, date_info['value'])\
            or (re.match(possible_illegal, date_info['value']) and len([g for g in date_info['groups'] if g]) != 2) \
            or (re.match(possible_illegal_3, date_info['value']) and len([g for g in date_info['groups'] if g]) != 3) \
            or (re.match(r'^\b?[0-9]{4}\b?$', date_info['value']) and len([g for g in date_info['groups'] if g]) > 1):
        return False
    return True
Post check the extracted date string to filter out some false positives Args: date_info: dict - includes the extracted string, matching groups, patterns etc. Returns: bool - if the date extracted is valid
juraj-google-style
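A worked sketch of the two branches; these date_info dicts are hand-built assumptions about the extractor's output shape:
# A hit with an explicit pattern is always kept.
print(_post_check({"pattern": "%Y-%m-%d", "value": "2019-05-01",
                   "groups": ("2019", "05", "01")}))   # True
# The bare token "may" is rejected as a false positive.
print(_post_check({"pattern": None, "value": "may", "groups": ("may",)}))  # False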
def deserialize(config, custom_objects=None): populate_deserializable_objects() return generic_utils.deserialize_keras_object(config, module_objects=LOCAL.ALL_OBJECTS, custom_objects=custom_objects, printable_module_name='layer')
Instantiates a layer from a config dictionary. Args: config: dict of the form {'class_name': str, 'config': dict} custom_objects: dict mapping class names (or function names) of custom (non-Keras) objects to class/functions Returns: Layer instance (may be Model, Sequential, Network, Layer...)
github-repos
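A round-trip sketch using a built-in Keras layer; the config dict mirrors what layer serialization would emit:
layer = deserialize({"class_name": "Dense", "config": {"units": 8}})
print(type(layer).__name__)   # Dense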
def load_dictionary(self, filename, encoding='utf-8'):
    with load_file(filename, encoding) as data:
        # json.loads dropped its `encoding` argument in Python 3.9; the
        # text has already been decoded by load_file.
        self._dictionary.update(json.loads(data.lower()))
        self._update_dictionary()
Load in a pre-built word frequency list Args: filename (str): The filepath to the json (optionally gzipped) \ file to be loaded encoding (str): The encoding of the dictionary
codesearchnet
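A usage sketch, assuming the method belongs to a pyspellchecker-style word-frequency object; the instance and file names are placeholders:
checker.load_dictionary("frequency_en.json.gz")   # gzipped JSON of word counts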
def _init_boto3_clients(self, profile, region): try: session = None if (profile and region): session = boto3.session.Session(profile_name=profile, region_name=region) elif profile: session = boto3.session.Session(profile_name=profile) elif region: session = boto3.session.Session(region_name=region) else: session = boto3.session.Session() self._cloud_formation = session.client('cloudformation') return True except Exception as wtf: logging.error(wtf, exc_info=True) return False
The utility requires a boto3 client for CloudFormation. Args: profile: the AWS credential profile to use, if any region: the AWS region to target, if any Returns: True if the client was created, False otherwise
codesearchnet
def _parse_response(self, respond): mobj = self._max_qubit_error_re.match(respond.text) if mobj: raise RegisterSizeError( 'device register size must be <= {}'.format(mobj.group(1))) return True
Parse the text of the response for HTTP errors. This parses the text of the response to decide whether to retry the request or raise an exception. At the moment this only detects one exception condition. Args: respond (Response): requests.Response object Returns: bool: False if the request should be retried, True if not. Raises: RegisterSizeError: if the requested register size exceeds the device maximum.
juraj-google-style
def execute_interactive_code(elem, doc): code_lines = [l[4:] for l in elem.text.split('\n')] code_blocks = [[code_lines[0]]] for line in code_lines[1:]: if line.startswith(' ') or line == '': code_blocks[-1].append(line) else: code_blocks.append([line]) final_code = [] try: child = replwrap.REPLWrapper("python", ">>> ", None) except NameError: pf.debug('Can not run interactive session. No output produced ' + '(Code was:\n{!s}\n)' .format(elem)) pf.debug('Please pip install pexpect.') return '' for code_block in code_blocks: result = child.run_command('\n'.join(code_block) + '\n').rstrip('\r\n') final_code += [('>>> ' if i == 0 else '... ') + l for i, l in enumerate(code_block)] if result: final_code += [r for r in result.split('\n') if r.strip() not in code_block] return '\n'.join(final_code)
Executes code blocks for a python shell. Parses the code in `elem.text` into blocks and executes them. Args: elem The AST element. doc The document. Return: The code with inline results.
juraj-google-style
def append(self, node, dirty=True): self._children[node.id] = node node.parent = self if dirty: self.touch() return node
Add a new child node. Args: node (gkeepapi.Node): Node to add. dirty (bool): Whether this node should be marked dirty.
codesearchnet
def pool(builder, size, timeout=None): lock = threading.Lock() local_pool = queue.Queue() current_size = 0 @contextlib.contextmanager def pooled(): nonlocal current_size instance = None if (current_size < size): with lock: if (current_size < size): current_size += 1 instance = builder() if (instance is None): instance = local_pool.get(timeout=timeout) (yield instance) local_pool.put(instance) return pooled
Create a pool that imposes a limit on the number of stored instances. Args: builder: a function to build an instance. size: the size of the pool. timeout(Optional[float]): the seconds to wait before raising a ``queue.Empty`` exception if no instances are available within that time. Raises: If ``timeout`` is defined but the request is taking longer than the specified time, the context manager will raise a ``queue.Empty`` exception. Returns: A context manager that can be used with the ``with`` statement.
codesearchnet
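A usage sketch of the pooled context manager, with a plain object standing in for an expensive resource such as a connection:
def make_connection():
    return object()   # stand-in for an expensive resource

pooled = pool(make_connection, size=4, timeout=2.0)
with pooled() as conn:
    pass   # at most 4 live instances; a 5th caller blocks up to 2 s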
def setup(self, input_nodes=None, drop_na=False, **kwargs): self.output_nodes = [] input_nodes = (input_nodes or self.input_nodes or []) if (self.level != 'run'): kwargs = kwargs.copy() kwargs.pop('scan_length', None) collections = self.layout.get_collections(self.level, drop_na=drop_na, **kwargs) objects = (collections + input_nodes) (objects, kwargs) = self._filter_objects(objects, kwargs) groups = self._group_objects(objects) model = (self.model or {}) X = model.get('x', []) for grp in groups: input_nodes = [o for o in grp if isinstance(o, AnalysisNode)] colls = list((set(grp) - set(input_nodes))) if input_nodes: node_coll = self._concatenate_input_nodes(input_nodes) colls.append(node_coll) coll = (merge_collections(colls) if (len(colls) > 1) else colls[0]) coll = apply_transformations(coll, self.transformations) if X: transform.Select(coll, X) node = AnalysisNode(self.level, coll, self.contrasts, input_nodes, self.auto_contrasts) self.output_nodes.append(node)
Set up the Step and construct the design matrix. Args: input_nodes (list): Optional list of Node objects produced by the preceding Step in the analysis. If None, uses any inputs passed in at Step initialization. drop_na (bool): Boolean indicating whether or not to automatically drop events that have an n/a amplitude when reading in data from event files. kwargs: Optional keyword arguments to pass on to load_variables.
codesearchnet
def __find_variant(self, value):
    if isinstance(value, bool):
        return messages.Variant.BOOL
    elif isinstance(value, six.integer_types):
        return messages.Variant.INT64
    elif isinstance(value, float):
        return messages.Variant.DOUBLE
    elif isinstance(value, six.string_types):
        return messages.Variant.STRING
    elif isinstance(value, (list, tuple)):
        variant_priority = [None, messages.Variant.INT64, messages.Variant.DOUBLE, messages.Variant.STRING]
        chosen_priority = 0
        for v in value:
            variant = self.__find_variant(v)
            try:
                priority = variant_priority.index(variant)
            except ValueError:
                # list.index raises ValueError (not IndexError) on a miss.
                priority = -1
            if priority > chosen_priority:
                chosen_priority = priority
        return variant_priority[chosen_priority]
    return None
Find the messages.Variant type that describes this value. Args: value: The value whose variant type is being determined. Returns: The messages.Variant value that best describes value's type, or None if it's a type we don't know how to handle.
juraj-google-style
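A worked sketch of the priority rule; the name-mangled method only runs inside its owning class, so these are traced results rather than a standalone call:
# Scalars map directly:
#   True -> Variant.BOOL, 42 -> Variant.INT64,
#   3.5  -> Variant.DOUBLE, "x" -> Variant.STRING
# Mixed lists pick the widest type by priority INT64 < DOUBLE < STRING:
#   [1, 2.5]      -> Variant.DOUBLE
#   [1, 2.5, "x"] -> Variant.STRING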
def Read(self, file_object): try: self.root_key = biplist.readPlist(file_object) except (biplist.NotBinaryPlistException, biplist.InvalidPlistException) as exception: raise IOError(exception)
Reads a plist from a file-like object. Args: file_object (dfvfs.FileIO): a file-like object containing plist data. Raises: IOError: if the plist file-like object cannot be read. OSError: if the plist file-like object cannot be read.
codesearchnet
def find_input(self, stream): for i, input_x in enumerate(self.inputs): if input_x[0].matches(stream): return i
Find the input that responds to this stream. Args: stream (DataStream): The stream to find Returns: int or None: The index of the matching input if found, otherwise None
juraj-google-style
def get_physical_server_hardware(self): uri = '{}/physicalServerHardware'.format(self.data['uri']) return self._helper.do_get(uri)
Information describing an 'SDX' partition, including a list of physical server blades represented by server hardware resources. Used with SDX enclosures only. Returns: Resource
codesearchnet