Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 distinct values)
def is_github_repo_owner_the_official_one(context, repo_owner): official_repo_owner = context.config['official_github_repos_owner'] if (not official_repo_owner): raise ConfigError('This worker does not have a defined owner for official GitHub repositories. Given "official_github_repos_owner": {}'.format(official_repo_owner)) return (official_repo_owner == repo_owner)
Given a repo_owner, check if it matches the one configured to be the official one. Args: context (scriptworker.context.Context): the scriptworker context. repo_owner (str): the repo_owner to verify Raises: scriptworker.exceptions.ConfigError: when no official owner was defined Returns: bool: True when ``repo_owner`` matches the one configured to be the official one
codesearchnet
def get_key(self, key, request_only=False): values = {} requested_names = [x.name for x in self._package_requests if (not x.conflict)] for pkg in self.resolved_packages: if ((not request_only) or (pkg.name in requested_names)): value = getattr(pkg, key) if (value is not None): values[pkg.name] = (pkg, value) return values
Get a data key value for each resolved package. Args: key (str): String key of property, eg 'tools'. request_only (bool): If True, only return the key from resolved packages that were also present in the request. Returns: Dict of {pkg-name: (variant, value)}.
codesearchnet
def validate(self, config): if (not isinstance(config, dict)): raise errors.SchemeValidationError('Scheme can only validate a dictionary config, but was given {} (type: {})'.format(config, type(config))) for arg in self.args: if (arg.name in config): arg.validate(config[arg.name]) elif arg.required: raise errors.SchemeValidationError('Option "{}" is required, but not found.'.format(arg.name))
Validate the given config against the `Scheme`. Args: config (dict): The configuration to validate. Raises: errors.SchemeValidationError: The configuration fails validation against the `Scheme`.
codesearchnet
def get_label_set(self, type_str=None): return {v.label_str for v in self.node_gen if type_str in (None, v.type_str)}
Get a set of label_str for the tree rooted at this node. Args: type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include information from nodes of that type. Returns: set: The labels of the nodes leading up to this node from the root.
juraj-google-style
def test_src_dir_path(relative_path): return os.path.join(os.environ['TEST_SRCDIR'], 'org_tensorflow/tensorflow', relative_path)
Creates an absolute test srcdir path given a relative path. Args: relative_path: a path relative to tensorflow root. e.g. "contrib/session_bundle/example". Returns: An absolute path to the linked in runfiles.
github-repos
def extractSchedule(self, schedule, period): ret = namedtuple('ret', ['Hour', 'Min', 'Tariff', 'Period', 'Schedule']) work_table = self.m_schd_1_to_4 if (Schedules.Schedule_5 <= schedule <= Schedules.Schedule_6): work_table = self.m_schd_5_to_6 period += 1 schedule += 1 ret.Period = str(period) ret.Schedule = str(schedule) if ((schedule < 1) or (schedule > Extents.Schedules) or (period < 0) or (period > Extents.Periods)): ekm_log(((('Out of bounds: tariff ' + str(period)) + ' for schedule ') + str(schedule))) ret.Hour = ret.Min = ret.Tariff = str(0) return ret idxhr = (((('Schedule_' + str(schedule)) + '_Period_') + str(period)) + '_Hour') idxmin = (((('Schedule_' + str(schedule)) + '_Period_') + str(period)) + '_Min') idxrate = (((('Schedule_' + str(schedule)) + '_Period_') + str(period)) + '_Tariff') if (idxhr not in work_table): ekm_log(('Incorrect index: ' + idxhr)) ret.Hour = ret.Min = ret.Tariff = str(0) return ret if (idxmin not in work_table): ekm_log(('Incorrect index: ' + idxmin)) ret.Hour = ret.Min = ret.Tariff = str(0) return ret if (idxrate not in work_table): ekm_log(('Incorrect index: ' + idxrate)) ret.Hour = ret.Min = ret.Tariff = str(0) return ret ret.Hour = work_table[idxhr][MeterData.StringValue] ret.Min = work_table[idxmin][MeterData.StringValue].zfill(2) ret.Tariff = work_table[idxrate][MeterData.StringValue] return ret
Read a single schedule tariff from meter object buffer. Args: schedule (int): A :class:`~ekmmeters.Schedules` value or in range(Extent.Schedules). period (int): A period value in range(Extent.Periods). Returns: namedtuple: Fields Hour, Min, Tariff, Period and Schedule as strings.
codesearchnet
def _parse_logging(log_values: dict, service_config: dict): for log_key, log_value in log_values.items(): if 'driver' in log_key: service_config['log_driver'] = log_value if 'options' in log_key: service_config['log_driver_options'] = log_value
Parse log key. Args: log_values (dict): logging configuration values service_config (dict): Service specification
juraj-google-style
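A minimal usage sketch for the _parse_logging helper above; the input dict is an illustrative assumption modelled on a docker-compose style logging block, not taken from the source:
log_values = {'driver': 'json-file', 'options': {'max-size': '10m'}}  # hypothetical logging block
service_config = {}
_parse_logging(log_values, service_config)
# service_config is now {'log_driver': 'json-file', 'log_driver_options': {'max-size': '10m'}}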
def getWindow(title, exact=False): titles = getWindows() hwnd = titles.get(title, None) if ((not hwnd) and (not exact)): for (k, v) in titles.items(): if (title in k): hwnd = v break if hwnd: return Window(hwnd) else: return None
Return a Window object if 'title', or a part of it, is found among the titles of visible windows, else return None. Only the first matching window is returned. Args: title: unicode string exact (bool): True to search for an exact match only
codesearchnet
def connectivity_array(self): cart_coords = np.array(self.s.cart_coords) all_sites = (cart_coords[:, None, :] + self.cart_offsets[None, :, :]) vt = Voronoi(all_sites.reshape(((- 1), 3))) n_images = all_sites.shape[1] cs = (len(self.s), len(self.s), len(self.cart_offsets)) connectivity = np.zeros(cs) vts = np.array(vt.vertices) for ((ki, kj), v) in vt.ridge_dict.items(): atomi = (ki // n_images) atomj = (kj // n_images) imagei = (ki % n_images) imagej = (kj % n_images) if ((imagei != (n_images // 2)) and (imagej != (n_images // 2))): continue if (imagei == (n_images // 2)): val = solid_angle(vt.points[ki], vts[v]) connectivity[(atomi, atomj, imagej)] = val if (imagej == (n_images // 2)): val = solid_angle(vt.points[kj], vts[v]) connectivity[(atomj, atomi, imagei)] = val if ((- 10.101) in vts[v]): warn('Found connectivity with infinite vertex. Cutoff is too low, and results may be incorrect') return connectivity
Provides connectivity array. Returns: connectivity: An array of shape [atomi, atomj, imagej]. atomi is the index of the atom in the input structure. Since the second atom can be outside of the unit cell, it must be described by both an atom index and an image index. Array data is the solid angle of polygon between atomi and imagej of atomj
codesearchnet
def run_config_diagnostics(config_path=CONFIG_PATH): config = read_config(config_path) missing_sections = set() malformed_entries = defaultdict(set) for (section, expected_section_keys) in SECTION_KEYS.items(): section_content = config.get(section) if (not section_content): missing_sections.add(section) else: for option in expected_section_keys: option_value = section_content.get(option) if (not option_value): malformed_entries[section].add(option) return (config_path, missing_sections, malformed_entries)
Run diagnostics on the configuration file. Args: config_path (str): Path to the configuration file. Returns: str, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing sections and a dict that maps each section to the entries that have either missing or empty options.
codesearchnet
def add_arguments(self, parser): parser.add_argument('name', nargs=1, choices=['kinetis'], help='name of MCU to unlock') return self.add_common_arguments(parser, True)
Adds the unlock command arguments to the parser. Args: self (UnlockCommand): the ``UnlockCommand`` instance parser (argparse.ArgumentParser): the parser to add the arguments to Returns: ``None``
juraj-google-style
def acquaint_insides(swap_gate: ops.Gate, acquaintance_gate: ops.Operation, qubits: Sequence[ops.Qid], before: bool, layers: Layers, mapping: Dict[(ops.Qid, int)]) -> None: max_reach = _get_max_reach(len(qubits), round_up=before) reaches = itertools.chain(range(1, (max_reach + 1)), range(max_reach, (- 1), (- 1))) offsets = ((0, 1) * max_reach) swap_gate = SwapPermutationGate(swap_gate) ops = [] for (offset, reach) in zip(offsets, reaches): if (offset == before): ops.append(acquaintance_gate) for dr in range(offset, reach, 2): ops.append(swap_gate(*qubits[dr:(dr + 2)])) intrastitial_layer = getattr(layers, ('pre' if before else 'post')) intrastitial_layer += ops interstitial_layer = getattr(layers, (('prior' if before else 'posterior') + '_interstitial')) interstitial_layer.append(acquaintance_gate) reached_qubits = qubits[:(max_reach + 1)] positions = list((mapping[q] for q in reached_qubits)) mapping.update(zip(reached_qubits, reversed(positions)))
Acquaints each of the qubits with another set specified by an acquaintance gate. Args: qubits: The list of qubits of which half are individually acquainted with another list of qubits. layers: The layers to put gates into. acquaintance_gate: The acquaintance gate that acquaints the end qubit with another list of qubits. before: Whether the acquainting is done before the shift. swap_gate: The gate used to swap logical indices. mapping: The mapping from qubits to logical indices. Used to keep track of the effect of inside-acquainting swaps.
codesearchnet
def log(self, metric): message = self.LOGFMT.format(**metric) if metric['context']: message += ' context: {context}'.format(context=metric['context']) self._logger.log(self.level, message)
Format and output metric. Args: metric (dict): Complete metric.
codesearchnet
def split_input(cls, mapper_spec): batch_size = int(_get_params(mapper_spec).get( cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)) shard_count = mapper_spec.shard_count namespace_ranges = namespace_range.NamespaceRange.split(shard_count, contiguous=True) return [NamespaceInputReader(ns_range, batch_size) for ns_range in namespace_ranges]
Returns a list of input readers for the input spec. Args: mapper_spec: The MapperSpec for this InputReader. Returns: A list of InputReaders.
juraj-google-style
def Validate(self, expression): parsed = self._Load(expression) if not parsed: raise DefinitionError("Empty StatFilter expression.") bad_keys = set(parsed) - self._KEYS if bad_keys: raise DefinitionError("Invalid parameters: %s" % ",".join(bad_keys)) if self.cfg.mask and not self.cfg.mode: raise DefinitionError("mode can only be set when mask is also defined.") if self.cfg.mask: if len(self.cfg.mask) > 1: raise DefinitionError("Too many mask values defined.") if not self._PERM_RE.match(self.cfg.mask[0]): raise DefinitionError("mask=%s is not octal, e.g. 0600" % self.cfg.mask) if self.cfg.mode: if len(self.cfg.mode) > 1: raise DefinitionError("Too many mode values defined.") if not self._PERM_RE.match(self.cfg.mode[0]): raise DefinitionError("mode=%s is not octal, e.g. 0600" % self.cfg.mode) if self.cfg.gid: for gid in self.cfg.gid: matched = self._UID_GID_RE.match(gid) if not matched: raise DefinitionError("gid: %s is not an integer preceded by " "!, >, < or =." % gid) if self.cfg.uid: for uid in self.cfg.uid: matched = self._UID_GID_RE.match(uid) if not matched: raise DefinitionError("uid: %s is not an integer preceded by " "!, >, < or =." % uid) if self.cfg.file_re: if len(self.cfg.file_re) > 1: raise DefinitionError("Too many regexes defined: %s" % self.cfg.file_re) try: self.file_re = re.compile(self.cfg.file_re[0]) except (re.error, TypeError) as e: raise DefinitionError("Invalid file regex: %s" % e) if self.cfg.path_re: if len(self.cfg.path_re) > 1: raise DefinitionError("Too many regexes defined: %s" % self.cfg.path_re) try: self.path_re = re.compile(self.cfg.path_re[0]) except (re.error, TypeError) as e: raise DefinitionError("Invalid path regex: %s" % e) if self.cfg.file_type: if len(self.cfg.file_type) > 1: raise DefinitionError( "Too many file types defined: %s" % self.cfg.file_type) file_type = self.cfg.file_type[0].upper() if file_type not in self._TYPES: raise DefinitionError("Unsupported file type %s" % file_type) self._Initialize() if not self.matchers: raise DefinitionError("StatFilter has no actions: %s" % expression) return True
Validates that a parsed rule entry is valid for fschecker. Args: expression: A rule expression. Raises: DefinitionError: If the filter definition could not be validated. Returns: True if the expression validated OK.
juraj-google-style
def gpio_get(self, pins=None): if (pins is None): pins = range(4) size = len(pins) indices = (ctypes.c_uint8 * size)(*pins) statuses = (ctypes.c_uint8 * size)() result = self._dll.JLINK_EMU_GPIO_GetState(ctypes.byref(indices), ctypes.byref(statuses), size) if (result < 0): raise errors.JLinkException(result) return list(statuses)
Returns a list of states for the given pins. Defaults to the first four pins if an argument is not given. Args: self (JLink): the ``JLink`` instance pins (list): indices of the GPIO pins whose states are requested Returns: A list of states. Raises: JLinkException: on error.
codesearchnet
def add_primitives_path(path): if path not in _PRIMITIVES_PATHS: if not os.path.isdir(path): raise ValueError('Invalid path: {}'.format(path)) LOGGER.debug('Adding new primitives path %s', path) _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))
Add a new path to look for primitives. The new path will be inserted in the first place of the list, so any primitive found in this new folder will take precedence over any other primitive with the same name that existed in the system before. Args: path (str): path to add Raises: ValueError: A `ValueError` will be raised if the path is not valid.
juraj-google-style
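A short usage sketch for add_primitives_path; the directory name below is a hypothetical example:
import os
os.makedirs('./my_primitives', exist_ok=True)   # the path must exist, otherwise ValueError is raised
add_primitives_path('./my_primitives')          # this path now takes precedence when looking up primitives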
def is45(msg): if allzeros(msg): return False d = hex2bin(data(msg)) if wrongstatus(d, 1, 2, 3): return False if wrongstatus(d, 4, 5, 6): return False if wrongstatus(d, 7, 8, 9): return False if wrongstatus(d, 10, 11, 12): return False if wrongstatus(d, 13, 14, 15): return False if wrongstatus(d, 16, 17, 26): return False if wrongstatus(d, 27, 28, 38): return False if wrongstatus(d, 39, 40, 51): return False if bin2int(d[51:56]) != 0: return False temp = temp45(msg) if temp: if temp > 60 or temp < -80: return False return True
Check if a message is likely to be BDS code 4,5. Meteorological hazard report Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False
juraj-google-style
def get_reduced_structure(self, reduction_algo='niggli'): if (reduction_algo == 'niggli'): reduced_latt = self._lattice.get_niggli_reduced_lattice() elif (reduction_algo == 'LLL'): reduced_latt = self._lattice.get_lll_reduced_lattice() else: raise ValueError('Invalid reduction algo : {}'.format(reduction_algo)) if (reduced_latt != self.lattice): return self.__class__(reduced_latt, self.species_and_occu, self.cart_coords, coords_are_cartesian=True, to_unit_cell=True, site_properties=self.site_properties, charge=self._charge) else: return self.copy()
Get a reduced structure. Args: reduction_algo (str): The lattice reduction algorithm to use. Currently supported options are "niggli" or "LLL".
codesearchnet
def trace(src, options=None): options = options or config.Options.create() with config.verbosity_from(options): loader = load_pytd.create_loader(options) ret = analyze.infer_types(src=src, options=options, loader=loader) pytd_module = ret.ast raw_traces = [] for op, symbol, data in ret.context.vm.opcode_traces: raw_traces.append((op, symbol, tuple((_to_pytd(d, loader, pytd_module) for d in data)))) return source.Code(src, raw_traces, TypeTrace, options.input)
Generates type traces for the given source code. Args: src: The source text. options: A pytype.config.Options object that can be used to specify options such as the target Python version. Returns: A source.Code object.
github-repos
def get_attr_location(self, name, location): line, _ = location src_line = self.line(line) attr = name.split('.')[-1] dot_attr = '.' + attr if dot_attr in src_line: col = src_line.index(dot_attr) return (Location(line, col + 1), len(attr)) else: attr_loc = self._get_multiline_location(location, 5, dot_attr) if attr_loc: return (Location(attr_loc.line, attr_loc.column + 1), len(attr)) else: for l in self.get_closest_line_range(line, line + 5): if self.line(l).endswith('.'): next_line = self.next_non_comment_line(l) text = self.line(next_line) if text.lstrip().startswith(attr): c = text.index(attr) return (Location(next_line, c), len(attr)) return (location, len(name))
Returns the location and span of the attribute in an attribute access. Args: name: The attribute name. location: The location of the value the attribute is accessed on.
github-repos
def __init__(self, server_id): data = datatools.get_data() self.server_id = server_id self.logger = logging.getLogger("{}.{}".format(__name__, self.server_id)) self.songcache_dir = "{}/{}".format(_root_songcache_dir, self.server_id) self.songcache_next_dir = "{}/{}/next".format(_root_songcache_dir, self.server_id) self.output_format = "{}/{}".format(self.songcache_dir, file_format) self.output_format_next = "{}/{}".format(self.songcache_next_dir, file_format) self.vchannel = None self.vclient = None self.streamer = None self.current_duration = 0 self.current_download_elapsed = 0 self.is_live = False self.queue = [] self.prev_queue = [] self.prev_queue_max = 500 self.volume = 20 self.vclient_starttime = None self.vclient_task = None self.pause_time = None self.prev_time = "" self.loop_type = 'off' self.mready = False self.vready = False self.state = 'off' self.mchannel = None self.embed = None self.queue_display = 9 self.nowplayinglog = logging.getLogger("{}.{}.nowplaying".format(__name__, self.server_id)) self.nowplayinglog.setLevel("DEBUG") self.nowplayingauthorlog = logging.getLogger("{}.{}.nowplayingauthor".format(__name__, self.server_id)) self.nowplayingauthorlog.setLevel("DEBUG") self.nowplayingsourcelog = logging.getLogger("{}.{}.nowplayingsource".format(__name__, self.server_id)) self.nowplayingsourcelog.setLevel("DEBUG") self.timelog = logging.getLogger("{}.{}.time".format(__name__, self.server_id)) self.timelog.setLevel("DEBUG") self.timelog.propagate = False self.queuelog = logging.getLogger("{}.{}.queue".format(__name__, self.server_id)) self.queuelog.setLevel("DEBUG") self.queuelog.propagate = False self.queuelenlog = logging.getLogger("{}.{}.queuelen".format(__name__, self.server_id)) self.queuelenlog.setLevel("DEBUG") self.queuelenlog.propagate = False self.volumelog = logging.getLogger("{}.{}.volume".format(__name__, self.server_id)) self.volumelog.setLevel("DEBUG") self.statuslog = logging.getLogger("{}.{}.status".format(__name__, self.server_id)) self.statuslog.setLevel("DEBUG") self.statustimer = None self.clear_cache() self.topic = "" self.topicchannel = None if "topic_id" in data["discord"]["servers"][self.server_id][_data.modulename]: topic_id = data["discord"]["servers"][self.server_id][_data.modulename]["topic_id"] if topic_id is not None and topic_id != "": logger.debug("Topic channel id: {}".format(topic_id)) self.topicchannel = client.get_channel(topic_id) if "volume" in data["discord"]["servers"][self.server_id][_data.modulename]: self.volume = data["discord"]["servers"][self.server_id][_data.modulename]["volume"] else: self.write_volume()
Locks onto a server for easy management of various UIs Args: server_id (str): The Discord ID of the server to lock on to
juraj-google-style
def flip_channel_order(self, image): self._ensure_format_supported(image) if isinstance(image, PIL.Image.Image): image = self.to_numpy_array(image) return image[::-1, :, :]
Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of `image` to a NumPy array if it's a PIL Image. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should be first.
github-repos
def get_random_value(length=10, character_sets=[string.ascii_uppercase, string.ascii_lowercase]): return ''.join((random.choice(''.join(character_sets)) for i in range(length)))
Get a random string with the given length. Args: length (int): The length of the string to return. character_sets (list(str)): The character sets to use. Returns: str: The random string.
codesearchnet
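An illustrative call of get_random_value; the chosen length and character sets are arbitrary:
import string
token = get_random_value(length=16, character_sets=[string.ascii_lowercase, string.digits])
# e.g. 'a91kz0pq3m7xv2b8' -- 16 characters drawn from lowercase letters and digits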
def _DownloadAuthUrl(self, url, dest_dir): dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) dest_file.close() dest = dest_file.name self.logger.info( 'Downloading url from %s to %s using authentication token.', url, dest) if not self.token: response = self.watcher.GetMetadata( self.token_metadata_key, recursive=False, retry=False) if not response: self.logger.info( 'Authentication token not found. Attempting unauthenticated ' 'download.') return self._DownloadUrl(url, dest_dir) self.token = '%s %s' % ( response.get('token_type', ''), response.get('access_token', '')) try: request = urlrequest.Request(url) request.add_unredirected_header('Metadata-Flavor', 'Google') request.add_unredirected_header('Authorization', self.token) content = urlrequest.urlopen(request).read().decode('utf-8') except (httpclient.HTTPException, socket.error, urlerror.URLError) as e: self.logger.warning('Could not download %s. %s.', url, str(e)) return None with open(dest, 'wb') as f: f.write(content) return dest
Download a Google Storage URL using an authentication token. If the token cannot be fetched, fallback to unauthenticated download. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
juraj-google-style
def uses_star_args_in_call(node): if sys.version_info[:2] >= (3, 5): for arg in node.args: if isinstance(arg, ast.Starred): return True elif node.starargs: return True return False
Check if an ast.Call node uses arbitrary-length positional *args. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not.
github-repos
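A small self-contained check of uses_star_args_in_call, assuming it is available alongside the ast module it relies on:
import ast
call_node = ast.parse('f(*args)').body[0].value    # an ast.Call node with a starred argument
print(uses_star_args_in_call(call_node))            # True on Python 3.5+
plain_node = ast.parse('f(1, 2)').body[0].value
print(uses_star_args_in_call(plain_node))           # False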
def sign(self, byts): chosen_hash = c_hashes.SHA256() hasher = c_hashes.Hash(chosen_hash, default_backend()) hasher.update(byts) digest = hasher.finalize() return self.priv.sign(digest, c_ec.ECDSA(c_utils.Prehashed(chosen_hash)))
Compute the ECC signature for the given bytestream. Args: byts (bytes): The bytes to sign. Returns: bytes: The ECC signature bytes.
codesearchnet
def load_metrics(event_dir, epoch): metrics = {} for filename in tf.gfile.ListDirectory(event_dir): path = os.path.join(event_dir, filename) for event in tf.train.summary_iterator(path): if ((event.step == epoch) and event.HasField('summary')): value = event.summary.value[0] metrics[value.tag] = value.simple_value return metrics
Loads metrics for this epoch if they have already been written. This reads the entire event file but it's small with just per-epoch metrics. Args: event_dir: TODO(koz4k): Document this. epoch: TODO(koz4k): Document this. Returns: metrics.
codesearchnet
def RegisterMessage(self, message): desc = message.DESCRIPTOR self._classes[desc.full_name] = message self.pool.AddDescriptor(desc) return message
Registers the given message type in the local database. Calls to GetSymbol() and GetMessages() will return messages registered here. Args: message: a message.Message, to be registered. Returns: The provided message.
juraj-google-style
def GetSectionByIndex(self, section_index): if not self._is_parsed: self._Parse() self._is_parsed = True if section_index < 0 or section_index >= len(self._sections): return None return self._sections[section_index]
Retrieves a specific section based on the index. Args: section_index (int): index of the section. Returns: VolumeExtent: a volume extent or None if not available.
juraj-google-style
def run_commands(self, commands, encoding='json', send_enable=True, **kwargs): commands = make_iterable(commands) commands = [({'cmd': c.split('MULTILINE:')[0], 'input': ('%s\n' % c.split('MULTILINE:')[1].strip())} if ('MULTILINE:' in c) else c) for c in commands] if send_enable: if self._enablepwd: commands.insert(0, {'cmd': 'enable', 'input': self._enablepwd}) else: commands.insert(0, 'enable') response = self._connection.execute(commands, encoding, **kwargs) if send_enable: response['result'].pop(0) return response['result']
Sends the commands over the transport to the device This method sends the commands to the device using the node's transport. This is a lower layer function that shouldn't normally need to be used, preferring instead to use config() or enable(). Args: commands (list): The ordered list of commands to send to the device using the transport encoding (str): The encoding method to use for the request and expected response. send_enable (bool): If True the enable command will be prepended to the command list automatically. **kwargs: Additional keyword arguments for expanded eAPI functionality. Only supported eAPI params are used in building the request Returns: This method will return the raw response from the connection which is a Python dictionary object.
codesearchnet
def parse(self, filepath, content): try: parsed = yaml.load(content) except yaml.YAMLError as exc: msg = "No YAML object could be decoded from file: {}\n{}" raise SettingsBackendError(msg.format(filepath, exc)) return parsed
Parse opened settings content using YAML parser. Args: filepath (str): Settings object, depends on backend content (str): Settings content from opened file, depends on backend. Raises: boussole.exceptions.SettingsBackendError: If parser can not decode a valid YAML object. Returns: dict: Dictionary containing parsed setting elements.
juraj-google-style
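A hedged usage sketch of the YAML parse method above; backend stands for an instance of the settings backend class and the settings content is invented for illustration:
content = 'SOURCES_PATH: scss\nTARGET_PATH: css'   # hypothetical settings content
parsed = backend.parse('settings.yml', content)
# parsed == {'SOURCES_PATH': 'scss', 'TARGET_PATH': 'css'}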
def __get_state_by_id(cls, job_id): state = model.MapreduceState.get_by_job_id(job_id) if state is None: raise ValueError("Job state for job %s is missing." % job_id) return state
Get job state by id. Args: job_id: job id. Returns: model.MapreduceState for the job. Raises: ValueError: if the job state is missing.
juraj-google-style
def _print_test_names(test_classes): for test_class in test_classes: cls = test_class(config_parser.TestRunConfig()) test_names = [] try: cls._pre_run() if cls.tests: test_names = list(cls.tests) else: test_names = cls.get_existing_test_names() except Exception: logging.exception('Failed to retrieve generated tests.') finally: cls._clean_up() print('==========> %s <==========' % cls.TAG) for name in test_names: print(f'{cls.TAG}.{name}')
Prints the names of all the tests in all test classes. Args: test_classes: classes, the test classes to print names from.
github-repos
def _get_prop_from_modelclass(modelclass, name): if name == '__key__': return modelclass._key parts = name.split('.') part, more = parts[0], parts[1:] prop = modelclass._properties.get(part) if prop is None: if issubclass(modelclass, model.Expando): prop = model.GenericProperty(part) else: raise TypeError('Model %s has no property named %r' % (modelclass._get_kind(), part)) while more: part = more.pop(0) if not isinstance(prop, model.StructuredProperty): raise TypeError('Model %s has no property named %r' % (modelclass._get_kind(), part)) maybe = getattr(prop, part, None) if isinstance(maybe, model.Property) and maybe._name == part: prop = maybe else: maybe = prop._modelclass._properties.get(part) if maybe is not None: prop = getattr(prop, maybe._code_name) else: if issubclass(prop._modelclass, model.Expando) and not more: prop = model.GenericProperty() prop._name = name else: raise KeyError('Model %s has no property named %r' % (prop._modelclass._get_kind(), part)) return prop
Helper for FQL parsing to turn a property name into a property object. Args: modelclass: The model class specified in the query. name: The property name. This may contain dots which indicate sub-properties of structured properties. Returns: A Property object. Raises: KeyError if the property doesn't exist and the model clas doesn't derive from Expando.
juraj-google-style
def run_idle(self): if ((not self.idlers) or (self.inactive >= len(self.idlers))): return False idler = self.idlers.popleft() (callback, args, kwds) = idler _logging_debug('idler: %s', callback.__name__) res = callback(*args, **kwds) if (res is not None): if res: self.inactive = 0 else: self.inactive += 1 self.idlers.append(idler) else: _logging_debug('idler %s removed', callback.__name__) return True
Run one of the idle callbacks. Returns: True if one was called, False if no idle callback was called.
codesearchnet
def _MakeFieldDescriptor(self, field_proto, message_name, index, is_extension=False): if message_name: full_name = '.'.join((message_name, field_proto.name)) else: full_name = field_proto.name return descriptor.FieldDescriptor(name=field_proto.name, full_name=full_name, index=index, number=field_proto.number, type=field_proto.type, cpp_type=None, message_type=None, enum_type=None, containing_type=None, label=field_proto.label, has_default_value=False, default_value=None, is_extension=is_extension, extension_scope=None, options=field_proto.options)
Creates a field descriptor from a FieldDescriptorProto. For message and enum type fields, this method will do a look up in the pool for the appropriate descriptor for that type. If it is unavailable, it will fall back to the _source function to create it. If this type is still unavailable, construction will fail. Args: field_proto: The proto describing the field. message_name: The name of the containing message. index: Index of the field is_extension: Indication that this field is for an extension. Returns: An initialized FieldDescriptor object
codesearchnet
def __init__(self, qobj_model, **run_config): self._qobj_model = qobj_model self._run_config = run_config
Create new converter. Args: qobj_model (QobjInstruction): marshmallow model to serialize to object. run_config (dict): experimental configuration.
juraj-google-style
def __init__(self, column_names=None, title=None): super(CLITableView, self).__init__(column_names=column_names, title=title) if self._columns: self._column_width = len(self._columns[0]) else: self._column_width = 0
Initializes a command line table view. Args: column_names (Optional[list[str]]): column names. title (Optional[str]): title.
juraj-google-style
def xldate_as_datetime(xldate, datemode=0, option="to_datetime"): if option == "to_float": d = (xldate - 25589) * 86400.0 else: try: d = datetime.datetime(1899, 12, 30) + \ datetime.timedelta(days=xldate + 1462 * datemode) if option == "to_string": date_format = "%Y-%m-%d %H:%M:%S" d = d.strftime(date_format) except TypeError: logging.info(f'The date is not of correct type [{xldate}]') d = xldate return d
Converts a xls date stamp to a more sensible format. Args: xldate (str): date stamp in Excel format. datemode (int): 0 for 1900-based, 1 for 1904-based. option (str): option in ("to_datetime", "to_float", "to_string"), return value Returns: datetime (datetime object, float, or string).
juraj-google-style
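A quick worked example for xldate_as_datetime; serial 43466 in the 1900-based system corresponds to 2019-01-01:
xldate_as_datetime(43466.0)                        # datetime.datetime(2019, 1, 1, 0, 0)
xldate_as_datetime(43466.0, option="to_string")    # '2019-01-01 00:00:00'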
def table_chains(self, table='filter'): return dict(((c['name'], self.get_chain(c['name'], table)) for c in self.get_table(table)))
Get a dict where the keys are all the chains for the given table and each value is the set of rules defined for the given chain. Args: table (str): table name, defaults to ``filter`` Returns: dict: chains with set of defined rules
codesearchnet
def cutting_indices(self, independent_decision_points: List[pg.geno.DecisionPoint], global_state: pg.geno.AttributeDict, step: int) -> List[int]:
Implementation of getting the indices of the cutting points. Args: independent_decision_points: A list of independent decision points. global_state: An optional keyword argument as the global state. Subclass can omit. step: An optional keyword argument as the current step. Subclass can omit. Returns: A list of integers as the cutting points.
github-repos
def scroll(clicks, x=None, y=None, pause=None, _pause=True): _failSafeCheck() if (type(x) in (tuple, list)): (x, y) = (x[0], x[1]) (x, y) = position(x, y) platformModule._scroll(clicks, x, y) _autoPause(pause, _pause)
Performs a scroll of the mouse scroll wheel. Whether this is a vertical or horizontal scroll depends on the underlying operating system. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: clicks (int, float): The amount of scrolling to perform. x (int, float, None, tuple, optional): The x position on the screen where the click happens. None by default. If tuple, this is used for x and y. y (int, float, None, optional): The y position on the screen where the click happens. None by default. Returns: None
codesearchnet
def sample(self, bqm, num_reads=10): values = tuple(bqm.vartype.value) def _itersample(): for __ in range(num_reads): sample = {v: choice(values) for v in bqm.linear} energy = bqm.energy(sample) yield sample, energy samples, energies = zip(*_itersample()) return SampleSet.from_samples(samples, bqm.vartype, energies)
Give random samples for a binary quadratic model. Variable assignments are chosen by coin flip. Args: bqm (:obj:`.BinaryQuadraticModel`): Binary quadratic model to be sampled from. num_reads (int, optional, default=10): Number of reads. Returns: :obj:`.SampleSet`
juraj-google-style
def get_property_dict(entity_proto): return dict(((p.key, p.value) for p in entity_proto.property))
Convert datastore.Entity to a dict of property name -> datastore.Value. Args: entity_proto: datastore.Entity proto message. Usage: >>> get_property_dict(entity_proto) {'foo': {string_value='a'}, 'bar': {integer_value=2}} Returns: dict of entity properties.
codesearchnet
def _read_mptcp_join(self, bits, size, kind): if (self._syn and self._ack): return self._read_join_synack(bits, size, kind) elif self._syn: return self._read_join_syn(bits, size, kind) elif self._ack: return self._read_join_ack(bits, size, kind) else: temp = self._read_fileng(size) data = dict(kind=kind, length=(size + 1), subtype='MP_JOIN-Unknown', data=(bytes(chr(int(bits[:4], base=2)), encoding='utf-8') + temp)) return data
Read Join Connection option. Positional arguments: * bits - str, 4-bit data * size - int, length of option * kind - int, 30 (Multipath TCP) Returns: * dict -- extracted Join Connection (MP_JOIN) option Structure of MP_JOIN [RFC 6824]: Octets Bits Name Description 0 0 tcp.mp.kind Kind (30) 1 8 tcp.mp.length Length 2 16 tcp.mp.subtype Subtype (1) 2 20 tcp.mp.data Handshake-specific Data
codesearchnet
def op_nodes(self, op=None): nodes = [] for node in self._multi_graph.nodes(): if node.type == "op": if op is None or isinstance(node.op, op): nodes.append(node) return nodes
Get the list of "op" nodes in the dag. Args: op (Type): Instruction subclass op nodes to return. if op=None, return all op nodes. Returns: list[DAGNode]: the list of node ids containing the given op.
juraj-google-style
def get_plugin_asset(plugin_asset_cls, graph=None): if graph is None: graph = ops.get_default_graph() if not plugin_asset_cls.plugin_name: raise ValueError('Class %s has no plugin_name' % plugin_asset_cls.__name__) name = _PLUGIN_ASSET_PREFIX + plugin_asset_cls.plugin_name container = graph.get_collection(name) if container: if len(container) != 1: raise ValueError('Collection for %s had %d items, expected 1' % (name, len(container))) instance = container[0] if not isinstance(instance, plugin_asset_cls): raise ValueError('Plugin name collision between classes %s and %s' % (plugin_asset_cls.__name__, instance.__class__.__name__)) else: instance = plugin_asset_cls() graph.add_to_collection(name, instance) graph.add_to_collection(_PLUGIN_ASSET_PREFIX, plugin_asset_cls.plugin_name) return instance
Acquire singleton PluginAsset instance from a graph. PluginAssets are always singletons, and are stored in tf Graph collections. This way, they can be defined anywhere the graph is being constructed, and if the same plugin is configured at many different points, the user can always modify the same instance. Args: plugin_asset_cls: The PluginAsset class graph: (optional) The graph to retrieve the instance from. If not specified, the default graph is used. Returns: An instance of the plugin_asset_class Raises: ValueError: If we have a plugin name collision, or if we unexpectedly find the wrong number of items in a collection.
github-repos
def __init__(self, loss_scale_value): super(FixedLossScale, self).__init__() if not isinstance(loss_scale_value, (int, float)): raise ValueError('loss_scale_value must be a Python int or float.') if loss_scale_value < 1: raise ValueError('loss_scale_value must be at least 1.') self._loss_scale_value = float(loss_scale_value)
Creates the fixed loss scale. Args: loss_scale_value: A Python float. Its ideal value varies depending on models to run. Choosing a too small loss_scale might affect model quality; a too big loss_scale might cause inf or nan. There is no single right loss_scale to apply. There is no harm choosing a relatively big number as long as no nan or inf is encountered in training. Raises: ValueError: If loss_scale_value is less than 1.
github-repos
def _print_download_progress_msg(self, msg, flush=False): if self._interactive_mode(): self._max_prog_str = max(self._max_prog_str, len(msg)) sys.stdout.write(('\r%-{}s'.format(self._max_prog_str) % msg)) sys.stdout.flush() if flush: print('\n') else: logging.info(msg)
Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode).
codesearchnet
def _address_content(self, x): mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_key") mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_query") norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True) dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") access_logits = self.sharpen_factor * cos_dist return access_logits
Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size].
juraj-google-style
def register_user(self, user): self.users[user.index] = {'known_items': set()} self.n_user += 1
For new users, append their information into the dictionaries. Args: user (User): User.
juraj-google-style
def write_fasta_file_from_dict(indict, outname, outdir=None, outext='.faa', force_rerun=False): if (not outdir): outdir = '' outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): seqs = [] for (i, s) in indict.items(): seq = ssbio.protein.sequence.utils.cast_to_seq_record(s, id=i) seqs.append(seq) SeqIO.write(seqs, outfile, 'fasta') return outfile
Write a FASTA file for a dictionary of IDs and their sequence strings. Args: indict: Input dictionary with keys as IDs and values as sequence strings outname: Name of the output file which will have outext appended to it outdir: Path to directory to output sequences to outext: Extension of FASTA file, default ".faa" force_rerun: If file should be overwritten if it exists Returns: str: Path to output FASTA file.
codesearchnet
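An illustrative call of write_fasta_file_from_dict; the IDs and sequences below are made-up placeholders:
sequences = {'protein_A': 'MKTAYIAKQR', 'protein_B': 'MVLSPADKTN'}
fasta_path = write_fasta_file_from_dict(sequences, outname='my_seqs', outdir='/tmp')
# expected to return something like '/tmp/my_seqs.faa', with one FASTA record per dictionary entry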
def Push(cls, connection, datafile, filename, st_mode=DEFAULT_PUSH_MODE, mtime=0, progress_callback=None): fileinfo = ('{},{}'.format(filename, int(st_mode))).encode('utf-8') cnxn = FileSyncConnection(connection, b'<2I') cnxn.Send(b'SEND', fileinfo) if progress_callback: total_bytes = os.fstat(datafile.fileno()).st_size if isinstance(datafile, file) else -1 progress = cls._HandleProgress(lambda current: progress_callback(filename, current, total_bytes)) next(progress) while True: data = datafile.read(MAX_PUSH_DATA) if data: cnxn.Send(b'DATA', data) if progress_callback: progress.send(len(data)) else: break if mtime == 0: mtime = int(time.time()) cnxn.Send(b'DONE', size=mtime) for cmd_id, _, data in cnxn.ReadUntil((), b'OKAY', b'FAIL'): if cmd_id == b'OKAY': return raise PushFailedError(data)
Push a file-like object to the device. Args: connection: ADB connection datafile: File-like object for reading from filename: Filename to push to st_mode: stat mode for filename mtime: modification time progress_callback: callback method that accepts filename, bytes_written and total_bytes Raises: PushFailedError: Raised on push failure.
juraj-google-style
def _tuple_of_big_endian_int(bit_groups: Tuple[(np.ndarray, ...)]) -> Tuple[(int, ...)]: return tuple((_big_endian_int(bits) for bits in bit_groups))
Returns the big-endian integers specified by groups of bits. Args: bit_groups: Groups of descending bits, each specifying a big endian integer with the 1s bit at the end. Returns: A tuple containing the integer for each group.
codesearchnet
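A tiny worked example, assuming the companion _big_endian_int helper interprets each bit array with the 1s bit last:
import numpy as np
_tuple_of_big_endian_int((np.array([1, 0, 1]), np.array([1, 1])))   # returns (5, 3)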
def register_for_auto_class(cls, auto_class='AutoProcessor'): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f'{auto_class} is not a valid auto class.') cls._auto_class = auto_class
Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoProcessor`. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoProcessor"`): The auto class to register this new feature extractor with.
github-repos
def exe_cmd(*cmds, timeout=DEFAULT_TIMEOUT_SEC): cmd = ' '.join(cmds) ret, out, err = utils.run_command(cmd=cmd, stdout=PIPE, stderr=PIPE, shell=True, timeout=timeout) logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', utils.cli_cmd_to_string(cmds), out, err, ret) if not err: return out return err
Executes commands in a new shell. Directing stderr to PIPE, with timeout. This is fastboot's own exe_cmd because of its peculiar way of writing non-error info to stderr. Args: cmds: A sequence of commands and arguments. timeout: The number of seconds to wait before timing out. Returns: The output of the command run, in bytes. Raises: Exception: An error occurred during the command execution or the command timed out.
github-repos
def is_distributing_by_cloning(model): if backend.is_tpu_strategy(model._distribution_strategy) and context.executing_eagerly: return False elif ops.executing_eagerly_outside_functions(): return bool(model._compile_distribution) return True
Decide whether this model is going to be distributed via cloning. We are going to distribute the model by cloning in graph mode. Args: model: Keras model to distribute. Returns: True if the `model` is going to be distributed using cloning and False otherwise.
github-repos
def has_chosen(state, correct, msgs): ctxt = {} exec(state.student_code, globals(), ctxt) sel_indx = ctxt['selected_option'] if (sel_indx != correct): state.report(Feedback(msgs[(sel_indx - 1)])) else: state.reporter.success_msg = msgs[(correct - 1)] return state
Verify exercises of the type MultipleChoiceExercise Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). correct: index of correct option, where 1 is the first option. msgs : list of feedback messages corresponding to each option. :Example: The following SCT is for a multiple choice exercise with 2 options, the first of which is correct.:: Ex().has_chosen(1, ['Correct!', 'Incorrect. Try again!'])
codesearchnet
def gather(strategy, value): return nest.map_structure(functools.partial(_gather, strategy), value)
Gathers value from all workers. This is intended for tests before we implement an official all-gather API. Args: strategy: a `tf.distribute.Strategy`. value: a nested structure of n-dim `tf.distribute.DistributedValue` of `tf.Tensor`, or of a `tf.Tensor` if the strategy only has one replica. Cannot contain tf.sparse.SparseTensor. Returns: a (n+1)-dim `tf.Tensor`.
github-repos
def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names): print('Converting dropout ...') if names == 'short': tf_name = 'DO' + random_string(6) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) dropout = keras.layers.Dropout(rate=params['ratio'], name=tf_name) layers[scope_name] = dropout(layers[inputs[0]])
Convert dropout. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def update_defaults(self, new_defaults, respect_none=False): for key, value in six.iteritems(new_defaults): item = self.get_item(key) if item is None: raise YapconfItemNotFound("Cannot update default for {0}, " "there is no config item by the " "name of {1}".format(key, key), None) item.update_default(value, respect_none)
Update items defaults to the values in the new_defaults dict. Args: new_defaults (dict): A key-value pair of new defaults to be applied. respect_none (bool): Flag to indicate if ``None`` values should constitute an update to the default.
juraj-google-style
def cumulative_probabilities(self): partition_function = np.sum(self.p) return (np.cumsum(self.p) / partition_function)
Cumulative sum of the relative probabilities for all possible jumps. Args: None Returns: (np.array): Cumulative sum of relative jump probabilities.
codesearchnet
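A worked example of the cumulative sum, assuming self.p holds the relative jump probabilities:
import numpy as np
p = np.array([1.0, 3.0, 1.0])        # hypothetical relative probabilities
np.cumsum(p) / np.sum(p)             # array([0.2, 0.8, 1. ]) -- same arithmetic as cumulative_probabilities()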
def Append(self, item): if self._index >= self._size: self._index = self._index % self._size try: self._list[self._index] = item except IndexError: self._list.append(item) self._index += 1
Add an item to the list. Args: item (object): item.
juraj-google-style
def where(self, *constraints: column_expression_builder.ColumnExpressionBuilder) -> 'View': for constraint in constraints: if constraint.node.return_type != _fhir_path_data_types.Boolean: raise ValueError(('view `where` expressions must be boolean predicates', f' got `{constraint.node.to_fhir_path()}`')) return View(self._context, self._root_resource, self._fields, self._constraints + tuple(constraints), self._handler)
Returns a new View instance with these added constraints. Args: *constraints: a list of FHIRPath expressions to conjunctively constrain the underlying data. The returned view will apply both the current and the additional constraints defined here.
github-repos
def _add_bound_method(self, bound_method, identify_observed): inst = bound_method.__self__ method_name = bound_method.__name__ key = self.make_key(bound_method) if (key not in self.observers): self.observers[key] = ObserverBoundMethod(inst, method_name, identify_observed, (key, self.observers)) return True else: return False
Add a bound method as an observer. Args: bound_method: The bound method to add as an observer. identify_observed: See the docstring for add_observer. Returns: True if the bound method is added, otherwise False.
codesearchnet
def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, output_attentions) else: layer_outputs = encoder_layer(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
github-repos
def filter(self, *query_filter): for query in query_filter: self.query.append(query) return self
Set the query filter to perform the query with Args: *query_filter: Simplified Query Language filter
codesearchnet
def _CreateAdGroup(client, campaign_id): ad_group_service = client.GetService('AdGroupService') operations = [{ 'operator': 'ADD', 'operand': { 'campaignId': campaign_id, 'adGroupType': 'SEARCH_DYNAMIC_ADS', 'name': 'Earth to Mars Cruises #%s' % uuid.uuid4(), 'status': 'PAUSED', 'biddingStrategyConfiguration': { 'bids': [{ 'xsi_type': 'CpcBid', 'bid': { 'microAmount': '3000000' }, }] } } }] ad_group = ad_group_service.mutate(operations)['value'][0] ad_group_id = ad_group['id'] print 'Ad group with ID "%d" and name "%s" was created.' % ( ad_group_id, ad_group['name']) return ad_group_id
Creates an ad group. Args: client: an AdWordsClient instance. campaign_id: an integer campaign ID. Returns: An integer ad group ID.
juraj-google-style
def inverse_transform(self, y): sklearn.base.check_is_fitted(self) xp, _ = sklearn.utils._array_api.get_namespace(y) if self.ndim_ == 1 and y.ndim == 2: return xp.squeeze(y, axis=1) return y
Revert the transformation of transform. Args: y: np.ndarray Transformed numpy array. Returns: np.ndarray If the transformer was fit to a 1D numpy array, and a 2D numpy array with a singleton second dimension is passed, it will be squeezed back to 1D. Otherwise, it will be left untouched.
github-repos
def prepare_soap_body(self, method, parameters, namespace): tags = [] for (name, value) in parameters: tag = '<{name}>{value}</{name}>'.format(name=name, value=escape(('%s' % value), {'"': '&quot;'})) tags.append(tag) wrapped_params = ''.join(tags) if (namespace is not None): soap_body = '<{method} xmlns="{namespace}">{params}</{method}>'.format(method=method, params=wrapped_params, namespace=namespace) else: soap_body = '<{method}>{params}</{method}>'.format(method=method, params=wrapped_params) return soap_body
Prepare the SOAP message body for sending. Args: method (str): The name of the method to call. parameters (list): A list of (name, value) tuples containing the parameters to pass to the method. namespace (str): The XML namespace to use for the method. Returns: str: A properly formatted SOAP Body.
codesearchnet
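A hedged sketch of what prepare_soap_body produces; device stands for an instance of the class above, and the method name, parameters and namespace are illustrative UPnP-style values, not taken from the source:
body = device.prepare_soap_body('Play', [('InstanceID', 0), ('Speed', 1)], 'urn:schemas-upnp-org:service:AVTransport:1')
# '<Play xmlns="urn:schemas-upnp-org:service:AVTransport:1"><InstanceID>0</InstanceID><Speed>1</Speed></Play>'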
class Iterator(PyDataset): white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff') def __init__(self, n, batch_size, shuffle, seed): self.n = n self.batch_size = batch_size self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_batches_seen = 0 self.lock = threading.Lock() self.index_array = None self.index_generator = self._flow_index() def _set_index_array(self): self.index_array = np.arange(self.n) if self.shuffle: self.index_array = np.random.permutation(self.n) def __getitem__(self, idx): if idx >= len(self): raise ValueError('Asked to retrieve element {idx}, but the Sequence has length {length}'.format(idx=idx, length=len(self))) if self.seed is not None: np.random.seed(self.seed + self.total_batches_seen) self.total_batches_seen += 1 if self.index_array is None: self._set_index_array() index_array = self.index_array[self.batch_size * idx:self.batch_size * (idx + 1)] return self._get_batches_of_transformed_samples(index_array) def __len__(self): return (self.n + self.batch_size - 1) def on_epoch_end(self): self._set_index_array() def reset(self): self.batch_index = 0 def _flow_index(self): self.reset() while 1: if self.seed is not None: np.random.seed(self.seed + self.total_batches_seen) if self.batch_index == 0: self._set_index_array() if self.n == 0: current_index = 0 else: current_index = self.batch_index * self.batch_size % self.n if self.n > current_index + self.batch_size: self.batch_index += 1 else: self.batch_index = 0 self.total_batches_seen += 1 yield self.index_array[current_index:current_index + self.batch_size] def __iter__(self): return self def __next__(self): with self.lock: index_array = next(self.index_generator) return self._get_batches_of_transformed_samples(index_array) def _get_batches_of_transformed_samples(self, index_array): raise NotImplementedError
Base class for image data iterators. DEPRECATED. Every `Iterator` must implement the `_get_batches_of_transformed_samples` method. Args: n: Integer, total number of samples in the dataset to loop over. batch_size: Integer, size of a batch. shuffle: Boolean, whether to shuffle the data between epochs. seed: Random seeding for data shuffling.
github-repos
def main(args): if (not args): raise Exception('Please specify at least one JSON config path') inputs = [] program = [] outputs = [] for arg in args: with open(arg) as fd: config = json.load(fd) inputs.extend(config.get('inputs', [])) program.extend(config.get('program', [])) outputs.extend(config.get('outputs', [])) if (not program): raise Exception('Please specify a program') return run(inputs, program, outputs)
Invokes run function using a JSON file config. Args: args: CLI args, which can be a JSON file containing an object whose attributes are the parameters to the run function. If multiple JSON files are passed, their contents are concatenated. Returns: 0 if succeeded or nonzero if failed. Raises: Exception: If input data is missing.
codesearchnet
def request(self,message,message_type): if message_type == MULTIPART: raise Exception("Unsupported request type") super(Requestor,self).send(message,message_type)
Send a request message of the given type Args: - message: the message to publish - message_type: the type of message being sent
juraj-google-style
def GetSourceStrings(cls, event): formatter_object = cls.GetFormatterObject(event.data_type) return formatter_object.GetSources(event)
Retrieves the formatted source strings for a specific event object. Args: event (EventObject): event. Returns: list[str, str]: short and long version of the source of the event.
juraj-google-style
def qrandom(n): import quantumrandom return np.concatenate([quantumrandom.get_data(data_type='uint16', array_length=1024) for i in range(int(np.ceil((n / 1024.0))))])[:n]
Creates an array of n true random numbers obtained from the quantum random number generator at qrng.anu.edu.au This function requires the package quantumrandom and an internet connection. Args: n (int): length of the random array Return: array of ints: array of truly random unsigned 16 bit int values
codesearchnet
def convert_slice(params, w_name, scope_name, inputs, layers, weights, names): print('Converting slice ...') if len(params['axes']) > 1: raise AssertionError('Cannot convert slice by multiple dimensions') if params['axes'][0] not in [0, 1, 2, 3]: raise AssertionError('Slice by dimension more than 3 or less than 0 is not supported') def target_layer(x, axis=int(params['axes'][0]), start=int(params['starts'][0]), end=int(params['ends'][0])): if axis == 0: return x[start:end] elif axis == 1: return x[:, start:end] elif axis == 2: return x[:, :, start:end] elif axis == 3: return x[:, :, :, start:end] lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert slice operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def make_bubble_surface(dims=DEFAULT_DIMS, repeat=3): gradients = make_gradients(dims) return ( np.sin((gradients[0] - 0.5) * repeat * np.pi) * np.sin((gradients[1] - 0.5) * repeat * np.pi))
Makes a surface from the product of sine functions on each axis. Args: dims (pair): the dimensions of the surface to create repeat (int): the frequency of the waves is set to ensure this many repetitions of the function Returns: surface: A surface.
juraj-google-style
def discovery(self, logfile=None, tracefile=None):
    self._enable_logging(logfile=logfile, tracefile=tracefile)
    self.log("'discovery' method is deprecated. Please 'connect' with force_discovery=True.")
    self.log('Device discovery process started')
    self.connect(logfile=logfile, force_discovery=True, tracefile=tracefile)
    self.disconnect()
Discover the device details. This method discovers several device attributes.

Args:
    logfile (file): Optional file descriptor for session logging. The file must be open for write.
        The session is logged only if ``log_session=True`` was passed to the constructor.
        If the parameter is not passed, the default *session.log* file is created in `log_dir`.
codesearchnet
def ctc_loss_and_grad(logits, labels, label_length, logit_length, unique=None):
    num_labels = _get_dim(logits, 2)
    max_label_seq_length = _get_dim(labels, 1)

    # Per-frame label log-probabilities and CTC state-machine transitions.
    ilabel_log_probs = nn_ops.log_softmax(logits)
    state_log_probs = _ilabel_to_state(labels, num_labels, ilabel_log_probs)
    state_trans_probs = _ctc_state_trans(labels)
    initial_state_log_probs, final_state_log_probs = ctc_state_log_probs(
        label_length, max_label_seq_length)

    # Forward-backward pass over the CTC lattice.
    fwd_bwd_log_probs, log_likelihood = _forward_backward_log(
        state_trans_log_probs=math_ops.log(state_trans_probs),
        initial_state_log_probs=initial_state_log_probs,
        final_state_log_probs=final_state_log_probs,
        observed_log_probs=state_log_probs,
        sequence_length=logit_length)

    if unique:
        olabel_log_probs = _state_to_olabel_unique(labels, num_labels, fwd_bwd_log_probs, unique)
    else:
        olabel_log_probs = _state_to_olabel(labels, num_labels, fwd_bwd_log_probs)

    grad = math_ops.exp(ilabel_log_probs) - math_ops.exp(olabel_log_probs)

    # Zero out gradient frames beyond each sequence's logit_length.
    max_logit_length = _get_dim(logits, 0)
    logit_mask = array_ops.sequence_mask(logit_length, max_logit_length, dtypes.float32)
    logit_mask = array_ops.transpose(logit_mask, perm=[1, 0])
    logit_mask = array_ops.expand_dims(logit_mask, axis=2)
    grad *= logit_mask

    loss = -log_likelihood
    return (loss, grad)
Computes the CTC loss and gradients. Most users will want fwd_bwd.ctc_loss This function returns the computed gradient, it does not have a gradient of its own defined. Args: logits: tensor of shape [frames, batch_size, num_labels] labels: tensor of shape [batch_size, max_label_seq_length] label_length: tensor of shape [batch_size] Length of reference label sequence in labels. logit_length: tensor of shape [batch_size] Length of input sequence in logits. unique: (optional) unique label indices as computed by unique(labels) If supplied, enables an implementation that is faster and more memory efficient on TPU. Returns: loss: tensor of shape [batch_size] gradient: tensor of shape [frames, batch_size, num_labels]
github-repos
def _chunk_query(l, n, cn, conn, table, db_type):
    # Insert the rows of l in batches of n.
    for i in range(0, len(l), n):
        insert_query_m(l[i:i + n], table, conn, cn, db_type)
Call for inserting SQL query in chunks based on n rows Args: l (list): List of tuples n (int): Number of rows cn (str): Column names conn (connection object): Database connection object table (str): Table name db_type (str): If "sqlite" or "mysql"
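A small usage sketch; the database, table, and column names are placeholders, and `insert_query_m` is assumed to be defined alongside this helper:

import sqlite3

conn = sqlite3.connect('metabolites.db')              # placeholder database
rows = [(i, 'compound_%d' % i) for i in range(2500)]  # list of tuples to insert
_chunk_query(rows, 1000, 'id, name', conn, 'compounds', 'sqlite')  # issues 3 inserts of <= 1000 rows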
juraj-google-style
def correlation_vector(self, value):
    if value == self._defaults['ai.operation.correlationVector'] and 'ai.operation.correlationVector' in self._values:
        del self._values['ai.operation.correlationVector']
    else:
        self._values['ai.operation.correlationVector'] = value
The correlation_vector property.

Args:
    value (string): the property value.
juraj-google-style
def emit(self, record):
    if record.levelno < logging.getLevelName(self.min_level):
        return

    evt = LogEvent()
    evt.level = record.levelname
    evt.levelno = record.levelno
    evt.timestamp = datetime.fromtimestamp(record.created)
    evt.message = record.message
    evt.filename = record.filename
    evt.lineno = record.lineno
    evt.module = record.module
    evt.funcname = record.funcName
    evt.pathname = record.pathname
    evt.process_id = record.process

    if record.levelno >= 40:
        evt.stacktrace = traceback.format_exc()

    try:
        db.session.add(evt)
        db.session.commit()
    except Exception:
        db.session.rollback()
Persist a record into the database Args: record (`logging.Record`): The logging.Record object to store Returns: `None`
juraj-google-style
def _ot_make_closed(self, access_string):
    self.observation_table.sm_vector.append(access_string)
    for i in self.alphabet:
        self.observation_table.smi_vector.append(access_string + i)
        for e in self.observation_table.em_vector:
            self._fill_table_entry(access_string + i, e)
Given a state access_string in Smi that is not equivalent to any state in Sm, this method moves that state into Sm, creates the corresponding Smi states, and fills in the corresponding table entries.

Args:
    access_string (str): State access string

Returns:
    None
juraj-google-style
def rpc(self, address, rpc_id, *args, **kwargs):
    if isinstance(rpc_id, RPCDeclaration):
        arg_format = rpc_id.arg_format
        resp_format = rpc_id.resp_format
        rpc_id = rpc_id.rpc_id
    else:
        arg_format = kwargs.get('arg_format', None)
        resp_format = kwargs.get('resp_format', None)

    arg_payload = b''
    if arg_format is not None:
        arg_payload = pack_rpc_payload(arg_format, args)

    self._logger.debug('Sending rpc to %d:%04X, payload=%s', address, rpc_id, args)

    resp_payload = self.call_rpc(address, rpc_id, arg_payload)
    if resp_format is None:
        return []

    resp = unpack_rpc_payload(resp_format, resp_payload)
    return resp
Immediately dispatch an RPC inside this EmulatedDevice.

This function is meant to be used for testing purposes as well as by tiles inside a complex EmulatedDevice subclass that need to communicate with each other. It should only be called from the main virtual device thread where start() was called from. **Background workers may not call this method since it may cause them to deadlock.**

Args:
    address (int): The address of the tile that has the RPC.
    rpc_id (int): The 16-bit id of the rpc we want to call.
    *args: Any required arguments for the RPC as python objects.
    **kwargs: Only two keyword arguments are supported:
        - arg_format: A format specifier for the argument list
        - resp_format: A format specifier for the result

Returns:
    list: A list of the decoded response members from the RPC.
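A hedged usage sketch; the tile address, RPC ids, and format strings below are made-up examples rather than real IOTile RPC definitions:

# Query a hypothetical "status" RPC on the tile at address 8 (one uint32 response).
status, = device.rpc(8, 0x8000, resp_format='L')

# Fire a hypothetical "set config" RPC with two uint16 arguments; the response is ignored.
device.rpc(8, 0x8001, 1, 500, arg_format='HH')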
codesearchnet
def label(self, main_type, sub_type, unique_id, label, action='ADD', owner=None, params=None):
    params = params or {}
    if owner:
        params['owner'] = owner

    action = action.upper()

    if not sub_type:
        url = '/v2/{}/{}/securityLabels/{}'.format(main_type, unique_id, quote(label))
    else:
        url = '/v2/{}/{}/{}/securityLabels/{}'.format(main_type, sub_type, unique_id, quote(label))

    if action == 'ADD':
        return self.tcex.session.post(url, params=params)
    if action == 'DELETE':
        return self.tcex.session.delete(url, params=params)
    if action == 'GET':
        return self.tcex.session.get(url, params=params)
    return None
Add, delete, or retrieve a security label on an object.

Args:
    owner: the owner name to use as a request parameter.
    main_type: the main object type used in the API path.
    sub_type: the object sub type, if any.
    unique_id: the unique id of the object.
    label: the security label.
    action: one of 'ADD', 'DELETE' or 'GET' (defaults to 'ADD').
    params: optional query parameters for the request.

Return:
    The requests response object, or None for an unrecognized action.
juraj-google-style
def from_key_counter(cls, key, counter, alg):
    counter = _convert_to_state_tensor(counter)
    key = _convert_to_state_tensor(key)
    alg = random_ops_util.convert_alg_to_int(alg)
    counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1])
    key.shape.assert_is_compatible_with([])
    key = array_ops.reshape(key, [1])
    state = array_ops.concat([counter, key], 0)
    return cls(state=state, alg=alg)
Creates a generator from a key and a counter.

This constructor only applies if the algorithm is a counter-based algorithm. See method `key` for the meaning of "key" and "counter".

Args:
    key: the key for the RNG, a scalar of type STATE_TYPE.
    counter: a vector of dtype STATE_TYPE representing the initial counter for the RNG, whose length is algorithm-specific.
    alg: the RNG algorithm. If None, it will be auto-selected. See `__init__` for its possible values.

Returns:
    The new generator.
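A short usage sketch through TensorFlow's public `tf.random.Generator` wrapper, which exposes this constructor; the key and counter values are arbitrary (Philox takes a length-2 counter here):

import tensorflow as tf

g = tf.random.Generator.from_key_counter(key=1234, counter=[5, 6], alg='philox')
print(g.normal(shape=(2, 3)))       # draws are reproducible for the same key/counter pair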
github-repos
def HandleBlockReceived(self, inventory):
    block = IOHelper.AsSerializableWithType(inventory, 'neo.Core.Block.Block')
    if not block:
        return

    blockhash = block.Hash.ToBytes()

    try:
        if blockhash in BC.Default().BlockRequests:
            BC.Default().BlockRequests.remove(blockhash)
    except KeyError:
        pass

    try:
        if blockhash in self.myblockrequests:
            self.heart_beat(HEARTBEAT_BLOCKS)
            self.myblockrequests.remove(blockhash)
    except KeyError:
        pass

    self.leader.InventoryReceived(block)
Process a Block inventory payload. Args: inventory (neo.Network.Inventory):
juraj-google-style
def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:
    sx, sz = size
    rx, rz = resolution
    dx, dz = sx / rx, sz / rz      # step per vertex
    ox, oz = -sx / 2, -sz / 2      # offset to center the plane

    def gen_pos():
        for z in range(rz):
            for x in range(rx):
                yield ox + x * dx
                yield 0
                yield oz + z * dz

    def gen_uv():
        for z in range(rz):
            for x in range(rx):
                yield x / (rx - 1)
                yield 1 - z / (rz - 1)

    def gen_normal():
        for _ in range(rx * rz):
            yield 0.0
            yield 1.0
            yield 0.0

    def gen_index():
        for z in range(rz - 1):
            for x in range(rx - 1):
                # two triangles per quad
                yield z * rz + x + 1
                yield z * rz + x
                yield z * rz + x + rx
                yield z * rz + x + 1
                yield z * rz + x + rx
                yield z * rz + x + rx + 1

    pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)
    uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)
    normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)
    index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)

    vao = VAO('plane_xz', mode=moderngl.TRIANGLES)
    vao.buffer(pos_data, '3f', ['in_position'])
    vao.buffer(uv_data, '2f', ['in_uv'])
    vao.buffer(normal_data, '3f', ['in_normal'])
    vao.index_buffer(index_data, index_element_size=4)

    return vao
Generates a plane on the xz axis of a specific size and resolution. Normals and texture coordinates are also included. Args: size: (x, y) tuple resolution: (x, y) tuple Returns: A :py:class:`demosys.opengl.vao.VAO` instance
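A brief usage sketch inside a demosys effect; the shader program lookup is only indicative:

vao = plane_xz(size=(20.0, 20.0), resolution=(64, 64))
# In the effect's draw method (program name is hypothetical):
#   vao.render(self.get_program('terrain'))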
codesearchnet
def clear_errors():
    data = []
    data.append(0x0B)
    data.append(BROADCAST_ID)
    data.append(RAM_WRITE_REQ)
    data.append(STATUS_ERROR_RAM)
    data.append(BYTE2)
    data.append(0x00)
    data.append(0x00)
    send_data(data)
Clears the errors register of all Herkulex servos Args: none
juraj-google-style
def _decorate_run_options_for_debug(self, run_options, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):
    run_options.output_partition_graphs = True
    debug_utils.watch_graph(
        run_options,
        self._sess.graph,
        debug_urls=debug_urls,
        debug_ops=debug_ops,
        node_name_regex_allowlist=node_name_regex_allowlist,
        op_type_regex_allowlist=op_type_regex_allowlist,
        tensor_dtype_regex_allowlist=tensor_dtype_regex_allowlist,
        tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
        reset_disk_byte_usage=self._run_call_count == 1 or self._is_disk_usage_reset_each_run())
Modify a RunOptions object for debug tensor watching.

Specifies request for outputting partition graphs. Adds debug_tensor_watch_opts with proper debug URLs.

Args:
    run_options: (RunOptions) the modified RunOptions object.
    debug_urls: (list of str) debug URLs to be entered in run_options.debug_tensor_watch_opts.
    debug_ops: (str or list of str) debug op(s) to be used by the debugger.
    node_name_regex_allowlist: Regular-expression allowlist for node name.
    op_type_regex_allowlist: Regular-expression allowlist for op type.
    tensor_dtype_regex_allowlist: Regular-expression allowlist for tensor dtype.
    tolerate_debug_op_creation_failures: Whether debug op creation failures are to be tolerated.
github-repos
def parse_arguments(argv):
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(''))  # description text omitted in this snippet
    parser.add_argument('--cloud',
                        action='store_true',
                        help='Analysis will use cloud services.')
    parser.add_argument('--output',
                        metavar='DIR',
                        type=str,
                        required=True,
                        help='GCS or local folder')

    input_group = parser.add_argument_group(
        title='Data Source Parameters',
        description='schema is only needed if using --csv')
    input_group.add_argument('--csv',
                             metavar='FILE',
                             type=str,
                             required=False,
                             action='append',
                             help='Input CSV absolute file paths. May contain a file pattern.')
    input_group.add_argument('--schema',
                             metavar='FILE',
                             type=str,
                             required=False,
                             help='Schema file path. Only required if using csv files')
    input_group.add_argument('--bigquery',
                             metavar='PROJECT_ID.DATASET.TABLE_NAME',
                             type=str,
                             required=False,
                             help='Must be in the form project.dataset.table_name')

    parser.add_argument('--features',
                        metavar='FILE',
                        type=str,
                        required=True,
                        help='Features file path')

    args = parser.parse_args(args=argv[1:])

    if args.cloud:
        if not args.output.startswith('gs://'):
            raise ValueError('--output must point to a location on GCS')
        if args.csv and not all(x.startswith('gs://') for x in args.csv):
            raise ValueError('--csv must point to a location on GCS')
        if args.schema and not args.schema.startswith('gs://'):
            raise ValueError('--schema must point to a location on GCS')

    if not args.cloud and args.bigquery:
        raise ValueError('--bigquery must be used with --cloud')

    if not ((args.bigquery and args.csv is None and args.schema is None) or
            (args.bigquery is None and args.csv and args.schema)):
        raise ValueError('either --csv and --schema must both be set or just --bigquery is set')

    return args
Parse command line arguments. Args: argv: list of command line arguments, including program name. Returns: An argparse Namespace object. Raises: ValueError: for bad parameters
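A brief usage sketch; the bucket, dataset, and file paths are placeholders:

args = parse_arguments(['analyze', '--cloud',
                        '--output', 'gs://my-bucket/analysis',
                        '--bigquery', 'my-project.my_dataset.my_table',
                        '--features', 'gs://my-bucket/features.json'])
print(args.bigquery)                # 'my-project.my_dataset.my_table'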
juraj-google-style
def create_struct(name):
    sid = idc.GetStrucIdByName(name)
    if sid != idaapi.BADADDR:
        raise exceptions.SarkStructAlreadyExists("A struct names {!r} already exists.".format(name))

    sid = idc.AddStrucEx(-1, name, 0)
    if sid == idaapi.BADADDR:
        raise exceptions.SarkStructCreationFailed("Struct creation failed.")

    return sid
Create a structure.

Args:
    name: The structure's name

Returns:
    The struct ID

Raises:
    exceptions.SarkStructAlreadyExists: A struct with the same name already exists
    exceptions.SarkStructCreationFailed: Struct creation failed
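A small usage sketch for IDAPython; the struct name is made up:

try:
    sid = create_struct('PACKET_HEADER')            # hypothetical struct name
    print('created struct with id 0x{:X}'.format(sid))
except exceptions.SarkStructAlreadyExists:
    print('a struct by that name already exists in this IDB')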
juraj-google-style
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
    if stack_policy:
        kwargs = generate_stack_policy_args(stack_policy)
        kwargs["StackName"] = fqn
        logger.debug("Setting stack policy on %s.", fqn)
        self.cloudformation.set_stack_policy(**kwargs)
Set a stack policy when using changesets. ChangeSets don't allow you to set stack policies in the same call to update them. This sets it before executing the changeset if the stack policy is passed in. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy.
juraj-google-style
def get_review(review_struct):
    review_fn = _resource_context("review.rst")
    with open(review_fn) as f:
        review = f.read()

    with NamedTemporaryFile(suffix=".png") as qr_file:
        url = pyqrcode.create(review_struct.internal_url)
        url.png(qr_file.name, scale=5)
        qr_file.flush()
        qr_file.seek(0)

        review = Template(review).substitute(
            content=review_struct.get_rst(),
            datum=time.strftime("%d.%m.%Y", time.localtime()),
            cas=time.strftime("%H:%M", time.localtime()),
            resources_path=RES_PATH,
            qr_path=qr_file.name,
        )

        return gen_pdf(
            review,
            open(_resource_context("review_style.json")).read(),
        )
Generate review from `review_struct`. Args: review_struct (obj): :class:`.GenerateReview` instance. Returns: obj: StringIO file instance containing PDF file.
juraj-google-style
def get_dataset_split(tmp_dir, split, use_control_set):
    if not use_control_set:
        dataset_split = {
            problem.DatasetSplit.TRAIN: [
                f for f in tf.gfile.Glob(os.path.join(tmp_dir, 'train-novels*.txt'))
            ],
            problem.DatasetSplit.EVAL: [
                os.path.join(tmp_dir, 'lambada_control_test_data_plain_text.txt')
            ],
        }
    return dataset_split[split]
Gives the file paths with regards to the given split. Args: tmp_dir: temp directory split: dataset split use_control_set: uses control dataset if true. Returns: list of file paths.
codesearchnet
def summarize_tensors(tensor_dict, tag=None):
    if tag is None:
        tag = 'tensors/'
    for t_name in list(tensor_dict):
        t = tensor_dict[t_name]
        tf.summary.histogram(tag + t_name, t)
Summarize the tensors. Args: tensor_dict: a dictionary of tensors. tag: name scope of the summary; defaults to tensors/.
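A quick usage sketch in TF1-style graph code (the setting this helper comes from); the tensor names are arbitrary:

import tensorflow.compat.v1 as tf

w = tf.random.normal([128, 64])
b = tf.zeros([64])
summarize_tensors({'weights': w, 'bias': b}, tag='debug/')   # adds debug/weights and debug/bias histograms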
codesearchnet
def rename(self, container, name):
    url = self._url("/containers/{0}/rename", container)
    params = {'name': name}
    res = self._post(url, params=params)
    self._raise_for_status(res)
Rename a container. Similar to the ``docker rename`` command. Args: container (str): ID of the container to rename name (str): New name for the container Raises: :py:class:`docker.errors.APIError` If the server returns an error.
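A short usage sketch with docker-py's low-level `APIClient`; the container id is a placeholder:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
client.rename('1a2b3c4d5e6f', 'web-frontend')       # rename the container to 'web-frontend'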
juraj-google-style
def predict_array(self, arr):
    precompute = self.precompute
    self.precompute = False
    pred = super().predict_array(arr)
    self.precompute = precompute
    return pred
This override is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to true.

Args:
    arr: a numpy array to be used as input to the model for prediction purposes

Returns:
    a numpy array containing the predictions from the model
juraj-google-style
def play_alert(zones, alert_uri, alert_volume=20, alert_duration=0, fade_back=False):
    # Take a snapshot of the current state of every zone.
    for zone in zones:
        zone.snap = Snapshot(zone)
        zone.snap.snapshot()
        print('snapshot of zone: {}'.format(zone.player_name))

    # Pause playing coordinators and set the alert volume on each zone.
    for zone in zones:
        if zone.is_coordinator:
            if not zone.is_playing_tv:
                trans_state = zone.get_current_transport_info()
                if trans_state['current_transport_state'] == 'PLAYING':
                    zone.pause()
        zone.volume = alert_volume
        zone.mute = False

    # Play the alert on every coordinator.
    print('will play: {} on all coordinators'.format(alert_uri))
    for zone in zones:
        if zone.is_coordinator:
            zone.play_uri(uri=alert_uri, title='Sonos Alert')

    # Wait for the requested alert duration.
    time.sleep(alert_duration)

    # Restore every zone to its snapshotted state.
    for zone in zones:
        print('restoring {}'.format(zone.player_name))
        zone.snap.restore(fade=fade_back)
Demo function using soco.snapshot across multiple Sonos players.

Args:
    zones (set): a set of SoCo objects
    alert_uri (str): uri that Sonos can play as an alert
    alert_volume (int): volume level for playing alert (0 to 100)
    alert_duration (int): length of alert in seconds (if zero, the length of the track)
    fade_back (bool): whether to fade the sound back up when restoring the zones
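A short usage sketch; the alert URI is a placeholder and Sonos players must be discoverable on the local network:

import soco

zones = soco.discover()                                     # all Sonos zones on the LAN
play_alert(zones, 'http://example.com/doorbell.mp3',        # placeholder alert stream
           alert_volume=30, alert_duration=5, fade_back=True)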
codesearchnet