Columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (string, 3 classes)
def get_related(self): if self.exists and hasattr(self.rdf.triples, 'ore') and hasattr(self.rdf.triples.ore, 'aggregates'): related = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.ore.aggregates ] return related else: return []
get ore:aggregates for this resource, optionally retrieving resource payload Args: retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload
juraj-google-style
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None): countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception) if countryinfo is not None: return countryinfo.get(' return None
Get country name from ISO3 code Args: iso3 (str): ISO3 code for which to get country name use_live (bool): Try to get the latest data from the web rather than the file in the package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: Country name
juraj-google-style
def row_splits_dtype(self): return self._row_splits_dtype
The `tf.dtypes.DType` of the RaggedTensor's `row_splits`. Examples: >>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64) >>> tf.type_spec_from_value(rt).row_splits_dtype tf.int64 Returns: A `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One of `tf.int32` or `tf.int64`.
github-repos
def _print_task_data(self, task): print(' {0:s} ({1:s})'.format(task['name'], task['id'])) paths = task.get('saved_paths', []) if not paths: return for path in paths: if path.endswith('worker-log.txt'): continue if path.endswith('{0:s}.log'.format(task.get('id'))): continue if path.startswith('/'): continue print(' ' + path)
Pretty-prints task data. Args: task: Task dict generated by Turbinia.
juraj-google-style
def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses): ret = [] pool = {int(k): v for (k, v) in pool.items()} total_in_pool = len(seeded_answers) merged_pool = convert_seeded_answers(seeded_answers) student_id = get_student_item_dict()['student_id'] for key in pool: total_in_pool += len(pool[key]) if (student_id in pool[key].keys()): total_in_pool -= 1 if (key in merged_pool): merged_pool[key].update(pool[key].items()) else: merged_pool[key] = pool[key] selected = [] while (len(ret) < min(num_responses, total_in_pool)): for (option, students) in merged_pool.items(): student = student_id i = 0 while (((student == student_id) or (i > 100)) and ((str(option) + student) not in selected)): student = random.choice(students.keys()) i += 1 selected.append((str(option) + student)) if student.startswith('seeded'): rationale = students[student] else: student_item = get_student_item_dict(student) submission = sas_api.get_answers_for_student(student_item) rationale = submission.get_rationale(0) ret.append({'option': option, 'rationale': rationale}) if (len(ret) >= min(num_responses, total_in_pool)): break return {'answers': ret}
Get answers from others with simple algorithm, which picks one answer for each option. Args: see `get_other_answers` num_responses (int): the number of responses to be returned. This value may not be respected if there are not enough answers to return. Returns: dict: answers based on the selection algorithm
codesearchnet
def xeval(source, optimize=True): native = xcompile(source, optimize=optimize) return native()
Compiles to native Python bytecode and runs the program, returning the topmost value on the stack. Args: optimize: Whether to optimize the code after parsing it. Returns: None: If the stack is empty obj: If the stack contains a single value [obj, obj, ...]: If the stack contains many values
juraj-google-style
def update(self, resource, timeout=-1): self.__set_default_values(resource) uri = self._client.build_uri(resource['logicalSwitch']['uri']) return self._client.update(resource, uri=uri, timeout=timeout)
Updates a Logical Switch. Args: resource (dict): Object to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, it just stops waiting for its completion. Returns: dict: Updated resource.
juraj-google-style
def logout(self, client_id, return_to, federated=False): return_to = quote_plus(return_to) if federated is True: return self.get( 'https: self.domain, client_id, return_to), headers={'Content-Type': 'application/json'} ) return self.get( 'https: client_id, return_to), headers={'Content-Type': 'application/json'} )
Logout Use this endpoint to log out a user. If you want to navigate the user to a specific URL after the logout, set that URL at the returnTo parameter. The URL should be included in the appropriate Allowed Logout URLs list. Args: client_id (str): The client_id of your application. returnTo (str): URL to redirect the user after the logout. federated (bool): Querystring parameter to log the user out of the IdP
juraj-google-style
def ProcessConfigOverrides(filename): abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: (abs_path, base_name) = os.path.split(abs_filename) if (not base_name): break cfg_file = os.path.join(abs_path, 'CPPLINT.cfg') abs_filename = abs_path if (not os.path.isfile(cfg_file)): continue try: with open(cfg_file) as file_handle: for line in file_handle: (line, _, _) = line.partition(' if (not line.strip()): continue (name, _, val) = line.partition('=') name = name.strip() val = val.strip() if (name == 'set noparent'): keep_looking = False elif (name == 'filter'): cfg_filters.append(val) elif (name == 'exclude_files'): if base_name: pattern = re.compile(val) if pattern.match(base_name): _cpplint_state.PrintInfo(('Ignoring "%s": file excluded by "%s". File path component "%s" matches pattern "%s"\n' % (filename, cfg_file, base_name, val))) return False elif (name == 'linelength'): global _line_length try: _line_length = int(val) except ValueError: _cpplint_state.PrintError('Line length must be numeric.') elif (name == 'extensions'): global _valid_extensions try: extensions = [ext.strip() for ext in val.split(',')] _valid_extensions = set(extensions) except ValueError: sys.stderr.write(('Extensions should be a comma-separated list of values;for example: extensions=hpp,cpp\nThis could not be parsed: "%s"' % (val,))) elif (name == 'headers'): global _header_extensions try: extensions = [ext.strip() for ext in val.split(',')] _header_extensions = set(extensions) except ValueError: sys.stderr.write(('Extensions should be a comma-separated list of values;for example: extensions=hpp,cpp\nThis could not be parsed: "%s"' % (val,))) elif (name == 'root'): global _root _root = val else: _cpplint_state.PrintError(('Invalid configuration option (%s) in file %s\n' % (name, cfg_file))) except IOError: _cpplint_state.PrintError(("Skipping config file '%s': Can't open for reading\n" % cfg_file)) keep_looking = False for cfg_filter in reversed(cfg_filters): _AddFilters(cfg_filter) return True
Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further.
codesearchnet
def has_no_checked_field(self, locator, **kwargs): kwargs["checked"] = True return self.has_no_selector("field", locator, **kwargs)
Checks if the page or current node has no radio button or checkbox with the given label, value, or id that is currently checked. Args: locator (str): The label, name, or id of a checked field. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. Returns: bool: Whether it doesn't exist.
juraj-google-style
def rename(oldname, newname, overwrite=False): rename_v2(oldname, newname, overwrite)
Rename or move a file / directory. Args: oldname: string, pathname for a file newname: string, pathname to which the file needs to be moved overwrite: boolean, if false it's an error for `newname` to be occupied by an existing file. Raises: errors.OpError: If the operation fails.
github-repos
def add_glyph(self, source_or_glyph, glyph=None, **kw): if (glyph is not None): source = source_or_glyph else: (source, glyph) = (ColumnDataSource(), source_or_glyph) if (not isinstance(source, DataSource)): raise ValueError("'source' argument to add_glyph() must be DataSource subclass") if (not isinstance(glyph, Glyph)): raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass") g = GlyphRenderer(data_source=source, glyph=glyph, **kw) self.renderers.append(g) return g
Adds a glyph to the plot with associated data sources and ranges. This function will take care of creating and configuring a Glyph object, and then add it to the plot's list of renderers. Args: source (DataSource) : a data source for the glyphs to all use glyph (Glyph) : the glyph to add to the Plot Keyword Arguments: Any additional keyword arguments are passed on as-is to the Glyph initializer. Returns: GlyphRenderer
codesearchnet
def register_subcommand(parser: ArgumentParser): serve_parser = parser.add_parser('serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.') serve_parser.add_argument('--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on') serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.') serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.') serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers') serve_parser.add_argument('--model', type=str, help="Model's name or path to stored model.") serve_parser.add_argument('--config', type=str, help="Model's config name or path to stored model.") serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.') serve_parser.add_argument('--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)') serve_parser.set_defaults(func=serve_command_factory)
Register this command to argparse so it's available for the transformer-cli Args: parser: Root parser to register command-specific arguments
github-repos
def compare_versions(ver1='', oper='==', ver2=''): if (not ver1): raise SaltInvocationError('compare_version, ver1 is blank') if (not ver2): raise SaltInvocationError('compare_version, ver2 is blank') if (ver1 == 'latest'): ver1 = six.text_type(sys.maxsize) if (ver2 == 'latest'): ver2 = six.text_type(sys.maxsize) if (ver1 == 'Not Found'): ver1 = '0.0.0.0.0' if (ver2 == 'Not Found'): ver2 = '0.0.0.0.0' return salt.utils.versions.compare(ver1, oper, ver2, ignore_epoch=True)
Compare software package versions Args: ver1 (str): A software version to compare oper (str): The operand to use to compare ver2 (str): A software version to compare Returns: bool: True if the comparison is valid, otherwise False CLI Example: .. code-block:: bash salt '*' pkg.compare_versions 1.2 >= 1.3
codesearchnet
def send_highspeed(self, data, progress_callback): if (not self.connected): raise HardwareError('Cannot send a script if we are not in a connected state') if (isinstance(data, str) and (not isinstance(data, bytes))): raise ArgumentError('You must send bytes or bytearray to _send_highspeed', type=type(data)) if (not isinstance(data, bytes)): data = bytes(data) try: self._on_progress = progress_callback self._loop.run_coroutine(self.adapter.send_script(0, data)) finally: self._on_progress = None
Send a script to a device at highspeed, reporting progress. This method takes a binary blob and downloads it to the device as fast as possible, calling the passed progress_callback periodically with updates on how far it has gotten. Args: data (bytes): The binary blob that should be sent to the device at highspeed. progress_callback (callable): A function that will be called periodically to report progress. The signature must be callback(done_count, total_count) where done_count and total_count will be passed as integers.
codesearchnet
def __init__(self, project_id, credentials, config=None): self._project_id = project_id self._credentials = credentials self._config = config if config is not None else Context._get_default_config()
Initializes an instance of a Context object. Args: project_id: the current cloud project. credentials: the credentials to use to authorize requests. config: key/value configurations for cloud operations
juraj-google-style
def preprocess_data(data: List[Tuple[List[str], List[str]]], to_lower: bool = True, append_case: str = "first") -> List[Tuple[List[Tuple[str]], List[str]]]: new_data = [] for words, tags in data: new_words = [process_word(word, to_lower=to_lower, append_case=append_case) for word in words] new_tags = tags new_data.append((new_words, new_tags)) return new_data
Processes all words in data using :func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`. Args: data: a list of pairs (words, tags), each pair corresponds to a single sentence to_lower: whether to lowercase append_case: whether to add case mark Returns: a list of preprocessed sentences
juraj-google-style
def create(self, data, *args, **kwargs): if self.create.__func__.__module__ != self.__module__: raise Exception("Child method not implemented") self._MambuStruct__method = "POST" self._MambuStruct__data = data self.connect(*args, **kwargs) self._MambuStruct__method = "GET" self._MambuStruct__data = None
Creates an entity in Mambu This method must be implemented in child classes Args: data (dictionary): dictionary with data to send, this dictionary is specific for each Mambu entity
juraj-google-style
def speechlib_mel(sample_rate, n_fft, n_mels, fmin=None, fmax=None): bank_width = int(n_fft if fmax is None: fmax = sample_rate / 2 if fmin is None: fmin = 0 assert fmin >= 0, 'fmin cannot be negative' assert fmin < fmax <= sample_rate / 2, 'fmax must be between (fmin, samplerate / 2]' def mel(f): return 1127.0 * np.log(1.0 + f / 700.0) def bin2mel(fft_bin): return 1127.0 * np.log(1.0 + fft_bin * sample_rate / (n_fft * 700.0)) def f2bin(f): return int(f * n_fft / sample_rate + 0.5) klo = f2bin(fmin) + 1 khi = f2bin(fmax) khi = max(khi, klo) mlo = mel(fmin) mhi = mel(fmax) m_centers = np.linspace(mlo, mhi, n_mels + 2) ms = (mhi - mlo) / (n_mels + 1) matrix = np.zeros((n_mels, bank_width), dtype=np.float32) for m in range(0, n_mels): left = m_centers[m] center = m_centers[m + 1] right = m_centers[m + 2] for fft_bin in range(klo, khi): mbin = bin2mel(fft_bin) if left < mbin < right: matrix[m, fft_bin] = 1.0 - abs(center - mbin) / ms return matrix
Create a Mel filter-bank the same as SpeechLib FbankFC. Args: sample_rate (int): Sample rate in Hz. number > 0 [scalar] n_fft (int): FFT size. int > 0 [scalar] n_mels (int): Mel filter size. int > 0 [scalar] fmin (float): lowest frequency (in Hz). If None use 0.0. float >= 0 [scalar] fmax: highest frequency (in Hz). If None use sample_rate / 2. float >= 0 [scalar] Returns: out (numpy.ndarray): Mel transform matrix [shape=(n_mels, 1 + n_fft/2)]
github-repos
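A quick illustration of the mel warping that speechlib_mel builds its triangular filters on (this sketch is not part of the original corpus; the sample_rate and n_mels values are made-up assumptions):

import numpy as np

def hz_to_mel(f):
    # same warping used inside speechlib_mel: 1127 * ln(1 + f / 700)
    return 1127.0 * np.log(1.0 + f / 700.0)

def mel_to_hz(m):
    # inverse of the warping above
    return 700.0 * (np.exp(m / 1127.0) - 1.0)

# n_mels triangular filters get their edges evenly spaced on the mel scale
sample_rate, n_mels = 16000, 80
edges_hz = mel_to_hz(np.linspace(hz_to_mel(0.0), hz_to_mel(sample_rate / 2), n_mels + 2))
print(edges_hz[:5])  # first few filter edge frequencies in Hz; the spacing widens with frequency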
def execute(self, commands, encoding='json', **kwargs): if (encoding not in ('json', 'text')): raise TypeError('encoding must be one of [json, text]') try: self.error = None request = self.request(commands, encoding=encoding, **kwargs) response = self.send(request) return response except (ConnectionError, CommandError, TypeError) as exc: exc.commands = commands self.error = exc raise
Executes the list of commands on the destination node This method takes a list of commands and sends them to the destination node, returning the results. The execute method handles putting the destination node in enable mode and will pass the enable password, if required. Args: commands (list): A list of commands to execute on the remote node encoding (string): The encoding to send along with the request message to the destination node. Valid values include 'json' or 'text'. This argument will influence the response object encoding **kwargs: Arbitrary keyword arguments Returns: A decoded response message as a native Python dictionary object that has been deserialized from JSON. Raises: CommandError: A CommandError is raised that includes the error code and error message along with the list of commands that were sent to the node. The exception instance is also stored in the error property and is available until the next request is sent
codesearchnet
def read_from_hdx(identifier, configuration=None): dataset = Dataset(configuration=configuration) result = dataset._dataset_load_from_hdx(identifier) if result: return dataset return None
Reads the dataset given by identifier from HDX and returns Dataset object Args: identifier (str): Identifier of dataset configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[Dataset]: Dataset object if successful read, None if not
codesearchnet
def get_cytoband_coord(chrom, pos): chrom = chrom.strip('chr') pos = int(pos) result = None logger.debug('Finding Cytoband for chrom:{0} pos:{1}'.format(chrom, pos)) if (chrom in CYTOBANDS): for interval in CYTOBANDS[chrom][pos]: result = '{0}{1}'.format(chrom, interval.data) return result
Get the cytoband coordinate for a position Args: chrom(str): A chromosome pos(int): The position Returns: cytoband
codesearchnet
def minimum_image( self, r1, r2 ): delta_r = r2 - r1 delta_r = np.array( [ x - math.copysign( 1.0, x ) if abs(x) > 0.5 else x for x in delta_r ] ) return( delta_r )
Find the minimum image vector from point r1 to point r2. Args: r1 (np.array): fractional coordinates of point r1. r2 (np.array): fractional coordinates of point r2. Returns: (np.array): the fractional coordinate vector from r1 to the nearest image of r2.
juraj-google-style
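For readers unfamiliar with the minimum image convention, a self-contained rerun of the function above with made-up fractional coordinates shows how components larger than 0.5 are wrapped to the nearest periodic image:

import math
import numpy as np

def minimum_image(r1, r2):
    # wrap each fractional-coordinate component into (-0.5, 0.5]
    delta_r = r2 - r1
    return np.array([x - math.copysign(1.0, x) if abs(x) > 0.5 else x for x in delta_r])

r1 = np.array([0.1, 0.9, 0.5])
r2 = np.array([0.9, 0.1, 0.6])
print(minimum_image(r1, r2))  # approximately [-0.2, 0.2, 0.1], not the raw difference [0.8, -0.8, 0.1]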
def close(self, reason=None): with self._closing: if self._closed: return if self.is_active: _LOGGER.debug("Stopping consumer.") self._consumer.stop() self._consumer = None self._rpc.close() self._rpc = None self._closed = True _LOGGER.debug("Finished stopping manager.") if reason: _LOGGER.debug("reason for closing: %s" % reason) if isinstance(reason, Exception): raise reason raise RuntimeError(reason)
Stop consuming messages and shutdown all helper threads. This method is idempotent. Additional calls will have no effect. Args: reason (Any): The reason to close this. If None, this is considered an "intentional" shutdown.
juraj-google-style
def Write(self, output_writer): for (column_index, column_size) in enumerate(self._column_sizes): (column_size, _) = divmod(column_size, self._NUMBER_OF_SPACES_IN_TAB) column_size = ((column_size + 1) * self._NUMBER_OF_SPACES_IN_TAB) self._column_sizes[column_index] = column_size if self._columns: self._WriteRow(output_writer, self._columns, in_bold=True) for values in self._rows: self._WriteRow(output_writer, values)
Writes the table to output writer. Args: output_writer (CLIOutputWriter): output writer.
codesearchnet
def _shared_name(self): return self.name[:self.name.index(':')]
The shared name of the variable. Unlike name(), shared_name doesn't have the ":0" suffix. It is the user-specified name with the name scope prefix. Returns: variable name.
github-repos
def __eq__(self, other): if type(self) is not type(other) or \ self.name != other.name or \ self.num_qubits != other.num_qubits or \ self.num_clbits != other.num_clbits or \ self.definition != other.definition: return False for self_param, other_param in zip_longest(self.params, other.params): if self_param == other_param: continue try: if numpy.isclose(float(self_param), float(other_param), atol=_CUTOFF_PRECISION): continue except TypeError: pass return False return True
Two instructions are the same if they have the same name, same dimensions, and same params. Args: other (instruction): other instruction Returns: bool: are self and other equal.
juraj-google-style
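The parameter loop in __eq__ above tolerates floating-point noise via numpy.isclose; a minimal standalone sketch of the same pattern (the atol value here is an assumption standing in for _CUTOFF_PRECISION):

from itertools import zip_longest
import numpy

def params_equal(params_a, params_b, atol=1e-10):
    # exact match first, then a float comparison within tolerance, otherwise unequal
    for a, b in zip_longest(params_a, params_b):
        if a == b:
            continue
        try:
            if numpy.isclose(float(a), float(b), atol=atol):
                continue
        except TypeError:
            pass
        return False
    return True

print(params_equal([3.141592653589793, 0], [3.141592653589793 + 1e-12, 0]))  # True
print(params_equal([1.0], [1.0, 2.0]))  # False: zip_longest pads the shorter list with None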
def merge_svg_layers(svg_sources, share_transform=True): (width, height), layers = get_svg_layers(svg_sources) if share_transform: transforms = [layer_i.attrib['transform'] for layer_i in layers if 'transform' in layer_i.attrib] if len(transforms) > 1: raise ValueError('Transform can only be shared if *exactly one* ' 'layer has a transform ({} layers have ' '`transform` attributes)'.format(len(transforms))) elif transforms: for layer_i in layers: layer_i.attrib['transform'] = transforms[0] dwg = svgwrite.Drawing(profile='tiny', debug=False, size=(width, height)) output_svg_root = etree.fromstring(dwg.tostring()) output_svg_root.extend(layers) output = StringIO.StringIO() output.write(etree.tostring(output_svg_root)) output.seek(0) return output
Merge layers from input svg sources into a single XML document. Args: svg_sources (list) : A list of file-like objects, each containing one or more XML layers. share_transform (bool) : If exactly one layer has a transform, apply it to *all* other layers as well. Returns: StringIO.StringIO : File-like object containing the merged XML document.
juraj-google-style
def get_text(revision, strip=True): start_pos = revision.find("<text") assert start_pos != -1 end_tag_pos = revision.find(">", start_pos) assert end_tag_pos != -1 end_tag_pos += len(">") end_pos = revision.find("</text>") if end_pos == -1: ret = "" else: ret = revision[end_tag_pos:end_pos] if strip: ret = strip_text(ret) ret = text_encoder.to_unicode_utf8(ret) return ret
Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string
juraj-google-style
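The tag-scanning logic in get_text can be exercised without the strip_text and text_encoder helpers; a hedged, self-contained reduction:

def extract_text(revision):
    # minimal version of get_text: grab everything between the opening
    # <text ...> tag and </text>, without the stripping/encoding steps
    start_pos = revision.find("<text")
    end_tag_pos = revision.find(">", start_pos) + len(">")
    end_pos = revision.find("</text>")
    return "" if end_pos == -1 else revision[end_tag_pos:end_pos]

revision = '<revision><text xml:space="preserve">Hello \'\'world\'\'</text></revision>'
print(extract_text(revision))  # Hello ''world''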
def connection(self): ctx = stack.top if (ctx is None): raise Exception('Working outside of the Flask application context. If you wish to make a connection outside of a flask application context, please handle your connections and use manager.make_connection()') if hasattr(ctx, 'ldap3_manager_main_connection'): return ctx.ldap3_manager_main_connection else: connection = self._make_connection(bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'), contextualise=False) connection.bind() if (ctx is not None): ctx.ldap3_manager_main_connection = connection return connection
Convenience property for externally accessing an authenticated connection to the server. This connection is automatically handled by the appcontext, so you do not have to perform an unbind. Returns: ldap3.Connection: A bound ldap3.Connection Raises: ldap3.core.exceptions.LDAPException: Since this method performs a bind on behalf of the caller, you should handle failures such as invalid service credentials.
codesearchnet
def _safe_issubclass(derived, parent): try: return issubclass(derived, parent) except (TypeError, AttributeError): if hasattr(derived, '__origin__'): try: return issubclass(derived.__origin__, parent) except TypeError: pass return False
Like issubclass, but swallows TypeErrors. This is useful for when either parameter might not actually be a class, e.g. typing.Union isn't actually a class. Args: derived: As in issubclass. parent: As in issubclass. Returns: issubclass(derived, parent), or False if a TypeError was raised.
github-repos
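Why the wrapper is needed: plain issubclass raises TypeError for typing constructs such as Union. A runnable sketch of the same pattern (assumes Python 3.7+ where generics expose __origin__ as the runtime class):

import typing

def safe_issubclass(derived, parent):
    # fall back to __origin__ for typing generics, and to False for anything
    # that still cannot be compared
    try:
        return issubclass(derived, parent)
    except (TypeError, AttributeError):
        if hasattr(derived, "__origin__"):
            try:
                return issubclass(derived.__origin__, parent)
            except TypeError:
                pass
        return False

print(safe_issubclass(bool, int))                    # True
print(safe_issubclass(typing.List[int], list))       # True, via __origin__
print(safe_issubclass(typing.Union[int, str], int))  # False instead of raising TypeError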
def clear(self, timestamp): self.storage.clear() self.push(streams.DATA_CLEARED, timestamp, 1)
Clear all data from the RSL. This pushes a single reading once we clear everything so that we keep track of the highest ID that we have allocated to date. This needs the current timestamp to be able to properly timestamp the cleared storage reading that it pushes. Args: timestamp (int): The current timestamp to store with the reading.
codesearchnet
def __init__(self, vars_map): super(core.PostProcessor, self).__init__() self.vars_map = {} for var_name, value in iteritems(vars_map): var_regex = re.compile( re.escape("%" + var_name + "%"), flags=re.IGNORECASE) self.vars_map[var_name.lower()] = (var_regex, value)
EnvVarsPostProcessor constructor. Args: vars_map: Dictionary of "string" -> "string|list", i.e. a mapping of environment variables names to their suggested values or to lists of their suggested values.
juraj-google-style
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc): for vevent in getattr(cal, 'vevent_list', []): start = getattr(vevent, 'dtstart', None) end = getattr(vevent, 'dtend', None) for node in (start, end): if node: dt = node.value if (isinstance(dt, datetime) and (not utc_only or dt.tzinfo == utc_tz)): if dt.tzinfo is None: dt = dt.replace(tzinfo = default) node.value = dt.astimezone(new_timezone)
Change the timezone of the specified component. Args: cal (Component): the component to change new_timezone (tzinfo): the timezone to change to default (tzinfo): a timezone to assume if the dtstart or dtend in cal doesn't have an existing timezone utc_only (bool): only convert dates that are in utc utc_tz (tzinfo): the tzinfo to compare to for UTC when processing utc_only=True
juraj-google-style
def core_name(self): buf_size = self.MAX_BUF_SIZE buf = (ctypes.c_char * buf_size)() self._dll.JLINKARM_Core2CoreName(self.core_cpu(), buf, buf_size) return ctypes.string_at(buf).decode()
Returns the name of the target ARM core. Args: self (JLink): the ``JLink`` instance Returns: The target core's name.
codesearchnet
def get_type_info(obj): if isinstance(obj, primitive_types): return ('primitive', type(obj).__name__) if isinstance(obj, sequence_types): return ('sequence', type(obj).__name__) if isinstance(obj, array_types): return ('array', type(obj).__name__) if isinstance(obj, key_value_types): return ('key-value', type(obj).__name__) if isinstance(obj, types.ModuleType): return ('module', type(obj).__name__) if isinstance(obj, (types.FunctionType, types.MethodType)): return ('function', type(obj).__name__) if isinstance(obj, type): if hasattr(obj, '__dict__'): return ('class', obj.__name__) if isinstance(type(obj), type): if hasattr(obj, '__dict__'): cls_name = type(obj).__name__ if (cls_name == 'classobj'): cls_name = obj.__name__ return ('class', '{}'.format(cls_name)) if (cls_name == 'instance'): cls_name = obj.__class__.__name__ return ('instance', '{} instance'.format(cls_name)) return ('unknown', type(obj).__name__)
Get type information for a Python object Args: obj: The Python object Returns: tuple: (object type "category", object type name)
codesearchnet
def get_node_ip_address(address='8.8.8.8:53'): (ip_address, port) = address.split(':') s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect((ip_address, int(port))) node_ip_address = s.getsockname()[0] except Exception as e: node_ip_address = '127.0.0.1' if (e.errno == 101): try: host_name = socket.getfqdn(socket.gethostname()) node_ip_address = socket.gethostbyname(host_name) except Exception: pass finally: s.close() return node_ip_address
Determine the IP address of the local node. Args: address (str): The IP address and port of any known live service on the network you care about. Returns: The IP address of the current node.
codesearchnet
def DeregisterDefinition(self, artifact_definition): artifact_definition_name = artifact_definition.name.lower() if (artifact_definition_name not in self._artifact_definitions): raise KeyError('Artifact definition not set for name: {0:s}.'.format(artifact_definition.name)) del self._artifact_definitions[artifact_definition_name]
Deregisters an artifact definition. Artifact definitions are identified based on their lower case name. Args: artifact_definition (ArtifactDefinition): an artifact definition. Raises: KeyError: if an artifact definition is not set for the corresponding name.
codesearchnet
def norm(self, valu): func = self._type_norms.get(type(valu)) if (func is None): raise s_exc.NoSuchFunc(name=self.name, mesg=('no norm for type: %r' % (type(valu),))) return func(valu)
Normalize the value for a given type. Args: valu (obj): The value to normalize. Returns: ((obj,dict)): The normalized valu, info tuple. Notes: The info dictionary uses the following key conventions: subs (dict): The normalized sub-fields as name: valu entries.
codesearchnet
def get_output_from_cache(name, filename): cache_filename = _get_cache_filename(name, filename) if (os.path.exists(cache_filename) and (os.path.getmtime(filename) < os.path.getmtime(cache_filename))): with io.open(cache_filename) as f: return f.read() return None
Returns the output from the cache if still valid. It checks that the cache file is defined and that its modification time is after the modification time of the original file. Args: name: string: name of the linter. filename: string: path of the filename for which we are retrieving the output. Returns: a string with the output, if it is still valid, or None otherwise.
codesearchnet
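The freshness test in get_output_from_cache is just an mtime comparison; a small demonstration with explicit timestamps (the file names and timestamp values are invented for illustration):

import os
import tempfile

def cache_is_fresh(source_path, cache_path):
    # cached lint output is only reused when it is newer than the source file
    return (os.path.exists(cache_path)
            and os.path.getmtime(source_path) < os.path.getmtime(cache_path))

with tempfile.TemporaryDirectory() as d:
    src, cache = os.path.join(d, "module.py"), os.path.join(d, "module.py.lint-cache")
    open(src, "w").close()
    open(cache, "w").close()
    os.utime(src, (1000000, 1000000))    # pretend the source file is old
    os.utime(cache, (2000000, 2000000))  # and the cache file newer
    print(cache_is_fresh(src, cache))    # True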
def BuildService(self, cls): def _WrapCallMethod(srvc, method_descriptor, rpc_controller, request, callback): return self._CallMethod(srvc, method_descriptor, rpc_controller, request, callback) self.cls = cls cls.CallMethod = _WrapCallMethod cls.GetDescriptor = staticmethod(lambda: self.descriptor) cls.GetDescriptor.__doc__ = "Returns the service descriptor." cls.GetRequestClass = self._GetRequestClass cls.GetResponseClass = self._GetResponseClass for method in self.descriptor.methods: setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
Constructs the service class. Args: cls: The class that will be constructed.
juraj-google-style
def configure(self, options): self.client.api.configure_plugin(self.name, options) self.reload()
Update the plugin's settings. Args: options (dict): A key-value mapping of options. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def create_chapter_from_file(self, file_name, url=None, title=None): with codecs.open(file_name, 'r', encoding='utf-8') as f: content_string = f.read() return self.create_chapter_from_string(content_string, url, title)
Creates a Chapter object from an html or xhtml file. Sanitizes the file's content using the clean_function method, and saves it as the content of the created chapter. Args: file_name (string): The file_name containing the html or xhtml content of the created Chapter url (Option[string]): A url to infer the title of the chapter from title (Option[string]): The title of the created Chapter. By default, this is None, in which case the title will try to be inferred from the webpage at the url. Returns: Chapter: A chapter object whose content is the given file and whose title is that provided or inferred from the url
codesearchnet
def learn_one(self, x: beam.Row) -> None: if len(x.__dict__) != 1: raise ValueError('ZScore.learn_one expected univariate input, but got %s', str(x)) v = next(iter(x)) self._stdev_tracker.push(v) self._sub_stat_tracker.push(v)
Updates the mean and standard deviation trackers with a new data point. Args: x: A `beam.Row` containing a single numerical value.
github-repos
def collect_function_renames(): renames = set() all_v2_names = get_all_v2_names() def visit(unused_path, unused_parent, children): for child in children: _, attr = tf_decorator.unwrap(child[1]) api_names_v1 = [name for name in tf_export.get_v1_names(attr) if '.__internal__.' not in name] api_names_v2 = tf_export.get_v2_names(attr) if not api_names_v2: api_names_v2 = [name for name in api_names_v1 if name in all_v2_names] deprecated_api_names = set(api_names_v1) - set(api_names_v2) for name in deprecated_api_names: renames.add((name, get_canonical_name(api_names_v2, name))) visitor = public_api.PublicAPIVisitor(visit) visitor.do_not_descend_map['tf'].append('contrib') visitor.private_map['tf.compat'] = ['v1', 'v2'] traverse.traverse(tf.version, visitor) traverse.traverse(tf.compat.v1, visitor) traverse.traverse(tf.compat.v2, visitor) return renames
Looks for functions/classes that need to be renamed in TF 2.0. Returns: Set of tuples of the form (current name, new name).
github-repos
def forward(self, main_feature, condition_feature): probabilities_and_temperature = self.mlp(torch.concat((main_feature, condition_feature), dim=1)) probabilities, temperature = (probabilities_and_temperature[:, :2, ...], probabilities_and_temperature[:, 2:, ...]) probabilities = probabilities + self.p_eps probabilities = probabilities[:, 0, ...] / (probabilities[:, 0, ...] + probabilities[:, 1, ...]) temperature = temperature + self.p_eps temperature = temperature[:, 0, ...] / (temperature[:, 0, ...] + temperature[:, 1, ...]) temperature = temperature.unsqueeze(1) temperature = (self.max_temp - self.min_temp) * temperature + self.min_temp return self.log_binomial_transform(probabilities, temperature)
Args: main_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Main feature. condition_feature (torch.Tensor of shape `(batch_size, num_channels, height, width)`): Condition feature. Returns: `torch.Tensor`: Output log binomial distribution
github-repos
def todo(self, **kwargs): path = ('%s/%s/todo' % (self.manager.path, self.get_id())) self.manager.gitlab.http_post(path, **kwargs)
Create a todo associated to the object. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTodoError: If the todo cannot be set
codesearchnet
def assert_processor_available(processor: str) -> None: if processor not in [Processors.XHTML2PDF, Processors.WEASYPRINT, Processors.PDFKIT]: raise AssertionError("rnc_pdf.set_pdf_processor: invalid PDF processor" " specified") if processor == Processors.WEASYPRINT and not weasyprint: raise RuntimeError("rnc_pdf: Weasyprint requested, but not available") if processor == Processors.XHTML2PDF and not xhtml2pdf: raise RuntimeError("rnc_pdf: xhtml2pdf requested, but not available") if processor == Processors.PDFKIT and not pdfkit: raise RuntimeError("rnc_pdf: pdfkit requested, but not available")
Assert that a specific PDF processor is available. Args: processor: a PDF processor type from :class:`Processors` Raises: AssertionError: if bad ``processor`` RuntimeError: if requested processor is unavailable
juraj-google-style
def __init__(self, data_type=DATA_TYPE): super(SyslogLineEventData, self).__init__(data_type=data_type) self.body = None self.hostname = None self.pid = None self.reporter = None self.severity = None
Initializes an event data attribute container. Args: data_type (Optional[str]): event data type indicator.
juraj-google-style
async def inspect(self, name: str) -> Mapping: response = await self.docker._query_json("images/{name}/json".format(name=name)) return response
Return low-level information about an image Args: name: name of the image
juraj-google-style
def cut_spectrum(sp, l0, lf): if (l0 >= lf): raise ValueError('l0 must be lower than lf') idx0 = np.argmin(np.abs((sp.x - l0))) idx1 = np.argmin(np.abs((sp.x - lf))) out = copy.deepcopy(sp) out.x = out.x[idx0:idx1] out.y = out.y[idx0:idx1] return out
Cuts spectrum given a wavelength interval, leaving the original intact Args: sp: Spectrum instance l0: initial wavelength lf: final wavelength Returns: Spectrum: cut spectrum
codesearchnet
def stop_artifact_creation(self, id_or_uri, task_uri): data = {'taskUri': task_uri} uri = (((self.URI + '/') + extract_id_from_uri(id_or_uri)) + self.STOP_CREATION_PATH) return self._client.update(data, uri=uri)
Stops creation of the selected Artifact Bundle. Args: id_or_uri: ID or URI of the Artifact Bundle. task_uri: Task URI associated with the Artifact Bundle. Returns: string:
codesearchnet
def is_user_in_experiment(config, experiment, attributes, logger): audience_conditions = experiment.getAudienceConditionsOrIds() logger.debug(audience_logs.EVALUATING_AUDIENCES_COMBINED.format(experiment.key, json.dumps(audience_conditions))) if ((audience_conditions is None) or (audience_conditions == [])): logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(experiment.key, 'TRUE')) return True if (attributes is None): attributes = {} def evaluate_custom_attr(audienceId, index): audience = config.get_audience(audienceId) custom_attr_condition_evaluator = condition_helper.CustomAttributeConditionEvaluator(audience.conditionList, attributes, logger) return custom_attr_condition_evaluator.evaluate(index) def evaluate_audience(audienceId): audience = config.get_audience(audienceId) if (audience is None): return None logger.debug(audience_logs.EVALUATING_AUDIENCE.format(audienceId, audience.conditions)) result = condition_tree_evaluator.evaluate(audience.conditionStructure, (lambda index: evaluate_custom_attr(audienceId, index))) result_str = (str(result).upper() if (result is not None) else 'UNKNOWN') logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT.format(audienceId, result_str)) return result eval_result = condition_tree_evaluator.evaluate(audience_conditions, evaluate_audience) eval_result = (eval_result or False) logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(experiment.key, str(eval_result).upper())) return eval_result
Determine for given experiment if user satisfies the audiences for the experiment. Args: config: project_config.ProjectConfig object representing the project. experiment: Object representing the experiment. attributes: Dict representing user attributes which will be used in determining if the audience conditions are met. If not provided, default to an empty dict. logger: Provides a logger to send log messages to. Returns: Boolean representing if user satisfies audience conditions for any of the audiences or not.
codesearchnet
def _compute_keys(self, n_minus_1_grams: torch.LongTensor, indices: torch.LongTensor) -> Tuple[torch.LongTensor, torch.LongTensor]: batch_size, _ = n_minus_1_grams.shape hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long) hash_result_with_just_context = self.accumulate_hash(hash_result, n_minus_1_grams) hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 1), out_dims=1)(hash_result_with_just_context, indices[:, :, None]) keys = self.keys[None, None, :, None] hash_result = torch.vmap(self.accumulate_hash, in_dims=(None, 2), out_dims=2)(hash_result, keys) return (hash_result, hash_result_with_just_context)
Computes random keys for each ngram and depth. Args: n_minus_1_grams (`torch.LongTensor`): Ngrams (batch_size, ngram_len - 1). indices (`torch.LongTensor`): indices of the continuations (batch_size, num_indices) Returns: Ngram keys (batch_size, num_indices, depth).
github-repos
def rename(self, new_folder_name): headers = self.headers endpoint = 'https: payload = '{ "DisplayName": "' + new_folder_name + '"}' r = requests.patch(endpoint, headers=headers, data=payload) if check_response(r): return_folder = r.json() return self._json_to_folder(self.account, return_folder)
Renames the Folder to the provided name. Args: new_folder_name: A string of the replacement name. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token. Returns: A new Folder representing the folder with the new name on Outlook.
juraj-google-style
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] if token_ids_1 is None: return prefix_ones + [0] * len(token_ids_0) + suffix_ones return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
def call(func, args): assert hasattr(func, '__call__'), 'Cannot call func: {}'.format( func.__name__) raw_func = ( func if isinstance(func, FunctionType) else func.__class__.__call__) hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func)) argspec = _getargspec(raw_func) named_args = {} varargs = () for k, nk, v in _normalize(args): if nk == argspec.varargs: hints[nk] = Tuple[hints[nk], ...] elif nk not in argspec.args and argspec.varkw in hints: hints[nk] = hints[argspec.varkw] try: value = cast(hints[nk], v) except TypeError as e: _LOGGER.exception(e) six.raise_from(exc.InvalidCliValueError(k, v), e) if nk == argspec.varargs: varargs = value elif (nk in argspec.args or argspec.varkw) and ( nk not in named_args or named_args[nk] is None): named_args[nk] = value return func(*varargs, **named_args)
Call the function with args normalized and cast to the correct types. Args: func: The function to call. args: The arguments parsed by docopt. Returns: The return value of func.
juraj-google-style
def __init__(self, fill_method='zero', fill_missing=True, **kwargs): super().__init__() self.fill_missing = fill_missing self.filler = SimpleFill(fill_method)
Imputes NaN's using various filling methods like mean, zero, median, min, random Args: fill_method: How NaN's will be replaced. Possible values: 'mean', 'zero', 'median', 'min', 'random' fill_missing: If True, transformer will fill NaN values using the filling method
juraj-google-style
def set_error_filter(self, filt): self._filter = filt
Set the error filter. Args: filt: A function or callable object that accepts a single argument of type Error and returns True if that error should be included in the log. A filter of None will add all errors. NOTE: The filter may adjust some properties of the error.
github-repos
def add_user(self, group, username): try: self.lookup_id(group) except ldap_tools.exceptions.InvalidResult as err: raise err from None operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]} self.client.modify(self.__distinguished_name(group), operation)
Add a user to the specified LDAP group. Args: group: Name of group to update username: Username of user to add Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info.
codesearchnet
def cap17(msg): allbds = ['05', '06', '07', '08', '09', '0A', '20', '21', '40', '41', '42', '43', '44', '45', '48', '50', '51', '52', '53', '54', '55', '56', '5F', '60', 'NA', 'NA', 'E1', 'E2'] d = hex2bin(data(msg)) idx = [i for i, v in enumerate(d[:28]) if v=='1'] capacity = ['BDS'+allbds[i] for i in idx if allbds[i] is not 'NA'] return capacity
Extract capacities from BDS 1,7 message Args: msg (String): 28 bytes hexadecimal message string Returns: list: list of supported BDS codes
juraj-google-style
def add(self, doc, attributes=None): doc_ref = str(doc[self._ref]) self._documents[doc_ref] = (attributes or {}) self.document_count += 1 for (field_name, field) in self._fields.items(): extractor = field.extractor field_value = (doc[field_name] if (extractor is None) else extractor(doc)) tokens = Tokenizer(field_value) terms = self.pipeline.run(tokens) field_ref = FieldRef(doc_ref, field_name) field_terms = defaultdict(int) self.field_term_frequencies[str(field_ref)] = field_terms self.field_lengths[str(field_ref)] = len(terms) for term in terms: term_key = str(term) field_terms[term_key] += 1 if (term_key not in self.inverted_index): posting = {_field_name: {} for _field_name in self._fields} posting['_index'] = self.term_index self.term_index += 1 self.inverted_index[term_key] = posting if (doc_ref not in self.inverted_index[term_key][field_name]): self.inverted_index[term_key][field_name][doc_ref] = defaultdict(list) for metadata_key in self.metadata_whitelist: metadata = term.metadata[metadata_key] self.inverted_index[term_key][field_name][doc_ref][metadata_key].append(metadata)
Adds a document to the index. Before adding documents to the index it should have been fully setup, with the document ref and all fields to index already having been specified. The document must have a field name as specified by the ref (by default this is 'id') and it should have all fields defined for indexing, though None values will not cause errors. Args: - doc (dict): The document to be added to the index. - attributes (dict, optional): A set of attributes corresponding to the document, currently a single `boost` -> int will be taken into account.
codesearchnet
def setEditable(self, editable): if not isinstance(editable, bool): raise TypeError('Argument is not of type bool') self._editable = editable
setter to _editable. apply changes while changing dtype. Raises: TypeError: if editable is not of type bool. Args: editable (bool): apply changes while changing dtype.
juraj-google-style
def get_numeric_features_to_observed_range(examples): observed_features = collections.defaultdict(list) for example in examples: for feature_name in get_numeric_feature_names(example): original_feature = parse_original_feature_from_example(example, feature_name) observed_features[feature_name].extend(original_feature.original_value) return {feature_name: {'observedMin': min(feature_values), 'observedMax': max(feature_values)} for (feature_name, feature_values) in iteritems(observed_features)}
Returns numerical features and their observed ranges. Args: examples: Examples to read to get ranges. Returns: A dict mapping feature_name -> {'observedMin': 'observedMax': } dicts, with a key for each numerical feature.
codesearchnet
def upload_backup_bundle_from_file(self, file_path, deployment_groups_id_or_uri): deployment_groups_uri = deployment_groups_id_or_uri if self.DEPLOYMENT_GROUPS_URI not in deployment_groups_id_or_uri: deployment_groups_uri = self.DEPLOYMENT_GROUPS_URI + deployment_groups_id_or_uri uri = self.BACKUP_ARCHIVE_PATH + "?deploymentGrpUri=" + deployment_groups_uri return self._client.upload(file_path, uri)
Restore an Artifact Bundle from a backup file. Args: file_path (str): The File Path to restore the Artifact Bundle. deployment_groups_id_or_uri: ID or URI of the Deployment Groups. Returns: dict: Deployment group.
juraj-google-style
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame: df = pd.DataFrame(index=table.index) for column in self.columns: df = column.update_dataframe(df, table=table, validate=validate) return df
Return a fully recoded dataframe. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests.
codesearchnet
def js_adaptor(buffer): buffer = re.sub('true', 'True', buffer) buffer = re.sub('false', 'False', buffer) buffer = re.sub('none', 'None', buffer) buffer = re.sub('NaN', '"NaN"', buffer) return buffer
Convert JavaScript literals like true, false, none, NaN etc. to their Python equivalents (NaN becomes a quoted word). Arguments: buffer: string to be converted Returns: string after conversion
codesearchnet
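Because the substitutions above are plain regex replacements, a quick check of what they produce (with the caveat that they would also fire inside longer identifiers such as "nonempty"); the input string is made up:

import re

def js_adaptor(buffer):
    buffer = re.sub('true', 'True', buffer)
    buffer = re.sub('false', 'False', buffer)
    buffer = re.sub('none', 'None', buffer)
    buffer = re.sub('NaN', '"NaN"', buffer)
    return buffer

raw = '{"ok": true, "missing": none, "score": NaN}'
print(js_adaptor(raw))  # {"ok": True, "missing": None, "score": "NaN"}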
def get_resize_output_image_size(image, resolution_max_side: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]: height, width = get_image_size(image, channel_dim=input_data_format) height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_side) height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE) return (height, width)
Get the output size of the image after resizing given a dictionary specifying the max and min sizes. Args: image (`np.ndarray`): Image to resize. resolution_max_side (`int`): The longest edge of the image will be resized to this value. The shortest edge will be resized to keep the input aspect ratio. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: The output size of the image after resizing.
github-repos
def update_work_as_completed(self, worker_id, work_id, other_values=None, error=None): client = self._datastore_client try: with client.transaction() as transaction: work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id, KIND_WORK, work_id) work_entity = client.get(work_key, transaction=transaction) if work_entity['claimed_worker_id'] != worker_id: return False work_entity['is_completed'] = True if other_values: work_entity.update(other_values) if error: work_entity['error'] = text_type(error) transaction.put(work_entity) except Exception: return False return True
Updates work piece in datastore as completed. Args: worker_id: ID of the worker which did the work work_id: ID of the work which was done other_values: dictionary with additional values which should be saved with the work piece error: if not None then error occurred during computation of the work piece. In such case work will be marked as completed with error. Returns: whether work was successfully updated
juraj-google-style
def power(self, n): if n > 0: return super().power(n) return Chi(SuperOp(self).power(n))
The matrix power of the channel. Args: n (int): compute the matrix power of the superoperator matrix. Returns: Chi: the matrix power of the SuperOp converted to a Chi channel. Raises: QiskitError: if the input and output dimensions of the QuantumChannel are not equal, or the power is not an integer.
juraj-google-style
def install_bootstrapped_files(nb_path=None, server_config=True, DEBUG=False): install_path = None print('Starting hide_code.js install...') current_dir = path.abspath(path.dirname(__file__)) config_dirs = j_path.jupyter_config_path() notebook_module_path = Utils.get_notebook_module_dir() for dir in config_dirs: custom_dir = path.join(dir, 'custom') if path.isdir(custom_dir): install_path = custom_dir break if (install_path == None): print('No config directories contain "custom" folder. Trying Jupyter notebook module path...') install_path = path.join(notebook_module_path, 'static', 'custom') if (nb_path != None): install_path = nb_path print(('Using argument supplied path: ' + install_path)) if DEBUG: print(install_path) if path.isdir(install_path): shutil.copyfile(path.join(current_dir, 'hide_code.js'), path.join(install_path, 'hide_code.js')) print(('Copying hide_code.js to ' + install_path)) print('Attempting to configure custom.js to auto-load hide_code.js...') try: with open(path.join(current_dir, 'auto-load.txt')) as auto: auto_load_txt = auto.read() auto_loaded = False with open(path.join(install_path, 'custom.js'), 'r') as customJS: if (auto_load_txt in customJS.read()): auto_loaded = True print('Custom.js already configured to auto-load hide_code.js.') if (not auto_loaded): with open(path.join(install_path, 'custom.js'), 'a') as customJS: customJS.write(auto_load_txt) print('Configured custom.js to auto-load hide_code.js.') except: print('Custom.js not in custom directory.') else: print(('Unable to install into ' + install_path)) print("Directory doesn't exist.") print('Make sure Jupyter is installed.') if server_config: print('Attempting to configure auto-loading for hide_code export handlers.') try: server_cm = ConfigManager(config_dir=j_path.jupyter_config_dir()) cfg = server_cm.get('jupyter_notebook_config') server_extensions = cfg.setdefault('NotebookApp', {}).setdefault('server_extensions', []) extension = 'hide_code.hide_code' if (extension not in server_extensions): cfg['NotebookApp']['server_extensions'] += [extension] server_cm.update('jupyter_notebook_config', cfg) print('Configured jupyter to auto-load hide_code export handlers.') else: print('Jupyter already configured to auto-load export handlers.') except: print('Unable to install server extension.')
Installs javascript and exporting server extensions in Jupyter notebook. Args: nb_path (string): Path to notebook module. server_config (boolean): Install exporting server extensions. DEBUG (boolean): Verbose mode.
codesearchnet
def __init__(self, broker, queue_output, backend=None, max_tasks_in_memory=None, max_workers_in_memory=None): self._app = Celery(broker=broker, backend=backend) self._queue_output = queue_output from celery.backends.base import DisabledBackend self._use_result_backend = not isinstance(self._app.backend, DisabledBackend) logger.info('Creating %s: max_tasks=%d; max_workers=%d', EventListener.__name__, max_tasks_in_memory, max_workers_in_memory) logger.info('Celery broker=%s; backend=%s; using_result_backend=%s', broker, backend, self._use_result_backend) self.memory = State( max_tasks_in_memory=max_tasks_in_memory, max_workers_in_memory=max_workers_in_memory, ) self._listener_thread = None self._celery_receiver = None self._wait_event = threading.Event() def sigterm_handler(_signo, _stack_frame): self.__stop() signal.signal(signal.SIGTERM, sigterm_handler) self.__start()
Constructs an event listener instance. Args: broker (str): the broker being used by the celery system. queue_output (Queue): to send to streaming dispatcher. backend (str): the result backend being used by the celery system. max_tasks_in_memory (int): max tasks stored max_workers_in_memory (int): max workers stored
juraj-google-style
def add_gene_panel(self, panel_obj): panel_name = panel_obj['panel_name'] panel_version = panel_obj['version'] display_name = panel_obj.get('display_name', panel_name) if self.gene_panel(panel_name, panel_version): raise IntegrityError('Panel {0} with version {1} already exist in database'.format(panel_name, panel_version)) LOG.info('loading panel {0}, version {1} to database'.format(display_name, panel_version)) result = self.panel_collection.insert_one(panel_obj) LOG.debug('Panel saved') return result.inserted_id
Add a gene panel to the database Args: panel_obj(dict)
codesearchnet
def static_nvals(self): if self._nvals is not None: nvals = tensor_util.constant_value(self._nvals) if nvals is not None: return nvals if self._value_rowids is not None: nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0) if nvals.value is not None: return nvals.value return None
The number of values in this partition, if statically known. ```python self.value_rowids().shape == [self.static_vals] ``` Returns: The number of values in this partition as an `int` (if statically known); or `None` (otherwise).
github-repos
def setEditorData(self, editor, index): editor.blockSignals(True) data = index.data() dataIndex = editor.findData(data) editor.setCurrentIndex(dataIndex) editor.blockSignals(False)
Sets the current data for the editor. The data displayed has the same value as `index.data(Qt.EditRole)` (the translated name of the datatype). Therefore a lookup for all items of the combobox is made and the matching item is set as the currently displayed item. Signals emitted by the editor are blocked during execution of this method. Args: editor (QtGui.QComboBox): The current editor for the item. Should be a `QtGui.QComboBox` as defined in `createEditor`. index (QtCore.QModelIndex): The index of the current item.
codesearchnet
def set_all_file_column_labels(self, xlabel=None, ylabel=None): if xlabel is not None: self.general.x_column_label = xlabel if ylabel is not None: self.general.y_column_label = ylabel if xlabel is None and ylabel is None: warnings.warn("is not specifying x or y lables even" + "though column labels function is called.", UserWarning) return
Indicate general x,y column labels. This sets the general x and y column labels into data files for all plots. It can be overridden for specific plots. Args: xlabel/ylabel (str, optional): String indicating column label for x,y values into the data files. Default is None. Raises: UserWarning: If xlabel and ylabel are both not specified, The user will be alerted, but the code will not stop.
juraj-google-style
def filter_object(obj, marks, presumption=DELETE): if isinstance(obj, list): keys = reversed(range(0, len(obj))) else: keys = obj.keys() for k in keys: v = obj[k] m = marks.get(id(v), UNSPECIFIED) if m == DELETE: del obj[k] elif m == KEEP or presumption==KEEP: if isinstance(v, list) or isinstance(v, dict): filter_object(v, marks, presumption=KEEP) elif m == UNSPECIFIED: if isinstance(v, list) or isinstance(v, dict): filter_object(v, marks, presumption=DELETE) if len(v) == 0: del obj[k] else: del obj[k]
Filter down obj based on marks, presuming keys should be kept/deleted. Args: obj: The object to be filtered. Filtering is done in-place. marks: An object mapping id(obj) --> {DELETE,KEEP} These values apply to the entire subtree, unless inverted. presumption: The default action to take on all keys.
juraj-google-style
def is_complex_format_str(node): inferred = utils.safe_infer(node) if ((inferred is None) or (not isinstance(inferred.value, str))): return True try: parsed = list(string.Formatter().parse(inferred.value)) except ValueError: return False for (_, _, format_spec, _) in parsed: if format_spec: return True return False
Checks if node represents a string with complex formatting specs. Args: node (astroid.node_classes.NodeNG): AST node to check Returns: bool: True if inferred string uses complex formatting, False otherwise
codesearchnet
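The check above hinges on `string.Formatter().parse`, which yields `(literal, field_name, format_spec, conversion)` tuples; any non-empty `format_spec` marks the string as complex. A standalone illustration of that parse step (not the pylint checker itself):

```python
import string

def has_format_spec(template):
    # A non-empty format_spec (the part after ':') is what counts as complex.
    return any(spec for _, _, spec, _ in string.Formatter().parse(template))

print(has_format_spec("hello {name}"))       # False
print(has_format_spec("value: {x:>10.3f}"))  # True
```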
def convert_matmul(params, w_name, scope_name, inputs, layers, weights, names): print('Converting matmul ...') if (names == 'short'): tf_name = ('MMUL' + random_string(4)) elif (names == 'keep'): tf_name = w_name else: tf_name = (w_name + str(random.random())) if (len(inputs) == 1): weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy().transpose() (input_channels, output_channels) = W.shape keras_weights = [W] dense = keras.layers.Dense(output_channels, weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros') layers[scope_name] = dense(layers[inputs[0]]) elif (len(inputs) == 2): weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy().transpose() (input_channels, output_channels) = W.shape keras_weights = [W] dense = keras.layers.Dense(output_channels, weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros') layers[scope_name] = dense(layers[inputs[0]]) else: raise AssertionError('Cannot convert matmul layer')
Convert matmul layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def __init__(self, name, default=None, help=None, type=str): self._name = name self._default = default self._help = help self._type = type
Initialise the workflow option. Args: name (str): The name of the option under which the value will be stored. default: The default value that should be used when no value is specified. Set to None to make this a non-optional option. help (str): A short help string for this option. type: The type of the option. Supported types are: str, int, float, bool
juraj-google-style
def median(self, **kwargs): if self._is_transposed: kwargs['axis'] = (kwargs.get('axis', 0) ^ 1) return self.transpose().median(**kwargs) axis = kwargs.get('axis', 0) func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs) return self._full_axis_reduce(axis, func)
Returns median of each column or row. Returns: A new QueryCompiler object containing the median of each column or row.
codesearchnet
def _execute_command(self, key, *args): client = self.redis_clients[(key.redis_shard_hash() % len(self.redis_clients))] return client.execute_command(*args)
Execute a Redis command on the appropriate Redis shard based on key. Args: key: The object ID or the task ID that the query is about. args: The command to run. Returns: The value returned by the Redis command.
codesearchnet
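The routing in `_execute_command` is plain consistent bucketing: the key's shard hash modulo the client count selects one Redis client. A sketch of the same pattern with stand-in objects (no real Redis connection is assumed):

```python
class FakeShard:
    # Stand-in for a redis client; only execute_command is mimicked.
    def __init__(self, name):
        self.name = name

    def execute_command(self, *args):
        return '{} ran {}'.format(self.name, args)

clients = [FakeShard('shard-{}'.format(i)) for i in range(3)]

def route(key_hash, *args):
    # Same idea as above: hash modulo the number of shards picks the client.
    return clients[key_hash % len(clients)].execute_command(*args)

print(route(7, 'GET', 'object:abc'))  # handled by shard-1
```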
def _prepare_images_structure(self, images: ImageInput) -> ImageInput: return make_flat_list_of_images(images)
Prepare the images structure for processing. Args: images (`ImageInput`): The input images to process. Returns: `ImageInput`: The images with a valid nesting.
github-repos
def send(url, data): validate(data) return requests.post(url, json=data)
Sends an incoming message Args: url(str): the incoming hook url data(dict): the sending data Returns: requests.Response
codesearchnet
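Using the `send` helper above amounts to a single JSON POST; the webhook URL below is a placeholder and `validate` is assumed to accept the payload:

```python
url = 'https://hooks.example.com/services/T000/B000/XXXX'  # placeholder URL
data = {'text': 'deploy finished', 'channel': '#ops'}

response = send(url, data)    # assumes the helper above is in scope
print(response.status_code)   # 200 when the hook accepts the message
```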
def _format_value(cls, value, type_): res = value if type_ == 'CLASS': res = '{}.{}'.format(value.__module__, value.__name__) elif type_ == 'DURATION': res = value.total_seconds() * 1000 elif type_ == 'TIMESTAMP': res = calendar.timegm(value.timetuple()) * 1000 + value.microsecond return res
Returns the API representation of a value given its type. Args: value: The value of the item that needs to be shortened. type_(string): The type of the value. Returns: A formatted value in the form of a float, int, or string.
github-repos
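A worked illustration of the three conversions in `_format_value`, using only the standard library; the class wrapper is omitted, so the lines below reproduce the arithmetic rather than call the actual classmethod:

```python
import calendar
import datetime

# CLASS -> dotted "module.name" path
cls = datetime.date
print('{}.{}'.format(cls.__module__, cls.__name__))             # datetime.date

# DURATION -> milliseconds
duration = datetime.timedelta(seconds=1, milliseconds=500)
print(duration.total_seconds() * 1000)                          # 1500.0

# TIMESTAMP -> epoch milliseconds (UTC)
ts = datetime.datetime(2020, 1, 1)
print(calendar.timegm(ts.timetuple()) * 1000 + ts.microsecond)  # 1577836800000
```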
def read_config_info(ini_file): try: config = RawConfigParser() config.optionxform = lambda option: option config.read(ini_file) the_stuff = {} for section in config.sections(): the_stuff[section] = {} for option in config.options(section): the_stuff[section][option] = config.get(section, option) return the_stuff except Exception as wtf: logging.error('Exception caught in read_config_info(): {}'.format(wtf)) traceback.print_exc(file=sys.stdout) return sys.exit(1)
Read the INI file Args: ini_file - path to the file Returns: A dictionary of stuff from the INI file Exits: 1 - if problems are encountered
juraj-google-style
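A quick round trip for `read_config_info`, assuming the function above is importable: write a throwaway INI file, then read it back as a nested dict. The file name is made up, and note that option-name case survives because `optionxform` is overridden:

```python
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), 'demo.ini')  # throwaway file
with open(path, 'w') as fh:
    fh.write('[database]\nhost = localhost\nPort = 5432\n')

print(read_config_info(path))
# {'database': {'host': 'localhost', 'Port': '5432'}}
```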
def to_css(self): if (self.a == 1.0): return ('hsl(%d, %s%%, %s%%)' % (self.h, (self.s * 100), (self.l * 100))) else: return ('hsla(%d, %s%%, %s%%, %s)' % (self.h, (self.s * 100), (self.l * 100), self.a))
Generate the CSS representation of this HSL color. Returns: str, ``"hsl(...)"`` or ``"hsla(...)"``
codesearchnet
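What the two branches of `to_css` produce, assuming `h` is in degrees, `s`/`l` are 0-1 fractions and `a` is the alpha channel; the class itself is not shown, so the sketch formats the strings directly:

```python
h, s, l, a = 210, 0.5, 0.4, 0.75

print('hsl(%d, %s%%, %s%%)' % (h, s * 100, l * 100))          # hsl(210, 50.0%, 40.0%)
print('hsla(%d, %s%%, %s%%, %s)' % (h, s * 100, l * 100, a))  # hsla(210, 50.0%, 40.0%, 0.75)
```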
def _get_file_iterator(self, file_obj): file_obj.seek(0) return iter(lambda: file_obj.read(self.read_bs), '')
For given `file_obj` return iterator, which will read the file in `self.read_bs` chunks. Args: file_obj (file): File-like object. Return: iterator: Iterator reading the file-like object in chunks.
juraj-google-style
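The trick in `_get_file_iterator` is the two-argument `iter(callable, sentinel)`: the lambda is called repeatedly until it returns the empty-string sentinel, which `read` does at end of file in text mode. A standalone version with an in-memory file and an assumed chunk size:

```python
import io

read_bs = 4  # stand-in for the instance's read block size
file_obj = io.StringIO('abcdefghij')

file_obj.seek(0)
chunks = list(iter(lambda: file_obj.read(read_bs), ''))
print(chunks)  # ['abcd', 'efgh', 'ij']
```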
def __init__(self, _args): super(TcExLib, self).__init__(_args) self.latest_version = None self.lib_directory = 'lib_{}.{}.{}'.format( sys.version_info.major, sys.version_info.minor, sys.version_info.micro ) self.requirements_file = 'requirements.txt' self.static_lib_dir = 'lib_latest' self.use_temp_requirements_file = False
Initialize Class properties. Args: _args (namespace): The argparser args Namespace.
juraj-google-style
def Open(self, hostname, port):
    server_url = 'http://{0:s}:{1:d}'.format(hostname, port)
    try:
      self._xmlrpc_proxy = xmlrpclib.ServerProxy(
          server_url, allow_none=True)
    except SocketServer.socket.error as exception:
      logger.warning((
          'Unable to connect to RPC server on {0:s}:{1:d} with error: '
          '{2!s}').format(hostname, port, exception))
      return False

    return True
Opens a RPC communication channel to the server. Args: hostname (str): hostname or IP address to connect to for requests. port (int): port to connect to for requests. Returns: bool: True if the communication channel was established.
juraj-google-style
def infer(query, replacements=None, root_type=None, libs=('stdcore', 'stdmath')): if root_type: type_scope = scope.ScopeStack(std_core.MODULE, root_type) else: type_scope = scope.ScopeStack(std_core.MODULE) stdcore_included = False for lib in libs: if (lib == 'stdcore'): stdcore_included = True continue module = std_core.LibraryModule.ALL_MODULES.get(lib) if (not module): raise TypeError(('No standard library module %r.' % lib)) type_scope = scope.ScopeStack(module, type_scope) if (not stdcore_included): raise TypeError("'stdcore' must always be included.") query = q.Query(query, params=replacements) return infer_type.infer_type(query, type_scope)
Determine the type of the query's output without actually running it. Arguments: query: A query object or string with the query. replacements: Built-time parameters to the query, either as dict or as an array (for positional interpolation). root_type: The types of variables to be supplied to the query inference. libs: What standard libraries should be taken into account for the inference. Returns: The type of the query's output, if it can be determined. If undecidable, returns efilter.protocol.AnyType. NOTE: The inference returns the type of a row in the results, not of the actual Python object returned by 'apply'. For example, if a query returns multiple rows, each one of which is an integer, the type of the output is considered to be int, not a collection of rows. Examples: infer("5 + 5") # -> INumber infer("SELECT * FROM people WHERE age > 10") # -> AnyType # If root_type implements the IStructured reflection API: infer("SELECT * FROM people WHERE age > 10", root_type=...) # -> dict
codesearchnet
def step_interpolation(x, xp, fp, **kwargs): del kwargs xp = np.expand_dims(xp, (- 1)) (lower, upper) = (xp[:(- 1)], xp[1:]) conditions = ((x >= lower) & (x < upper)) conditions = np.concatenate([[(x < xp[0])], conditions, [(x >= xp[(- 1)])]]) values = np.concatenate([[fp[0]], fp]) assert np.all((np.sum(conditions, 0) == 1)), 'xp must be increasing.' indices = np.argmax(conditions, 0) return values[indices].astype(np.float32)
Multi-dimensional step interpolation. Returns the multi-dimensional step interpolant to a function with given discrete data points (xp, fp), evaluated at x. Note that *N and *M indicate zero or more dimensions. Args: x: An array of shape [*N], the x-coordinates of the interpolated values. xp: An np.array of shape [D], the x-coordinates of the data points, must be increasing. fp: An np.array of shape [D, *M], the y-coordinates of the data points. **kwargs: Unused. Returns: An array of shape [*N, *M], the interpolated values.
codesearchnet
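A small call to `step_interpolation`, assuming the function above is in scope. With scalar y-values, inputs below the first breakpoint take the first `fp` value and inputs at or past the last breakpoint hold the final one:

```python
import numpy as np

xp = np.array([0.0, 1.0, 2.0])
fp = np.array([10.0, 20.0, 30.0])
x = np.array([-0.5, 0.0, 0.5, 1.5, 2.5])

print(step_interpolation(x, xp, fp))
# [10. 10. 10. 20. 30.]
```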
def AddStop(self, lat, lng, name, stop_id=None): if stop_id is None: stop_id = util.FindUniqueId(self.stops) stop = self._gtfs_factory.Stop(stop_id=stop_id, lat=lat, lng=lng, name=name) self.AddStopObject(stop) return stop
Add a stop to this schedule. Args: lat: Latitude of the stop as a float or string lng: Longitude of the stop as a float or string name: Name of the stop, which will appear in the feed stop_id: stop_id of the stop or None, in which case a unique id is picked Returns: A new Stop object
juraj-google-style
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): if not masks.shape[0] == scores.shape[0] == labels.shape[0]: raise ValueError('mask, scores and labels must have the same shape!') to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return (masks[to_keep], scores[to_keep], labels[to_keep])
Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`.
github-repos
def make_color_wheel(bins=None): if bins is None: bins = [15, 6, 4, 11, 13, 6] assert len(bins) == 6 RY, YG, GC, CB, BM, MR = tuple(bins) ry = [1, np.arange(RY) / RY, 0] yg = [1 - np.arange(YG) / YG, 1, 0] gc = [0, 1, np.arange(GC) / GC] cb = [0, 1 - np.arange(CB) / CB, 1] bm = [np.arange(BM) / BM, 0, 1] mr = [1, 0, 1 - np.arange(MR) / MR] num_bins = RY + YG + GC + CB + BM + MR color_wheel = np.zeros((3, num_bins), dtype=np.float32) col = 0 for i, color in enumerate([ry, yg, gc, cb, bm, mr]): for j in range(3): color_wheel[j, col:col + bins[i]] = color[j] col += bins[i] return color_wheel.T
Build a color wheel. Args: bins(list or tuple, optional): Specify the number of bins for each color range, corresponding to six ranges: red -> yellow, yellow -> green, green -> cyan, cyan -> blue, blue -> magenta, magenta -> red. [15, 6, 4, 11, 13, 6] is used for default (see Middlebury). Returns: ndarray: Color wheel of shape (total_bins, 3).
juraj-google-style
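A quick sanity check of the default wheel, assuming `make_color_wheel` above is importable: 15+6+4+11+13+6 = 55 bins, RGB values in [0, 1], starting at pure red:

```python
wheel = make_color_wheel()
print(wheel.shape)               # (55, 3)
print(wheel[0])                  # [1. 0. 0.]  -- pure red at the start
print(wheel.min(), wheel.max())  # 0.0 1.0
```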
def install(device: AndroidDevice, apk_path: str, timeout: int=DEFAULT_TIMEOUT_INSTALL_APK_SEC, user_id: Optional[int]=None, params: Optional[Iterable[str]]=None) -> None: android_api_version = int(device.build_info['build_version_sdk']) if user_id is not None and android_api_version < 24: raise ValueError('Cannot specify `user_id` for device below SDK 24.') args = ['-r', '-t'] if android_api_version >= 24: if user_id is None: user_id = device.adb.current_user_id args = ['--user', str(user_id)] + args if android_api_version >= 23: args.append('-g') if android_api_version >= 17: args.append('-d') args += params or [] args.append(apk_path) try: _execute_adb_install(device, args, timeout) return except adb.AdbError as e: if not _should_retry_apk_install(str(e)): raise device.log.debug('Retrying installation of %s', apk_path) device.reboot() _execute_adb_install(device, args, timeout)
Install an apk on an Android device.

Installing an apk is more complicated than most people realize on Android. This is just a util for the most common use cases. If you need special logic beyond this, we recommend you write your own instead of modifying this.

Args:
    device: AndroidDevice, Mobly's Android controller object.
    apk_path: string, file path of an apk file.
    timeout: int, the number of seconds to wait before timing out.
    user_id: int, the ID of the user to install the apk for. For SDK>=24, install for the current user by default. Android's multi-user support did not realistically work until SDK 24.
    params: string list, additional parameters included in the adb install cmd.

Raises:
    AdbError: Installation failed.
    ValueError: Attempts to set user_id on SDK<24.
github-repos
def __init__(self, *nodes, depth=0): self.edges = set() vertices = [] matrix = Matrix(*nodes, depth=depth) for key in matrix.keys: vertices.append(Vertex(key)) for l, line in enumerate(matrix.data): for c, cell in enumerate(line): if cell > 0: self.edges.add(Edge(vertices[l], vertices[c], weight=cell)) self.vertices = set(vertices)
Initialization method. An intermediary matrix is built to ease the creation of the graph. Args: *nodes (list of DSM/Package/Module): the nodes on which to build the graph. depth (int): the depth of the intermediary matrix. See the documentation for Matrix class.
juraj-google-style
def clean_file(c_source, virtualenv_dirname): with open(c_source, "r") as file_obj: contents = file_obj.read().rstrip() py_version = "python{}.{}".format(*sys.version_info[:2]) lib_path = os.path.join( ".nox", virtualenv_dirname, "lib", py_version, "site-packages", "" ) contents = contents.replace(lib_path, "") lines = contents.split("\n") with open(c_source, "w") as file_obj: for line in lines: file_obj.write(line.rstrip() + "\n")
Strip trailing whitespace and clean up "local" names in C source. These source files are autogenerated from the ``cython`` CLI. Args: c_source (str): Path to a ``.c`` source file. virtualenv_dirname (str): The name of the ``virtualenv`` directory where Cython is installed (this is part of a relative path ``.nox/{NAME}/lib/...``).
juraj-google-style
def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True): traj = {} trajsize = len(rollout[SampleBatch.ACTIONS]) for key in rollout: traj[key] = np.stack(rollout[key]) if use_gae: assert SampleBatch.VF_PREDS in rollout, "Values not found!" vpred_t = np.concatenate( [rollout[SampleBatch.VF_PREDS], np.array([last_r])]) delta_t = ( traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1]) traj[Postprocessing.ADVANTAGES] = discount(delta_t, gamma * lambda_) traj[Postprocessing.VALUE_TARGETS] = ( traj[Postprocessing.ADVANTAGES] + traj[SampleBatch.VF_PREDS]).copy().astype(np.float32) else: rewards_plus_v = np.concatenate( [rollout[SampleBatch.REWARDS], np.array([last_r])]) traj[Postprocessing.ADVANTAGES] = discount(rewards_plus_v, gamma)[:-1] traj[Postprocessing.VALUE_TARGETS] = np.zeros_like( traj[Postprocessing.ADVANTAGES]) traj[Postprocessing.ADVANTAGES] = traj[ Postprocessing.ADVANTAGES].copy().astype(np.float32) assert all(val.shape[0] == trajsize for val in traj.values()), \ "Rollout stacked incorrectly!" return SampleBatch(traj)
Given a rollout, compute its value targets and the advantage.

Args:
    rollout (SampleBatch): SampleBatch of a single trajectory
    last_r (float): Value estimation for last observation
    gamma (float): Discount factor.
    lambda_ (float): Parameter for GAE
    use_gae (bool): Using Generalized Advantage Estimation

Returns:
    SampleBatch (SampleBatch): Object with experience from rollout and processed rewards.
juraj-google-style
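The GAE branch of `compute_advantages` reduces to two vectorised steps: TD residuals `delta_t = r_t + gamma * V_{t+1} - V_t`, then a reverse-time discounted cumulative sum with factor `gamma * lambda`. A plain-NumPy sketch of just that math, with the `SampleBatch` bookkeeping and the library's own `discount` helper replaced by a simple loop:

```python
import numpy as np

def discount(x, gamma):
    # Reverse-time discounted cumulative sum.
    out = np.zeros_like(x)
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + gamma * running
        out[t] = running
    return out

rewards = np.array([1.0, 0.0, 2.0])
vpred = np.array([0.5, 0.6, 0.7])   # V(s_0), V(s_1), V(s_2)
last_r = 0.0                        # bootstrap value after the final step
gamma, lambda_ = 0.9, 1.0

vpred_t = np.append(vpred, last_r)
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
advantages = discount(delta_t, gamma * lambda_)
value_targets = advantages + vpred
print(advantages)     # approximately [2.12 1.2  1.3 ]
print(value_targets)  # approximately [2.62 1.8  2.  ]
```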
def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint': if (data.expires_after < time.time()): del self.data[url] data = None return data
Checks the expiration time for data for a url. If the data has expired, it is deleted from the cache. Args: url: url to check data: page of data for that url Returns: value of either the passed data or None if it expired
codesearchnet
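The same expire-on-read idea as `_check_expiration`, reduced to a plain dict keyed by URL; `SavedEndpoint` is stood in for by a small namedtuple with an `expires_after` field:

```python
import time
from collections import namedtuple

SavedEndpoint = namedtuple('SavedEndpoint', ['payload', 'expires_after'])
cache = {}

def get_fresh(url):
    entry = cache.get(url)
    if entry is not None and entry.expires_after < time.time():
        del cache[url]   # expired: drop it and report a miss
        entry = None
    return entry

cache['https://example.com/api'] = SavedEndpoint('data', time.time() + 60)
print(get_fresh('https://example.com/api'))  # still fresh -> the entry
cache['https://example.com/old'] = SavedEndpoint('stale', time.time() - 1)
print(get_fresh('https://example.com/old'))  # already expired -> None
```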