Dataset columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 classes).
def node_device(self, node_name): if not self._debug_graphs: raise LookupError('Node devices are not loaded from partition graphs yet.') if node_name not in self._node_devices: raise ValueError("Node '%s' does not exist in partition graphs." % node_name) output = list(self._node_devices[node_name]) return output[0] if len(output) == 1 else output
Get the names of the devices that have nodes of the specified name. Args: node_name: (`str`) name of the node. Returns: (`str` or `list` of `str`) name of the device(s) on which the node of the given name is found. Returns a `str` if there is only one such device, otherwise returns a `list` of `str`. Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet. ValueError: If the node does not exist in partition graphs.
github-repos
def dv(self, orb): orb = orb.copy(form="cartesian") if self.frame == "QSW": mat = to_qsw(orb).T elif self.frame == "TNW": mat = to_tnw(orb).T else: mat = np.identity(3) return mat @ self._dv
Computation of the velocity increment in the reference frame of the orbit Args: orb (Orbit): Return: numpy.array: Velocity increment, length 3
juraj-google-style
def __init__(self, weight_shape: Sequence[int]) -> None: self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape) if bias_fn is not None: self.bias = np.random.uniform(low=-1.0, high=1.0, size=weight_shape[-1])
Initializes a MatmulModel. Args: weight_shape: Shape of the weight tensor.
github-repos
def GetAutomountMasterMap(self): master_map = self.GetAutomountMap(location='auto.master') for map_entry in master_map: map_entry.location = os.path.split(map_entry.location)[1] self.log.debug('master map has: %s' % map_entry.location) return master_map
Return the automount master map from this source. Returns: an instance of automount.AutomountMap
github-repos
def Parse(self, text): self.parser.parse(text)
Parse |text| and store the parsed information in self.global_env. Args: text: The text to parse.
github-repos
def __init__(self, image_processor: AutoImageProcessor, id2label: Mapping[int, str], threshold: float=0.0): self.image_processor = image_processor self.id2label = id2label self.threshold = threshold self.metric = self.get_metric()
Initialize evaluator with image processor, id2label mapping and threshold for filtering predictions. Args: image_processor (AutoImageProcessor): Image processor for `post_process_instance_segmentation` method. id2label (Mapping[int, str]): Mapping from class id to class name. threshold (float): Threshold to filter predicted boxes by confidence. Defaults to 0.0.
github-repos
def ParseNolintSuppressions(filename, raw_line, linenum, error): matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) if matched: if matched.group(1): suppressed_line = linenum + 1 else: suppressed_line = linenum category = matched.group(2) if category in (None, '(*)'): _error_suppressions.setdefault(None, set()).add(suppressed_line) else: if category.startswith('(') and category.endswith(')'): category = category[1:-1] if category in _ERROR_CATEGORIES: _error_suppressions.setdefault(category, set()).add(suppressed_line) elif category not in _LEGACY_ERROR_CATEGORIES: error(filename, linenum, 'readability/nolint', 5, 'Unknown NOLINT error category: %s' % category)
Updates the global list of line error-suppressions. Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler.
juraj-google-style
def set_structure(self, structure, reset_camera=True, to_unit_cell=True): self.ren.RemoveAllViewProps() has_lattice = hasattr(structure, "lattice") if has_lattice: s = Structure.from_sites(structure, to_unit_cell=to_unit_cell) s.make_supercell(self.supercell, to_unit_cell=to_unit_cell) else: s = structure inc_coords = [] for site in s: self.add_site(site) inc_coords.append(site.coords) count = 0 labels = ["a", "b", "c"] colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] if has_lattice: matrix = s.lattice.matrix if self.show_unit_cell and has_lattice: self.add_text([0, 0, 0], "o") for vec in matrix: self.add_line((0, 0, 0), vec, colors[count]) self.add_text(vec, labels[count], colors[count]) count += 1 for (vec1, vec2) in itertools.permutations(matrix, 2): self.add_line(vec1, vec1 + vec2) for (vec1, vec2, vec3) in itertools.permutations(matrix, 3): self.add_line(vec1 + vec2, vec1 + vec2 + vec3) if self.show_bonds or self.show_polyhedron: elements = sorted(s.composition.elements, key=lambda a: a.X) anion = elements[-1] def contains_anion(site): for sp in site.species.keys(): if sp.symbol == anion.symbol: return True return False anion_radius = anion.average_ionic_radius for site in s: exclude = False max_radius = 0 color = np.array([0, 0, 0]) for sp, occu in site.species.items(): if sp.symbol in self.excluded_bonding_elements \ or sp == anion: exclude = True break max_radius = max(max_radius, sp.average_ionic_radius) color = color + \ occu * np.array(self.el_color_mapping.get(sp.symbol, [0, 0, 0])) if not exclude: max_radius = (1 + self.poly_radii_tol_factor) * \ (max_radius + anion_radius) nn = structure.get_neighbors(site, float(max_radius)) nn_sites = [] for nnsite, dist in nn: if contains_anion(nnsite): nn_sites.append(nnsite) if not in_coord_list(inc_coords, nnsite.coords): self.add_site(nnsite) if self.show_bonds: self.add_bonds(nn_sites, site) if self.show_polyhedron: color = [i / 255 for i in color] self.add_polyhedron(nn_sites, site, color) if self.show_help: self.helptxt_actor = vtk.vtkActor2D() self.helptxt_actor.VisibilityOn() self.helptxt_actor.SetMapper(self.helptxt_mapper) self.ren.AddActor(self.helptxt_actor) self.display_help() camera = self.ren.GetActiveCamera() if reset_camera: if has_lattice: lengths = s.lattice.abc pos = (matrix[1] + matrix[2]) * 0.5 + \ matrix[0] * max(lengths) / lengths[0] * 3.5 camera.SetPosition(pos) camera.SetViewUp(matrix[2]) camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5) else: origin = s.center_of_mass max_site = max( s, key=lambda site: site.distance_from_point(origin)) camera.SetPosition(origin + 5 * (max_site.coords - origin)) camera.SetFocalPoint(s.center_of_mass) self.structure = structure self.title = s.composition.formula
Add a structure to the visualizer. Args: structure: structure to visualize reset_camera: Set to True to reset the camera to a default determined based on the structure. to_unit_cell: Whether or not to fall back sites into the unit cell.
juraj-google-style
def _respond(self, channel, text): result = self._format_message(channel, text) if result is not None: logger.info( 'Sending message: %r', truncate(result, max_len=50), ) self.socket.send_str(result)
Respond to a message on the current socket. Args: channel (:py:class:`str`): The channel to send to. text (:py:class:`str`): The message text to send.
juraj-google-style
def start_with(self, request): HTTPRequestHelper.patch_with_options(request, self.__options) self.queue.add_request(request) self.__crawler_start()
Start the crawler using the given request. Args: request (:class:`nyawc.http.Request`): The startpoint for the crawler.
juraj-google-style
def credits(self, **kwargs): path = self._get_id_path('credits') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the cast and crew information for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def create_blob(profile, content): resource = '/blobs' payload = {'content': content} data = api.post_request(profile, resource, payload) return data
Create a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. content The (UTF-8 encoded) content to create in the blob. Returns: A dict with data about the newly created blob.
codesearchnet
def _extract_hunt_results(self, output_file_path): collection_paths = [] client_ids = set() client_id_to_fqdn = {} hunt_dir = None try: with zipfile.ZipFile(output_file_path) as archive: items = archive.infolist() for f in items: if not hunt_dir: hunt_dir = f.filename.split('/')[0] if f.filename.split('/')[-1] == 'client_info.yaml': client_id, fqdn = self._get_client_fqdn(archive.read(f)) client_id_to_fqdn[client_id] = fqdn continue client_id = f.filename.split('/')[1] if client_id.startswith('C.'): if client_id not in client_ids: client_directory = os.path.join(self.output_path, hunt_dir, client_id) collection_paths.append((client_id, client_directory)) client_ids.add(client_id) try: archive.extract(f, self.output_path) except KeyError as exception: print('Extraction error: {0:s}'.format(exception)) return [] except OSError as exception: msg = 'Error manipulating file {0:s}: {1!s}'.format( output_file_path, exception) self.state.add_error(msg, critical=True) return [] except zipfile.BadZipfile as exception: msg = 'Bad zipfile {0:s}: {1!s}'.format( output_file_path, exception) self.state.add_error(msg, critical=True) return [] try: os.remove(output_file_path) except OSError as exception: print('Output path {0:s} could not be removed: {1:s}'.format( output_file_path, exception)) fqdn_collection_paths = [] for client_id, path in collection_paths: fqdn = client_id_to_fqdn.get(client_id, client_id) fqdn_collection_paths.append((fqdn, path)) if not fqdn_collection_paths: self.state.add_error('Nothing was extracted from the hunt archive', critical=True) return [] return fqdn_collection_paths
Open a hunt output archive and extract files. Args: output_file_path: The path where the hunt archive is downloaded to. Returns: list: tuples containing: str: The name of the client from where the files were downloaded. str: The directory where the files were downloaded to.
juraj-google-style
def get_mimetype(url): filename = url.split('?')[0] filename = filename.split('#')[0] (content_type, _) = mimetypes.guess_type(filename) return (url, content_type)
Guess based on the file extension. Args: url (text): Web url that was linked to by a reddit submission. Returns: modified_url (text): The url (or filename) that will be used when constructing the command to run. content_type (text): The mime-type that will be used when constructing the command to run. If the mime-type is unknown, return None and the program will fallback to using the web browser.
codesearchnet
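A minimal usage sketch of the helper above, restated as runnable multi-line Python; the strip-at-'#' line is the reconstruction assumed in the code row above, and the URLs are made up for illustration:

import mimetypes

def get_mimetype(url):
    # Drop the query string and fragment, then guess from the file extension.
    filename = url.split('?')[0]
    filename = filename.split('#')[0]
    content_type, _ = mimetypes.guess_type(filename)
    return url, content_type

print(get_mimetype('https://example.com/cat.jpg?size=large'))
# ('https://example.com/cat.jpg?size=large', 'image/jpeg')
print(get_mimetype('https://example.com/thread'))
# ('https://example.com/thread', None)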
def list_projects(self, dataset_name): url = ((self.url() + '/nd/resource/dataset/{}'.format(dataset_name)) + '/project/') req = self.remote_utils.get_url(url) if (req.status_code != 200): raise RemoteDataNotFoundError('Could not find {}'.format(req.text)) else: return req.json()
Lists a set of projects related to a dataset. Arguments: dataset_name (str): Dataset name to search projects for Returns: dict: Projects found based on dataset query
codesearchnet
def forward_tcp(self, host, port): return self.transport.open_channel( 'direct-tcpip', (host, port), self.transport.getpeername() )
Open a connection to host:port via an ssh tunnel. Args: host (str): The host to connect to. port (int): The port to connect to. Returns: A socket-like object that is connected to the provided host:port.
juraj-google-style
def group_by_reducer(key_func, reducer): def _apply_fn(dataset): return _GroupByReducerDataset(dataset, key_func, reducer) return _apply_fn
A transformation that groups elements and performs a reduction. This transformation maps element of a dataset to a key using `key_func` and groups the elements by key. The `reducer` is used to process each group; its `init_func` is used to initialize state for each group when it is created, the `reduce_func` is used to update the state every time an element is mapped to the matching group, and the `finalize_func` is used to map the final state to an output value. Args: key_func: A function mapping a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a scalar `tf.int64` tensor. reducer: An instance of `Reducer`, which captures the reduction logic using the `init_func`, `reduce_func`, and `finalize_func` functions. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
github-repos
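A hedged usage sketch for the transformation documented above, assuming TensorFlow 2.x where it is exposed as tf.data.experimental.group_by_reducer together with tf.data.experimental.Reducer; the parity-counting reducer is illustrative only:

import tensorflow as tf

# Count how many elements map to each key (here: the parity of the value).
reducer = tf.data.experimental.Reducer(
    init_func=lambda key: tf.constant(0, dtype=tf.int64),   # state created per new key
    reduce_func=lambda state, element: state + 1,           # bump the count per element
    finalize_func=lambda state: state)                      # emit the final count

ds = tf.data.Dataset.range(10)
ds = ds.apply(tf.data.experimental.group_by_reducer(
    key_func=lambda x: x % 2,   # scalar tf.int64 key
    reducer=reducer))

print(sorted(ds.as_numpy_iterator()))  # [5, 5]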
def update_script(self, information, timeout=(- 1)): uri = '{}/script'.format(self.data['uri']) return self._helper.update(information, uri=uri, timeout=timeout)
Updates the configuration script of the logical enclosure and on all enclosures in the logical enclosure with the specified ID. Args: information: Updated script. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Return: Configuration script.
codesearchnet
def check_missing(self, args): return [opt.name for opt in self if ((opt.name not in args) and (opt.default is None))]
Returns the names of all options that are required but were not specified. All options that don't have a default value are required in order to run the workflow. Args: args (dict): A dictionary of the provided arguments that is checked for missing options. Returns: list: A list with the names of the options that are missing from the provided arguments.
codesearchnet
def find_or_build(cls, **kwargs): keys = kwargs.pop('keys') if 'keys' in kwargs else [] return cls.first(**subdict(kwargs, keys)) or cls.build(**kwargs)
Checks if an instance already exists in db with these kwargs else returns a new, saved instance of the service's model class. Args: **kwargs: instance parameters
juraj-google-style
def FindFieldByName(self, full_name): full_name = _NormalizeFullyQualifiedName(full_name) message_name, _, field_name = full_name.rpartition('.') message_descriptor = self.FindMessageTypeByName(message_name) return message_descriptor.fields_by_name[field_name]
Loads the named field descriptor from the pool. Args: full_name: The full name of the field descriptor to load. Returns: The field descriptor for the named field. Raises: KeyError: if the field cannot be found in the pool.
juraj-google-style
def mirror_sources(self, sourcedir, targetdir=None, recursive=True, excludes=[]): sources = self.compilable_sources(sourcedir, absolute=False, recursive=recursive, excludes=excludes) maplist = [] for filepath in sources: src = filepath dst = self.get_destination(src, targetdir=targetdir) if targetdir: src = os.path.join(sourcedir, src) maplist.append((src, dst)) return maplist
Mirroring compilable sources filepaths to their targets. Args: sourcedir (str): Directory path to scan. Keyword Arguments: absolute (bool): Returned paths will be absolute using ``sourcedir`` argument (if True), else return relative paths. recursive (bool): Switch to enabled recursive finding (if True). Default to True. excludes (list): A list of excluding patterns (glob patterns). Patterns are matched against the relative filepath (from its sourcedir). Returns: list: A list of pairs ``(source, target)``. Where ``target`` is the ``source`` path but renamed with ``.css`` extension. Relative directory from source dir is left unchanged but if given, returned paths will be absolute (using ``sourcedir`` for sources and ``targetdir`` for targets).
codesearchnet
def _parse_octet(self, octet_str): if not octet_str: raise ValueError("Empty octet not permitted") if not self._DECIMAL_DIGITS.issuperset(octet_str): msg = "Only decimal digits permitted in %r" raise ValueError(msg % octet_str) if len(octet_str) > 3: msg = "At most 3 characters permitted in %r" raise ValueError(msg % octet_str) octet_int = int(octet_str, 10) if octet_int > 7 and octet_str[0] == '0': msg = "Ambiguous (octal/decimal) value in %r not permitted" raise ValueError(msg % octet_str) if octet_int > 255: raise ValueError("Octet %d (> 255) not permitted" % octet_int) return octet_int
Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255].
juraj-google-style
def setup_privnet(self, host=None): self.setup(FILENAME_SETTINGS_PRIVNET) if isinstance(host, str): if ":" in host: raise Exception("No protocol prefix or port allowed in host, use just the IP or domain.") print("Using custom privatenet host:", host) self.SEED_LIST = ["%s:20333" % host] self.RPC_LIST = ["http://%s:30333" % host] print("- P2P:", ", ".join(self.SEED_LIST)) print("- RPC:", ", ".join(self.RPC_LIST)) self.check_privatenet()
Load settings from the privnet JSON config file Args: host (string, optional): if supplied, uses this IP or domain as neo nodes. The host must use these standard ports: P2P 20333, RPC 30333.
juraj-google-style
def csv_to_dict(csv_filepath, **kwargs): callbacks = {'to_list': csv_tolist, 'row_csv_limiter': row_csv_limiter, 'csv_row_cleaner': csv_row_cleaner, 'row_headers_count': row_headers_count, 'get_col_header': get_csv_col_headers, 'get_row_headers': get_row_headers, 'populate_headers': populate_headers, 'csv_column_header_cleaner': csv_column_header_cleaner, 'csv_column_cleaner': csv_column_cleaner, 'retrieve_csv_data': retrieve_csv_data} callbacks.update(kwargs.get('alt_callbacks', {})) rows = kwargs.get('rows', []) if not rows: rows = callbacks.get('to_list')(csv_filepath, **kwargs) if not rows: msg = 'Empty rows obtained from {}'.format(csv_filepath) logger.warning(msg) raise ValueError(msg) rows = callbacks.get('row_csv_limiter')( rows, kwargs.get('limits', [None, None])) rows = callbacks.get('csv_row_cleaner')(rows) rows = callbacks.get('csv_column_cleaner')(rows) num_row_headers = callbacks.get('row_headers_count')(rows) c_headers_raw = callbacks.get('get_col_header')(rows, num_row_headers) r_headers = callbacks.get('get_row_headers')( rows, num_row_headers, len(c_headers_raw)) c_headers_dirty = callbacks.get('populate_headers')( c_headers_raw) if len(c_headers_raw) > 1 else c_headers_raw[0] c_headers = callbacks.get('csv_column_header_cleaner')(c_headers_dirty) csv_data = callbacks.get('retrieve_csv_data')( rows, column_header=len(c_headers_raw), row_header=num_row_headers, limit_column=len(c_headers) - len(c_headers_dirty) or None) if csv_data: assert len(c_headers) == len(csv_data[0]) if r_headers: assert len(r_headers) == len(csv_data) kwargs.pop('rows', None) result = csv_format(csv_data, c_headers, r_headers, rows, **kwargs) return result
Turn csv into dict. Args: :csv_filepath: path to csv file to turn into dict. :limits: optional pair of row limits ([lower, upper]) applied to the rows before parsing.
juraj-google-style
def temporal_segmentation(segments, min_time): final_segments = [] for segment in segments: final_segments.append([]) for point in segment: if (point.dt > min_time): final_segments.append([]) final_segments[(- 1)].append(point) return final_segments
Segments based on time distant points Args: segments (:obj:`list` of :obj:`list` of :obj:`Point`): segment points min_time (int): minimum required time for segmentation
codesearchnet
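A quick illustrative run of temporal_segmentation (assuming the function above is in scope), using a hypothetical stand-in Point that only carries the dt attribute the function reads:

from collections import namedtuple

Point = namedtuple('Point', ['dt'])  # stand-in: real Point objects carry more fields

track = [[Point(dt=1), Point(dt=2), Point(dt=400), Point(dt=3)]]
print(temporal_segmentation(track, min_time=300))
# [[Point(dt=1), Point(dt=2)], [Point(dt=400), Point(dt=3)]]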
def _decompress(self, compressed_payload): if self._data: raise RuntimeError('Cannot decompress to an instance with payload') self._data = zlib.decompress(compressed_payload) len_data = len(self._data) counts_size = (len_data - payload_header_size) if (payload_header_size > counts_size > MAX_COUNTS_SIZE): raise HdrLengthException(('Invalid size:' + str(len_data))) self.payload = PayloadHeader.from_buffer_copy(self._data) cookie = self.payload.cookie if (get_cookie_base(cookie) != V2_ENCODING_COOKIE_BASE): raise HdrCookieException(('Invalid cookie: %x' % cookie)) word_size = get_word_size_in_bytes_from_cookie(cookie) if (word_size != V2_MAX_WORD_SIZE_IN_BYTES): raise HdrCookieException(('Invalid V2 cookie: %x' % cookie))
Decompress a compressed payload into this payload wrapper. Note that the decompressed buffer is saved in self._data and the counts array is not yet allocated. Args: compressed_payload (string) a payload in zlib compressed form Exception: HdrCookieException: the compressed payload has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class HdrHistogramSettingsException: mismatch in the significant figures, lowest and highest trackable value
codesearchnet
def batch_size(self): raise NotImplementedError
Return the batch size of the dataset created. For certain type of the data input, the batch size is known, and even required, like numpy array. Whereas for dataset, the batch is unknown unless we take a peek. Returns: int, the batch size of the dataset, or None if it is unknown.
github-repos
def __init__(self, decode_module, methodName='runTest'): super(DecodeProtoOpTestBase, self).__init__(methodName) self._decode_module = decode_module
DecodeProtoOpTestBase initializer. Args: decode_module: a module containing the `decode_proto_op` method methodName: the name of the test method (same as for test.TestCase)
github-repos
def RunOnce(self, names=None, token=None): del token leased_jobs = data_store.REL_DB.LeaseCronJobs( cronjob_ids=names, lease_time=rdfvalue.Duration("10m")) logging.info("Leased %d cron jobs for processing.", len(leased_jobs)) if not leased_jobs: return errors = {} processed_count = 0 for job in sorted(leased_jobs, key=lambda j: j.cron_job_id): if self.TerminateStuckRunIfNeeded(job): continue if not self.JobDueToRun(job): continue try: if self.RunJob(job): processed_count += 1 else: logging.info( "Can't schedule cron job %s on a thread pool " "(all threads are busy or CPU load is high)", job.cron_job_id) break except Exception as e: logging.exception("Cron job %s has failed: %s", job.cron_job_id, e) errors[job.cron_job_id] = e logging.info("Processed %d cron jobs.", processed_count) data_store.REL_DB.ReturnLeasedCronJobs(leased_jobs) if errors: raise OneOrMoreCronJobsFailedError(errors)
Tries to lock and run cron jobs. Args: names: List of cron jobs to run. If unset, run them all. token: security token. Raises: OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail. Note: a failure of a single cron job doesn't preclude other cron jobs from running.
juraj-google-style
def get_all_users(configuration=None, **kwargs): user = User(configuration=configuration) user['id'] = 'all users' result = user._write_to_hdx('list', kwargs, 'id') users = list() if result: for userdict in result: user = User(userdict, configuration=configuration) users.append(user) else: logger.debug(result) return users
Get all users in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. **kwargs: See below q (str): Restrict to names containing a string. Defaults to all users. order_by (str): Field by which to sort - any user field or edits (number_of_edits). Defaults to 'name'. Returns: List[User]: List of all users in HDX
codesearchnet
def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu, tensor_trace_order, graph): if not tensor_trace_order.traced_tensors: logging.warn('No tensor values being traced. No flush cache op added.') return tensor_fetches with ops.control_dependencies(op_fetches + [tensor.op for tensor in tensor_fetches]): flush_cache_op = self._generate_flush_cache_op(self._tt_config.num_replicas, on_tpu, tensor_trace_order, graph) return control_flow_ops.tuple(tensor_fetches, control_inputs=[flush_cache_op])
Flushes the intermediate tensor values in the graph to the cache. Args: tensor_fetches: list of tensor results returned by the model_fn. op_fetches: list of ops that are returned by the model_fn, e.g., train_op. on_tpu: if the graph is executed on TPU. tensor_trace_order: TensorTraceOrder object holding tensorname to id map. graph: TensorFlow graph. Returns: An identical copy of tensor_fetches.
github-repos
def recipe_sheets_to_bigquery(config, auth_read, auth_write, sheets_url, sheets_tab, sheets_range, dataset, table, sheets_header): sheets(config, {'auth': auth_read, 'sheet': sheets_url, 'tab': sheets_tab, 'range': sheets_range, 'header': sheets_header, 'out': {'auth': auth_write, 'bigquery': {'dataset': dataset, 'table': table}}})
Import data from a sheet and move it to a BigQuery table. Args: auth_read (authentication) - Credentials used for reading data. auth_write (authentication) - Credentials used for writing data. sheets_url (string) - NA sheets_tab (string) - NA sheets_range (string) - NA dataset (string) - NA table (string) - NA sheets_header (boolean) - NA
github-repos
def list_keyvaults_sub(access_token, subscription_id): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API]) return do_get_next(endpoint, access_token)
Lists key vaults belonging to this subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. 200 OK.
codesearchnet
def add_average_summary(self, var, tag=None, decay=0.999, ignore_nan=True): if (not self.summary_collections): return with self.g.as_default(): if ((decay < 0.9) or (decay >= 1.0)): raise ValueError(('Decay is %5.2f, but has to be in [0, 1).' % decay)) avg_var = self.exponential_moving_average(var, decay=decay, ignore_nan=ignore_nan) if (tag is None): tag = _bare_var_name(avg_var) tag = self.g.unique_name(tag) self.add_scalar_summary(avg_var, tag) return avg_var
Add a summary with the moving average of var. Adds a variable to keep track of the exponential moving average and adds an update operation to the bookkeeper. The name of the variable is '%s_average' % name prefixed with the current variable scope. Args: var: The variable for which a moving average should be computed. tag: The tag of the summary. If None var.name[:-2] is used to strip off the ':0' that is added by TF. decay: How much history to use in the moving average. Higher, means more history values [0.9, 1) accepted. ignore_nan: If the value is NaN or Inf, skip it. Note that this default is different than the exponential_moving_average one. Returns: The averaged variable. Raises: ValueError: if decay is not in [0.9, 1).
codesearchnet
def find_emails_by_subject(self, subject, limit=50, match_recipient=None): self._mail.select('inbox') matching_uids = self.__search_email_by_subject(subject, match_recipient) return matching_uids
Searches for Email by Subject. Returns email's imap message IDs as a list if matching subjects are found. Args: subject (str) - Subject to search for. Kwargs: limit (int) - Limit search to X number of matches, default 50 match_recipient (str) - Recipient to match exactly (ignored if not specified) Returns: list - List of Integers representing imap message UIDs.
codesearchnet
def QueryAndOwn(self, queue, lease_seconds=10, limit=1): with self.data_store.GetMutationPool() as mutation_pool: return mutation_pool.QueueQueryAndOwn(queue, lease_seconds, limit, self.frozen_timestamp)
Returns a list of Tasks leased for a certain time. Args: queue: The queue to query from. lease_seconds: The tasks will be leased for this long. limit: Number of values to fetch. Returns: A list of GrrMessage() objects leased.
juraj-google-style
def _from_record(data): if isinstance(data, dict): return Schema._from_dict_record(data) elif isinstance(data, list): return Schema._from_list_record(data) else: raise Exception(('Cannot create a schema from record %s' % str(data)))
Infer a BigQuery table schema from a list of fields or a dictionary. The type of the elements is used. For a list, the field names are simply 'Column1', 'Column2', etc. Args: data: The list of fields or dictionary. Returns: A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a BigQuery Tables resource schema.
codesearchnet
def next_weekday(date): n_days = 7 - date.weekday() if n_days > 3: n_days = 1 return date + datetime.timedelta(days=n_days)
Return the first weekday after date Args: date (datetime or datetime.date) Returns: (datetime or datetime.date) Raises: -
juraj-google-style
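A short sanity check of the weekend-skipping behaviour (assuming next_weekday above is in scope):

import datetime

print(next_weekday(datetime.date(2021, 1, 1)))  # Friday   -> 2021-01-04 (Monday)
print(next_weekday(datetime.date(2021, 1, 4)))  # Monday   -> 2021-01-05 (Tuesday)
print(next_weekday(datetime.date(2021, 1, 2)))  # Saturday -> 2021-01-04 (Monday)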
def define_saver(exclude=None): variables = [] exclude = (exclude or []) exclude = [re.compile(regex) for regex in exclude] for variable in tf.global_variables(): if any((regex.match(variable.name) for regex in exclude)): continue variables.append(variable) saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5) return saver
Create a saver for the variables we want to checkpoint. Args: exclude: List of regexes to match variable names to exclude. Returns: Saver object.
codesearchnet
def setPollingValues(self, max_waits, wait_sleep): self.m_max_waits = max_waits self.m_wait_sleep = wait_sleep
Optional polling loop control Args: max_waits (int): waits wait_sleep (int): ms per wait
codesearchnet
def lint(exclude, skip_untracked, commit_only): exclude = (list(exclude) + conf.get('lint.exclude', [])) runner = LintRunner(exclude, skip_untracked, commit_only) if (not runner.run()): exit(1)
Lint python files. Args: exclude (list[str]): A list of glob string patterns to test against. If the file/path matches any of those patters, it will be filtered out. skip_untracked (bool): If set to **True** it will skip all files not tracked by git. commit_only (bool): Only lint files that are staged for commit.
codesearchnet
def store_object(self, obj): self._check_obj_properties(obj) with transaction.manager: self._put_into_indexes(obj)
Save `obj` into database and into proper indexes. Attr: obj (obj): Indexable object. Raises: InvalidType: When the `obj` doesn't have the right properties. Unindexableobjlication: When no indexes are defined.
codesearchnet
def add_parser(self, func=None, name=None, **kwargs): if func: if (not func.__doc__): raise ValueError('No docstrings given in {0}'.format(func.__name__)) info = _parse_doc(func.__doc__) if ((_HELP not in kwargs) or (not kwargs[_HELP])): kwargs[_HELP] = info['headline'] if ((_DESCRIPTION not in kwargs) or (not kwargs[_DESCRIPTION])): kwargs[_DESCRIPTION] = info['description'] if ((_FORMAT_CLASS not in kwargs) or (not kwargs[_FORMAT_CLASS])): kwargs[_FORMAT_CLASS] = argparse.RawTextHelpFormatter if (not name): name = (func.__name__ if hasattr(func, '__name__') else func) res = self.__delegate.add_parser(name, argmap=info['args'], **kwargs) res.set_defaults(cmd=func) else: res = self.__delegate.add_parser(name, **kwargs) return res
Add parser. This method makes a new sub command parser. It takes the same arguments as add_parser() of the action class made by argparse.ArgumentParser.add_subparsers. In addition, it takes one positional argument `func`, which is the function that implements the process of this sub command. The `func` will be used to determine the name, help, and description of this sub command. The function `func` will also be set as a default value of the `cmd` attribute. If you want to choose the name of this sub command, use keyword argument `name`. Args: func: function implements the process of this command. name: name of this command. If not given, the function name is used. Returns: new ArgumentParser object. Raises: ValueError: if the given function does not have docstrings.
codesearchnet
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame: series = table[self.name] self._check_series_name(series) validators = self.validators results = pd.DataFrame({validator: series for validator in validators}, index=series.index) for name, func in validators.items(): results[name] = func(results[name]) results['dtype'] = self._validate_series_dtype(series) if self.unique: results['unique'] = v.funcs.unique(series) if failed_only: results = find_failed_rows(results) return results
Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate.
juraj-google-style
def raster_to_asc(raster_f, asc_f): raster_r = RasterUtilClass.read_raster(raster_f) RasterUtilClass.write_asc_file(asc_f, raster_r.data, raster_r.nCols, raster_r.nRows, raster_r.geotrans, raster_r.noDataValue)
Converting Raster format to ASCII raster. Args: raster_f: raster file. asc_f: output ASCII file.
codesearchnet
def convert_field(field_names: Optional[List[str]]=None): if field_names is None: field_names = [] def convert_field_decorator(convert_method): convert_method.convert_field_names = field_names @functools.wraps(convert_method) def convert_field_wrapper(self, src_proto, dest_proto): convert_method(self, src_proto, dest_proto) return convert_field_wrapper return convert_field_decorator
Decorator that converts proto fields. Args: field_names: list of field names from src proto this function handles. Returns: convert_field_decorator Typical usage example: @converter.convert_field(field_names=["hello"]) def hello_convert_function(self, src_proto, dest_proto): ...
github-repos
def as_pyplot_figure(self, label=1, **kwargs): import matplotlib.pyplot as plt exp = self.as_list(label=label, **kwargs) fig = plt.figure() vals = [x[1] for x in exp] names = [x[0] for x in exp] vals.reverse() names.reverse() colors = [('green' if (x > 0) else 'red') for x in vals] pos = (np.arange(len(exp)) + 0.5) plt.barh(pos, vals, align='center', color=colors) plt.yticks(pos, names) if (self.mode == 'classification'): title = ('Local explanation for class %s' % self.class_names[label]) else: title = 'Local explanation' plt.title(title) return fig
Returns the explanation as a pyplot figure. Will throw an error if you don't have matplotlib installed Args: label: desired label. If you ask for a label for which an explanation wasn't computed, will throw an exception. Will be ignored for regression explanations. kwargs: keyword arguments, passed to domain_mapper Returns: pyplot figure (barchart).
codesearchnet
def metadata(self, path): try: file_metadata = s3io.S3IO(options=self._options)._status(path) return FileMetadata(path, file_metadata['size'], file_metadata['last_updated']) except Exception as e: raise BeamIOError('Metadata operation failed', {path: e})
Fetch metadata fields of a file on the FileSystem. Args: path: string path of a file. Returns: :class:`~apache_beam.io.filesystem.FileMetadata`. Raises: ``BeamIOError``: if path isn't a file or doesn't exist.
github-repos
def FillDeviceAttributes(device, descriptor): attributes = HidAttributes() result = hid.HidD_GetAttributes(device, ctypes.byref(attributes)) if not result: raise ctypes.WinError() buf = ctypes.create_string_buffer(1024) result = hid.HidD_GetProductString(device, buf, 1024) if not result: raise ctypes.WinError() descriptor.vendor_id = attributes.VendorID descriptor.product_id = attributes.ProductID descriptor.product_string = ctypes.wstring_at(buf)
Fill out the attributes of the device. Fills the devices HidAttributes and product string into the descriptor. Args: device: A handle to the open device descriptor: The DeviceDescriptor to populate with the attributes. Returns: None Raises: WindowsError when unable to obtain attributes or product string.
juraj-google-style
def __init__(self, start, size, name="merge_dims"): super(MergeDims, self).__init__(name=name) self._start = start self._size = size if size <= 1: raise ValueError("`size` should be strictly greater than 1.")
Constructs the MergeDims module. Args: start: Start of the range of dimensions to merge. size: Size the range of dimensions to merge. name: The name of the module. Raises: ValueError: If `size` is not strictly greater than 1.
juraj-google-style
def _filter_valid_filepaths(self, df, x_col): filepaths = df[x_col].map(lambda fname: os.path.join(self.directory, fname)) mask = filepaths.apply(validate_filename, args=(self.white_list_formats,)) n_invalid = (~mask).sum() if n_invalid: warnings.warn('Found {} invalid image filename(s) in x_col="{}". These filename(s) will be ignored.'.format(n_invalid, x_col)) return df[mask]
Keep only dataframe rows with valid filenames. Args: df: Pandas dataframe containing filenames in a column x_col: string, column in `df` that contains the filenames or filepaths Returns: absolute paths to image files
github-repos
def ParseNotificationcenterRow( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = MacNotificationCenterEventData() event_data.bundle_name = self._GetRowValue(query_hash, row, 'bundle_name') event_data.presented = self._GetRowValue(query_hash, row, 'presented') blob = self._GetRowValue(query_hash, row, 'dataBlob') try: full_biplist = biplist.readPlistFromString(blob) req = full_biplist['req'] except (biplist.InvalidPlistException, KeyError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to read plist from database with error: {0!s}'.format( exception)) return event_data.title = req.get('titl', None) event_data.subtitle = req.get('subt', None) event_data.body = req.get('body', None) timestamp = self._GetRowValue(query_hash, row, 'timestamp') date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a message row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def from_dict(cls, metadata): hyperparameters = metadata.get('hyperparameters') tunable = metadata.get('tunable_hyperparameters') pipeline = cls(metadata['primitives'], metadata.get('init_params'), metadata.get('input_names'), metadata.get('output_names')) if hyperparameters: pipeline.set_hyperparameters(hyperparameters) if (tunable is not None): pipeline._tunable_hyperparameters = tunable return pipeline
Create a new MLPipeline from a dict specification. The dict structure is the same as the one created by the `to_dict` method. Args: metadata (dict): Dictionary containing the pipeline specification. Returns: MLPipeline: A new MLPipeline instance with the details found in the given specification dictionary.
codesearchnet
def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False): vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True) query_output = query_outputs[0] if query_output.dtype != image_embeds.dtype: query_output = query_output.to(image_embeds.dtype) language_model_inputs = self.language_projection(query_output) if return_dict: return (language_model_inputs, vision_outputs, query_outputs) return language_model_inputs
Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images.
github-repos
def _parse_single_video(self, example_proto): context_features = { "game_duration_loops": tf.io.FixedLenFeature([1], tf.int64), "game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32), "n_steps": tf.io.FixedLenFeature([1], tf.int64), "screen_size": tf.io.FixedLenFeature([2], tf.int64), } sequence_features = { "rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string), } _, seq_feat = tf.io.parse_single_sequence_example( example_proto, context_features=context_features, sequence_features=sequence_features) video_frames = tf.map_fn( tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8) return video_frames
Parses single video from the input tfrecords. Args: example_proto: tfExample proto with a single video. Returns: dict with all frames, positions and actions.
juraj-google-style
def auth_user(self, username, password): response = self._post((self.rest_url + '/authentication'), data=json.dumps({'value': password}), params={'username': username}) if (not response.ok): return None return response.json()
Authenticate a user account against the Crowd server. Attempts to authenticate the user against the Crowd server. Args: username: The account username. password: The account password. Returns: dict: A dict mapping of user attributes if the application authentication was successful. See the Crowd documentation for the authoritative list of attributes. None: If authentication failed.
codesearchnet
def compute_k(self, memory_antecedent): if self.shared_kv: raise ValueError('compute_k cannot be called with shared_kv') ret = mtf.einsum([memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim]) if self.combine_dims: ret = mtf.replace_dimensions(ret, ret.shape.dims[(- 1)], self.k_dims) return ret
Compute key Tensor k. Args: memory_antecedent: a Tensor with dimensions {memory_input_dim} + other_dims Returns: a Tensor with dimensions memory_heads_dims + {key_dim} + other_dims
codesearchnet
def most_exposes(python_type): _exposes = set() try: do_not_expose = set(python_type.__dir__(object) + \ ['__slots__', '__module__', '__weakref__']) empty = python_type.__new__(python_type) except AttributeError: try: _exposes = python_type.__slots__ except AttributeError: pass except TypeError: for _workaround in storable_workarounds: try: _exposes = _workaround(python_type) except (SystemExit, KeyboardInterrupt): raise except: pass else: break else: all_members = empty.__dir__() for attr in all_members: if attr in do_not_expose: continue try: getattr(empty, attr) except AttributeError as e: if e.args: msg = e.args[0] if msg == attr or msg.endswith("' object has no attribute '{}'".format(attr)): _exposes.add(attr) except (SystemExit, KeyboardInterrupt): raise except: pass for attr in ('__dict__',): if attr in all_members: _exposes.add(attr) return list(_exposes)
Core engine for the automatic generation of storable instances. Finds the attributes exposed by the objects of a given type. Mostly Python3-only. Does not handle types which `__new__` method requires extra arguments either. Arguments: python_type (type): object type. Returns: list: attributes exposed.
juraj-google-style
def set_tensor_final(self, tensor_name): tensor = self._name_to_tensor(tensor_name) self._final_tensors.add(tensor)
Denotes a tensor as a final output of the computation. Args: tensor_name: a string, name of a tensor in the graph.
juraj-google-style
def get_context(self, max_frames=None, missing_entities=[]): if not max_frames or max_frames > len(self.frame_stack): max_frames = len(self.frame_stack) missing_entities = list(missing_entities) context = [] for i in xrange(max_frames): frame_entities = [entity.copy() for entity in self.frame_stack[i].entities] for entity in frame_entities: entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i) context += frame_entities result = [] if len(missing_entities) > 0: for entity in context: if entity.get('data') in missing_entities: result.append(entity) missing_entities.remove(entity.get('data')) else: result = context return result
Constructs a list of entities from the context. Args: max_frames(int): maximum number of frames to look back missing_entities(list of str): a list or set of tag names, as strings Returns: list: a list of entities
juraj-google-style
def nth(series, n, order_by=None): if order_by is not None: series = order_series_by(series, order_by) try: return series.iloc[n] except: return np.nan
Returns the nth value of a series. Args: series (pandas.Series): column to summarize. n (integer): position of desired value. Returns `NaN` if out of range. Kwargs: order_by: a pandas.Series or list of series (can be symbolic) to order the input series by before summarization.
juraj-google-style
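A small usage sketch for nth without the symbolic order_by path (assuming numpy, pandas and the nth function above are available):

import numpy as np
import pandas as pd

s = pd.Series([30, 10, 20])
print(nth(s, 1))   # 10  (positional lookup)
print(nth(s, 5))   # nan (out of range)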
def getProperty(self, orgresource, dummy = 56184): url = nurls['getProperty'] data = {'userid': self.user_id, 'useridx': self.useridx, 'dummy': dummy, 'orgresource': orgresource, } r = self.session.post(url = url, data = data) j = json.loads(r.text) if self.resultManager(r.text): f = FileInfo() result = j['resultvalue'] f.resourcetype = result['resourcetype'] f.resourceno = result['resourceno'] return f else: return False
GetProperty Args: dummy: ??? orgresource: File path Returns: FileInfo object: False: Failed to get property
juraj-google-style
def fetch_github_pull_request(destination_directory: str, repository: github_repository.GithubRepository, pull_request_number: int, verbose: bool ) -> prepared_env.PreparedEnv: branch = 'pull/{}/head'.format(pull_request_number) os.chdir(destination_directory) print('chdir', destination_directory, file=sys.stderr) shell_tools.run_cmd( 'git', 'init', None if verbose else '--quiet', out=sys.stderr) result = _git_fetch_for_comparison(remote=repository.as_remote(), actual_branch=branch, compare_branch='master', verbose=verbose) shell_tools.run_cmd( 'git', 'branch', None if verbose else '--quiet', 'compare_commit', result.compare_commit_id, log_run_to_stderr=verbose) shell_tools.run_cmd( 'git', 'checkout', None if verbose else '--quiet', '-b', 'actual_commit', result.actual_commit_id, log_run_to_stderr=verbose) return prepared_env.PreparedEnv( github_repo=repository, actual_commit_id=result.actual_commit_id, compare_commit_id=result.compare_commit_id, destination_directory=destination_directory, virtual_env_path=None)
Uses content from github to create a dir for testing and comparisons. Args: destination_directory: The location to fetch the contents into. repository: The github repository that the commit lives under. pull_request_number: The id of the pull request to clone. If None, then the master branch is cloned instead. verbose: When set, more progress output is produced. Returns: Commit ids corresponding to content to test/compare.
juraj-google-style
def get_send_request_correct_body(self, path, action): (path_name, path_spec) = self.get_path_spec(path) if ((path_spec is not None) and (action in path_spec.keys())): for (name, spec) in path_spec[action]['parameters'].items(): if (spec['in'] == 'body'): if ('type' in spec.keys()): return self.get_example_from_prop_spec(spec) elif ('schema' in spec.keys()): if (('type' in spec['schema'].keys()) and (spec['schema']['type'] == 'array')): if ('$ref' in spec['schema']['items']): definition_name = self.get_definition_name_from_ref(spec['schema']['items']['$ref']) return [self.definitions_example[definition_name]] else: definition_name = self.get_definition_name_from_ref(spec['schema']['items']['type']) return [definition_name] elif ('type' in spec['schema'].keys()): return self.get_example_from_prop_spec(spec['schema']) else: definition_name = self.get_definition_name_from_ref(spec['schema']['$ref']) return self.definitions_example[definition_name]
Get an example body which is correct to send to the given path with the given action. Args: path: path of the request action: action of the request (get, post, put, delete) Returns: A dict representing a correct body for the request or None if no body is required.
codesearchnet
def assert_key_has_value(self, key, caller): assert key, ("key parameter must be specified.") self.assert_key_exists(key, caller) if self[key] is None: raise KeyInContextHasNoValueError( f"context['{key}'] must have a value for {caller}.")
Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. calling function name - this used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None
juraj-google-style
def is_valid(draco_query: List[str], debug=False) -> bool: _, stdout = run_clingo( draco_query, files=["define.lp", "hard.lp", "hard-integrity.lp"], silence_warnings=True, debug=debug, ) return json.loads(stdout)["Result"] != "UNSATISFIABLE"
Check a task. Args: draco_query: a list of facts Returns: whether the task is valid
juraj-google-style
def unique(seen, *iterables): _add = seen.add return (i for i in chain(*iterables) if ((i not in seen) and (not _add(i))))
Get the unique items in iterables while preserving order. Note that this mutates the seen set provided only when the returned generator is used. Args: seen (set): either an empty set, or the set of things already seen *iterables: one or more iterable lists to chain together Returns: generator:
codesearchnet
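A brief example of the order-preserving, seen-set-mutating behaviour described above (assuming the unique generator above is in scope):

from itertools import chain  # required by unique itself

seen = set()
gen = unique(seen, [1, 2, 2, 3], [3, 4, 1])
print(list(gen))  # [1, 2, 3, 4]
print(seen)       # {1, 2, 3, 4} -- populated only as the generator is consumed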
def find_furious_yaml(config_file=__file__): checked = set() result = _find_furious_yaml(os.path.dirname(config_file), checked) if (not result): result = _find_furious_yaml(os.getcwd(), checked) return result
Traverse directory trees to find a furious.yaml file Begins with the location of this file then checks the working directory if not found Args: config_file: location of this file, override for testing Returns: the path of furious.yaml or None if not found
codesearchnet
def setAccessPolicy(self, pid, accessPolicy, serialVersion, vendorSpecific=None): response = self.setAccessPolicyResponse( pid, accessPolicy, serialVersion, vendorSpecific ) return self._read_boolean_response(response)
See Also: setAccessPolicyResponse() Args: pid: accessPolicy: serialVersion: vendorSpecific: Returns:
juraj-google-style
def _sign_input(cls, input_, message, key_pairs): if isinstance(input_.fulfillment, Ed25519Sha256): return cls._sign_simple_signature_fulfillment(input_, message, key_pairs) elif isinstance(input_.fulfillment, ThresholdSha256): return cls._sign_threshold_signature_fulfillment(input_, message, key_pairs) else: raise ValueError("Fulfillment couldn't be matched to Cryptocondition fulfillment type.")
Signs a single Input. Note: This method works only for the following Cryptoconditions currently: - Ed25519Fulfillment - ThresholdSha256. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The Input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with.
codesearchnet
def _FormatMessageShort(self, event): _, message_short = self._output_mediator.GetFormattedMessages(event) if message_short is None: data_type = getattr(event, 'data_type', 'UNKNOWN') raise errors.NoFormatterFound( 'Unable to find event formatter for: {0:s}.'.format(data_type)) return message_short
Formats the short message. Args: event (EventObject): event. Returns: str: short message field. Raises: NoFormatterFound: if no event formatter can be found to match the data type in the event.
juraj-google-style
def FlagCxx11Features(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] include = Match('\\s*#\\s*include\\s+<([^<>]+)>\\s*$', line) if (include and (include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error'))): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.' % include.group(1))) if (Match('\\s*#', line) and (not Match('\\s*#\\s*define\\b', line))): return for top_name in ('alignment_of', 'aligned_union'): if Search(('\\bstd::%s\\b' % top_name), line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style an example of where it would make your code more readable, and they may let you use it.' % top_name))
Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def load_users(path=settings.LOGIN_FILE): if (not os.path.exists(path)): return {} data = '' with open(path) as f: data = f.read().splitlines() users = {} cnt = 1 for line in data: line = line.split(':') assert (len(line) == 7), ("Bad number of fields in '%s', at line %d!" % (path, cnt)) users[line[0]] = {'pass_hash': line[1], 'uid': line[2], 'gid': line[3], 'full_name': line[4], 'home': line[5], 'shell': line[6]} cnt += 1 return users
Read passwd file and return dict with users and all their settings. Args: path (str, default settings.LOGIN_FILE): path of the file, which will be loaded (default :attr:`ftp.settings.LOGIN_FILE`). Returns: (dict): username: {pass_hash, uid, gid, full_name, home, shell} Example of returned data:: { "xex": { "pass_hash": "$asd$aiosjdaiosjdásghwasdjo", "uid": "2000", "gid": "2000", "full_name": "ftftf", "home": "/home/ftp/xex", "shell": "/bin/false" } }
codesearchnet
def GetFileEntryByPathSpec(self, path_spec): return compressed_stream_file_entry.CompressedStreamFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): a path specification. Returns: CompressedStreamFileEntry: a file entry or None if not available.
juraj-google-style
def python_graph(self): return self._python_graph
Get the Python graph. Returns: If the Python graph has been set, returns a `tf.Graph` object. Otherwise, returns None.
github-repos
class AsList(AsSideInput): @staticmethod def _from_runtime_iterable(it, options): return list(it) def _side_input_data(self) -> SideInputData: return SideInputData(common_urns.side_inputs.ITERABLE.urn, self._window_mapping_fn, list)
Marker specifying that an entire PCollection is to be used as a side input. Intended for use in side-argument specification---the same places where AsSingleton and AsIter are used, but forces materialization of this PCollection as a list. Args: pcoll: Input pcollection. Returns: An AsList-wrapper around a PCollection whose one element is a list containing all elements in pcoll.
github-repos
def avg(self, vars_list: List[str]) -> 'TensorFluent': operand = self if operand.dtype == tf.bool: operand = operand.cast(tf.float32) return self._aggregation_op(tf.reduce_mean, operand, vars_list)
Returns the TensorFluent for the avg aggregation function. Args: vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the avg aggregation function.
juraj-google-style
def get_subgraph_for_concept(self, concept: str, depth: int=1, reverse: bool=False): nodeset = {concept} if reverse: func = self.predecessors else: func = self.successors for i in range(depth): nodeset.update(chain.from_iterable([list(func(n)) for n in nodeset])) return AnalysisGraph(self.subgraph(nodeset).copy())
Returns a new subgraph of the analysis graph for a single concept. Args: concept: The concept that the subgraph will be centered around. depth: The depth to which the depth-first search must be performed. reverse: Sets the direction of causal influence flow to examine. Setting this to False (default) will search for upstream causal influences, and setting it to True will search for downstream causal influences. Returns: AnalysisGraph
codesearchnet
def convert_obatoms_to_molecule(self, atoms, residue_name=None, site_property="ff_map"): restore_site_props = True if residue_name is not None else False if restore_site_props and not hasattr(self, "map_residue_to_mol"): self._set_residue_map() coords = [] zs = [] for atm in atoms: coords.append(list(atm.coords)) zs.append(atm.atomicnum) mol = Molecule(zs, coords) if restore_site_props: props = [] ref = self.map_residue_to_mol[residue_name].copy() assert len(mol) == len(ref) assert ref.formula == mol.formula for i, site in enumerate(mol): assert site.specie.symbol == ref[i].specie.symbol props.append(getattr(ref[i], site_property)) mol.add_site_property(site_property, props) return mol
Convert list of openbabel atoms to Molecule. Args: atoms ([OBAtom]): list of OBAtom objects residue_name (str): the key in self.map_residue_to_mol. Used to restore the site properties in the final packed molecule. site_property (str): the site property to be restored. Returns: Molecule object
juraj-google-style
def build_configuration(self): configuration = config.Configuration() pegtree = pegnode.parse(self.filestring) for section_node in pegtree: if isinstance(section_node, pegnode.GlobalSection): configuration.globall = self.build_global(section_node) elif isinstance(section_node, pegnode.FrontendSection): configuration.frontends.append(self.build_frontend(section_node)) elif isinstance(section_node, pegnode.DefaultsSection): configuration.defaults.append(self.build_defaults(section_node)) elif isinstance(section_node, pegnode.ListenSection): configuration.listens.append(self.build_listen(section_node)) elif isinstance(section_node, pegnode.UserlistSection): configuration.userlists.append(self.build_userlist(section_node)) elif isinstance(section_node, pegnode.BackendSection): configuration.backends.append(self.build_backend(section_node)) return configuration
Parse the haproxy config file.

Raises:
    Exception: when an unsupported section is encountered

Returns:
    config.Configuration: haproxy config object
codesearchnet
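A usage sketch under the assumption that `build_configuration` lives on a parser object constructed from a config file path; the `Parser` constructor and the `name` attribute on sections are assumptions:

```python
# Hypothetical wrapper object; only build_configuration() is taken from the source above.
parser = Parser('/etc/haproxy/haproxy.cfg')
cfg = parser.build_configuration()
for frontend in cfg.frontends:
    print(frontend.name)  # assumed attribute, for illustration
```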
def get_configuration(self, uri): req_headers = { 'Accept': 'application/vnd.onshape.v1+json', 'Content-Type': 'application/json' } return self._api.request('get', '/api/partstudios/d/' + uri["did"] + '/' + uri["wvm_type"] + '/' + uri["wvm"] + '/e/' + uri["eid"] + '/configuration', headers=req_headers)
get the configuration of a PartStudio Args: - uri (dict): points to a particular element Returns: - requests.Response: Onshape response data
juraj-google-style
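An illustrative call; the ids in `uri` are placeholders and `client` is assumed to be the surrounding Onshape API wrapper:

```python
uri = {'did': 'a1b2c3d4', 'wvm_type': 'w', 'wvm': 'e5f6a7b8', 'eid': '0123abcd'}  # placeholder ids
resp = client.get_configuration(uri)
print(resp.status_code, resp.json())
```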
def AddMapping(self, filename, new_mapping): for field in self._REQUIRED_MAPPING_FIELDS: if field not in new_mapping: raise problems.InvalidMapping(field) if filename in self.GetKnownFilenames(): raise problems.DuplicateMapping(filename) self._file_mapping[filename] = new_mapping
Adds an entry to the list of known filenames. Args: filename: The filename whose mapping is being added. new_mapping: A dictionary with the mapping to add. Must contain all fields in _REQUIRED_MAPPING_FIELDS. Raises: DuplicateMapping if the filename already exists in the mapping InvalidMapping if not all required fields are present
juraj-google-style
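A hedged sketch of the add/duplicate behavior; the filename and mapping contents are illustrative, and the mapping must contain every field in `_REQUIRED_MAPPING_FIELDS`:

```python
new_mapping = {field: '...' for field in loader._REQUIRED_MAPPING_FIELDS}  # placeholder values
loader.AddMapping('routes.txt', new_mapping)
try:
    loader.AddMapping('routes.txt', new_mapping)  # same filename again
except problems.DuplicateMapping:
    pass  # second registration is rejected
```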
def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
    output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states
    output_attentions = output_attentions if output_attentions else self.config.output_attentions
    return_dict = return_dict if return_dict else self.config.return_dict
    outputs = self.wav2vec2(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
    return outputs
Returns:

Example:

```python
>>> from transformers import AutoProcessor, TFWav2Vec2Model
>>> from datasets import load_dataset
>>> import soundfile as sf

>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
>>> model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")


>>> def map_to_array(batch):
...     speech, _ = sf.read(batch["file"])
...     batch["speech"] = speech
...     return batch


>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)

>>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values  # Batch size 1
>>> hidden_states = model(input_values).last_hidden_state
```
github-repos
def EnumValueName(self, enum, value): return self.enum_types_by_name[enum].values_by_number[value].name
Returns the string name of an enum value. This is just a small helper method to simplify a common operation. Args: enum: string name of the Enum. value: int, value of the enum. Returns: string name of the enum value. Raises: KeyError if either the Enum doesn't exist or the value is not a valid value for the enum.
juraj-google-style
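A hedged example, assuming `descriptor` is a protobuf descriptor for a message containing `enum Color { RED = 0; GREEN = 1; }`; the enum and values are illustrative:

```python
name = descriptor.EnumValueName('Color', 1)  # hypothetical enum name and value
assert name == 'GREEN'
```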
def get_weights(self):
    self._check_sess()
    return {k: v.eval(session=self.sess) for (k, v) in self.variables.items()}
Returns a dictionary containing the weights of the network. Returns: Dictionary mapping variable names to their weights.
codesearchnet
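A usage sketch; `net` stands for whatever object owns the `variables` dict and the TensorFlow session referenced above:

```python
weights = net.get_weights()
for var_name, value in weights.items():
    print(var_name, value.shape)  # each value is a NumPy array evaluated in the session
```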
def __init__(self, **kwinfo): self._author_fakename = getpass.getuser() self._author_truename = ProjectInfo.find_pakcage_info( 'author', SRC_FOLDER, PROJECT_NAME, '__init__.py') self._email = ProjectInfo.find_pakcage_info( 'email', SRC_FOLDER, PROJECT_NAME, '__init__.py') self._project_name = os.path.basename( os.path.dirname(os.path.realpath(__file__))) self._project_version = ProjectInfo.find_pakcage_info( 'version', SRC_FOLDER, PROJECT_NAME, '__init__.py') for key, info in kwinfo.items(): key = '_' + key setattr(self, key, info)
Initialize project info.

Args:
    author_fakename (str): TODO
    author_truename (str): TODO
    email (str): TODO
    project_name (str): TODO
    project_version (str): TODO
juraj-google-style
def mean_centroid(candidates): sum_x = 0.0 sum_y = 0.0 for (centroid_x, centroid_y, _, _) in candidates: sum_x += centroid_x sum_y += centroid_y denom = (3.0 * len(candidates)) return ((sum_x / denom), (sum_y / denom))
Take the mean of all centroids in set of reference triangles.

.. note::

   This is used **only** as a helper for :func:`locate_point`.

Args:
    candidates (List[Tuple[float, float, float, numpy.ndarray]): List of 4-tuples, each
        of which has been produced by :func:`locate_point`. Each 4-tuple contains

        * Three times centroid ``x``-value
        * Three times centroid ``y``-value
        * "Width" of a parameter space for a surface
        * Control points for a surface

        We only use the first two values, which are triple the desired value so that we can
        put off division by three until summing in our average. We don't use the other two
        values, they are just an artifact of the way ``candidates`` is constructed by the
        caller.

Returns:
    Tuple[float, float]: The mean of all centroids.
codesearchnet
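A small self-contained check of the triple-centroid convention: each candidate stores three times the centroid coordinates, and the last two tuple entries are ignored here. The numbers are illustrative:

```python
candidates = [
    (3.0, 6.0, None, None),  # true centroid (1.0, 2.0), stored tripled
    (9.0, 3.0, None, None),  # true centroid (3.0, 1.0), stored tripled
]
assert mean_centroid(candidates) == (2.0, 1.5)  # mean of (1.0, 2.0) and (3.0, 1.0)
```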
def wrefs(self, index=None, recurse=True):
    targets = []
    self._helper_wrefs(targets, recurse)
    if index is None:
        return targets
    else:
        return targets[index]
Returns a list of word references, these can be Words but also Morphemes or Phonemes.

Arguments:
    index (int or None): If set to an integer, will retrieve and return the n'th element
        (starting at 0) instead of returning the list of all word references.
juraj-google-style
def _CompareStores(self, storage_reader, compare_storage_reader): storage_counters = self._CalculateStorageCounters(storage_reader) compare_storage_counters = self._CalculateStorageCounters( compare_storage_reader) return storage_counters == compare_storage_counters
Compares the contents of two stores. Args: storage_reader (StorageReader): storage reader. compare_storage_reader (StorageReader): storage to compare against. Returns: bool: True if the content of the stores is identical.
juraj-google-style
def softmax(x, axis=-1):
    if isinstance(axis, int) and x.shape[axis] == 1:
        warnings.warn(f'You are using a softmax over axis {axis} of a tensor of shape {x.shape}. This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?')
    if any_symbolic_tensors((x,)):
        return Softmax(axis).symbolic_call(x)
    if isinstance(axis, tuple):
        axis_to_keep = [v for v in range(len(x.shape)) if v not in axis]
        x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis))
        x_reshaped = backend.numpy.reshape(x_transposed, (*[x.shape[v] for v in axis_to_keep], -1))
        x = backend.nn.softmax(x_reshaped, axis=-1)
        x = backend.numpy.reshape(x, x_transposed.shape)
        x = backend.numpy.transpose(x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis])))
        return x
    else:
        return backend.nn.softmax(x, axis=axis)
Softmax activation function.

The elements of the output vector lie within the range `(0, 1)`, and their total sum is
exactly 1 (excluding the floating point rounding error).

Each vector is processed independently. The `axis` argument specifies the axis along which
the function is applied within the input.

It is defined as:
`f(x) = exp(x) / sum(exp(x))`

Args:
    x: Input tensor.
    axis: Integer, axis along which the softmax is applied.

Returns:
    A tensor with the same shape as `x`.

Example:

>>> x = np.array([-1., 0., 1.])
>>> x_softmax = keras.ops.softmax(x)
>>> print(x_softmax)
array([0.09003057, 0.24472847, 0.66524096], shape=(3,), dtype=float64)
github-repos
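The docstring example covers the integer-axis case; here is a short sketch of the tuple-axis path, assuming `keras.ops.softmax` is the public entry point for the function above:

```python
import numpy as np
import keras

x = np.zeros((4, 2, 3))
y = keras.ops.softmax(x, axis=(1, 2))  # softmax over the last two axes jointly
# every 2x3 block is uniform, so each entry is 1/6 and each block sums to 1
```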
def recipe_dbm_to_sheets(config, auth_read, report_id, report_name, sheet, tab): dbm(config, {'auth': auth_read, 'report': {'report_id': report_id, 'name': report_name}, 'out': {'sheets': {'sheet': sheet, 'tab': tab, 'range': 'A1'}}})
Move existing DV360 report into a Sheets tab. Args: auth_read (authentication) - Credentials used for reading data. report_id (integer) - DV360 report ID given in UI, not needed if name used. report_name (string) - Name of report, not needed if ID used. sheet (string) - Full URL to sheet being written to. tab (string) - Existing tab in sheet to write to.
github-repos
def LookupClients(self, keywords): if isinstance(keywords, string_types): raise ValueError( "Keywords should be an iterable, not a string (got %s)." % keywords) start_time, end_time, filtered_keywords, unversioned_keywords = ( self._AnalyzeKeywords(keywords)) last_seen_map = None if unversioned_keywords: last_seen_map = {} raw_results = self.Lookup( list(map(self._NormalizeKeyword, filtered_keywords)), start_time=start_time.AsMicrosecondsSinceEpoch(), end_time=end_time.AsMicrosecondsSinceEpoch(), last_seen_map=last_seen_map) if not raw_results: return [] if unversioned_keywords: universal_last_seen_raw = {} self.ReadPostingLists( list(map(self._NormalizeKeyword, raw_results)), start_time=start_time.AsMicrosecondsSinceEpoch(), end_time=end_time.AsMicrosecondsSinceEpoch(), last_seen_map=universal_last_seen_raw) universal_last_seen = {} for (_, client_id), ts in iteritems(universal_last_seen_raw): universal_last_seen[client_id] = ts old_results = set() for keyword in unversioned_keywords: for result in raw_results: if last_seen_map[(keyword, result)] < universal_last_seen[result]: old_results.add(result) raw_results -= old_results return [rdf_client.ClientURN(result) for result in raw_results]
Returns a list of client URNs associated with keywords. Args: keywords: The list of keywords to search by. Returns: A list of client URNs. Raises: ValueError: A string (single keyword) was passed instead of an iterable.
juraj-google-style
def cellsiter_to_dataframe(cellsiter, args, drop_allna=True):
    from modelx.core.cells import shareable_parameters
    if len(args):
        indexes = shareable_parameters(cellsiter)
    else:
        indexes = get_all_params(cellsiter.values())
    result = None
    for cells in cellsiter.values():
        df = cells_to_dataframe(cells, args)
        if drop_allna and df.isnull().all().all():
            continue
        if df.index.names != [None]:
            if isinstance(df.index, pd.MultiIndex):
                if _pd_ver < (0, 20):
                    df = _reset_naindex(df)
            df = df.reset_index()
        missing_params = set(indexes) - set(df)
        for params in missing_params:
            df[params] = np.nan
        if result is None:
            result = df
        else:
            try:
                result = pd.merge(result, df, how='outer')
            except MergeError:
                result = pd.concat([result, df], axis=1)
            except ValueError:
                cols = set(result.columns) & set(df.columns)
                for col in cols:
                    if len([str(frame[col].dtype) for frame in (result, df)
                            if str(frame[col].dtype) == 'object']) == 1:
                        if str(result[col].dtype) == 'object':
                            frame = df
                        else:
                            frame = result
                        frame[[col]] = frame[col].astype('object')
                result = pd.merge(result, df, how='outer')
    if result is None:
        return pd.DataFrame()
    else:
        return result.set_index(indexes) if indexes else result
Convert multiple cells to a frame.

If args is an empty sequence, all values are included.
If args is specified, cellsiter must have shareable parameters.

Args:
    cellsiter: A mapping from cell names to CellsImpl objects.
    args: A sequence of arguments
codesearchnet
def explain_text(self, labels, instance, column_name=None, num_features=10, num_samples=5000):
    from lime.lime_text import LimeTextExplainer
    if (len(self._text_columns) > 1) and (not column_name):
        raise ValueError('There are multiple text columns in the input of the model. ' +
                         'Please specify "column_name".')
    elif column_name and (column_name not in self._text_columns):
        raise ValueError('Specified column_name "%s" not found in the model input.' % column_name)
    text_column_name = column_name if column_name else self._text_columns[0]
    if isinstance(instance, six.string_types):
        instance = next(csv.DictReader([instance], fieldnames=self._headers))
    predict_fn = self._make_text_predict_fn(labels, instance, text_column_name)
    explainer = LimeTextExplainer(class_names=labels)
    exp = explainer.explain_instance(instance[text_column_name], predict_fn,
                                     labels=range(len(labels)),
                                     num_features=num_features, num_samples=num_samples)
    return exp
Explain a text field of a prediction.

It analyzes the prediction with LIME, and returns a report of which words are most
impactful in contributing to certain labels.

Args:
    labels: a list of labels to explain.
    instance: the prediction instance. It needs to conform to model's input. Can be a csv
        line string, or a dict.
    column_name: which text column to explain. Can be None if there is only one text column
        in the model input.
    num_features: maximum number of words (features) to analyze. Passed to LIME
        LimeTextExplainer directly.
    num_samples: size of the neighborhood to learn the linear model. Passed to LIME
        LimeTextExplainer directly.

Returns:
    A LIME's lime.explanation.Explanation.

Throws:
    ValueError if the given text column is not found in model input or column_name is None
    but there are multiple text columns in model input.
codesearchnet
def _GetMetadataRequest(self, metadata_url, params=None, timeout=None): headers = {'Metadata-Flavor': 'Google'} params = urlparse.urlencode(params or {}) url = '%s?%s' % (metadata_url, params) request = urlrequest.Request(url, headers=headers) request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) timeout = timeout or self.timeout return request_opener.open(request, timeout=timeout*1.1)
Performs a GET request with the metadata headers. Args: metadata_url: string, the URL to perform a GET request on. params: dictionary, the query parameters in the GET request. timeout: int, timeout in seconds for metadata requests. Returns: HTTP response from the GET request. Raises: urlerror.HTTPError: raises when the GET request fails.
juraj-google-style
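A hypothetical call; `watcher` stands for the object owning this method, and the metadata path and params are illustrative:

```python
response = watcher._GetMetadataRequest(
    'http://metadata.google.internal/computeMetadata/v1/instance/attributes/',
    params={'recursive': 'true', 'alt': 'json'},
    timeout=5)
data = response.read()  # raw response bytes from the metadata server
```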
def plot(self, ax_list=None, fontsize=12, **kwargs): history = self.history num_plots, ncols, nrows = len(history), 1, 1 if num_plots > 1: ncols = 2 nrows = num_plots ax_list, fig, plot = get_axarray_fig_plt(ax_list, nrows=nrows, ncols=ncols, sharex=True, sharey=False, squeeze=False) ax_list = np.array(ax_list).ravel() iter_num = np.array(list(range(self.num_iterations))) + 1 label = kwargs.pop("label", None) for i, ((key, values), ax) in enumerate(zip(history.items(), ax_list)): ax.grid(True) ax.set_xlabel('Relaxation Step') ax.set_xticks(iter_num, minor=False) ax.set_ylabel(key) xx, yy = iter_num, values if not kwargs and label is None: ax.plot(xx, yy, "-o", lw=2.0) else: ax.plot(xx, yy, label=label if i == 0 else None, **kwargs) if key in _VARS_SUPPORTING_LOGSCALE and np.all(yy > 1e-22): ax.set_yscale("log") if key in _VARS_WITH_YRANGE: ymin, ymax = _VARS_WITH_YRANGE[key] val_min, val_max = np.min(yy), np.max(yy) if abs(val_max - val_min) > abs(ymax - ymin): ax.set_ylim(ymin, ymax) if label is not None: ax.legend(loc="best", fontsize=fontsize, shadow=True) if num_plots % ncols != 0: ax_list[-1].plot(xx, yy, lw=0.0) ax_list[-1].axis('off') return fig
Plot relaxation history i.e. the results of the last iteration of each SCF cycle. Args: ax_list: List of axes. If None a new figure is produced. fontsize: legend fontsize. kwargs: keyword arguments are passed to ax.plot Returns: matplotlib figure
juraj-google-style
def _executeMassiveMethod(path, method, args=None, classArgs=None):
    response = {}
    if args is None:
        args = {}
    if classArgs is None:
        classArgs = {}
    sys.path.append(path)
    exclude = ['__init__.py', 'base.py']
    for f in AtomShieldsScanner._getFiles(path, '*.py', exclude=exclude):
        try:
            instance = AtomShieldsScanner._getClassInstance(path=f, args=classArgs)
            if instance is not None:
                if callable(method):
                    args['instance'] = instance
                    output = method(**args)
                    response[instance.__class__.NAME] = output
                elif hasattr(instance, method):
                    output = getattr(instance, method)(**args)
                    response[instance.__class__.NAME] = output
                else:
                    continue
        except Exception as e:
            AtomShieldsScanner._debug('[!] %s' % e)
    sys.path.remove(path)
    return response
Execute a specific method for each class instance located in path.

Args:
    path (str): Absolute path which contains the .py files
    method (str): Method to execute into class instance

Returns:
    dict: Dictionary which contains the response for every class instance. The dictionary
        keys are the value of each instance's 'NAME' class variable.
codesearchnet
def _ListActiveBreakpoints(self, service): try: response = service.debuggees().breakpoints().list( debuggeeId=self._debuggee_id, waitToken=self._wait_token, successOnTimeout=True).execute() if not response.get('waitExpired'): self._wait_token = response.get('nextWaitToken') breakpoints = response.get('breakpoints') or [] if self._breakpoints != breakpoints: self._breakpoints = breakpoints native.LogInfo( 'Breakpoints list changed, %d active, wait token: %s' % ( len(self._breakpoints), self._wait_token)) self.on_active_breakpoints_changed(copy.deepcopy(self._breakpoints)) except BaseException: native.LogInfo('Failed to query active breakpoints: ' + traceback.format_exc()) self._debuggee_id = None return (True, self.list_backoff.Failed()) self.list_backoff.Succeeded() return (False, 0)
Makes a single attempt to query the list of active breakpoints.

Must not be called before the debuggee has been registered. If the request fails, this
function resets self._debuggee_id, which triggers repeated debuggee registration.

Args:
    service: client to use for API calls

Returns:
    (registration_required, delay) tuple
juraj-google-style
def query(self, sql_query, return_as='dataframe'):
    if isinstance(sql_query, str):
        pass
    elif isinstance(sql_query, unicode):
        sql_query = str(sql_query)
    else:
        raise QueryDbError('query() requires a str or unicode input.')
    query = sqlalchemy.sql.text(sql_query)
    if return_as.upper() in ['DF', 'DATAFRAME']:
        return self._to_df(query, self._engine)
    elif return_as.upper() in ['RESULT', 'RESULTPROXY']:
        with self._engine.connect() as conn:
            result = conn.execute(query)
            return result
    else:
        raise QueryDbError('Other return types not implemented.')
Execute a raw SQL query against the SQL DB.

Args:
    sql_query (str): A raw SQL query to execute.

Kwargs:
    return_as (str): Specify what type of object should be returned.
        The following are acceptable types:
        - "dataframe": pandas.DataFrame or None if no matching query
        - "result": sqlalchemy.engine.result.ResultProxy

Returns:
    result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as a DataFrame
        (default) or sqlalchemy result (specified with return_as="result")

Raises:
    QueryDbError
codesearchnet
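A usage sketch; `db` stands for the surrounding QueryDb-style wrapper and the table is illustrative:

```python
df = db.query("SELECT id, name FROM users WHERE active = 1")        # pandas DataFrame
rows = db.query("SELECT COUNT(*) FROM users", return_as="result")   # sqlalchemy ResultProxy
```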