Dataset columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def usergroups_users_update(self, *, usergroup: str, users: List[str], **kwargs) -> SlackResponse: self._validate_xoxp_token() kwargs.update({'usergroup': usergroup, 'users': users}) return self.api_call('usergroups.users.update', json=kwargs)
Update the list of users for a User Group Args: usergroup (str): The encoded ID of the User Group to update. e.g. 'S0604QSJC' users (list): A list of user IDs that represents the entire list of users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
codesearchnet
def create(self, specify_uri=False, ignore_tombstone=False, serialization_format=None, stream=False, auto_refresh=None): if self.exists: raise Exception('resource exists attribute True, aborting') else: if specify_uri: verb = 'PUT' else: verb = 'POST' logger.debug(('creating resource %s with verb %s' % (self.uri, verb))) if issubclass(type(self), NonRDFSource): self.binary._prep_binary() data = self.binary.data else: if (not serialization_format): serialization_format = self.repo.default_serialization data = self.rdf.graph.serialize(format=serialization_format) logger.debug('Serialized graph used for resource creation:') logger.debug(data.decode('utf-8')) self.headers['Content-Type'] = serialization_format response = self.repo.api.http_request(verb, self.uri, data=data, headers=self.headers, stream=stream) return self._handle_create(response, ignore_tombstone, auto_refresh)
Primary method to create resources. Args: specify_uri (bool): If True, uses the PUT verb and sets the URI during creation. If False, uses POST and gets a repository-minted URI ignore_tombstone (bool): If True, will attempt creation; if a tombstone exists (409), will delete the tombstone and retry serialization_format (str): Content-Type header / mimetype that will be used to serialize self.rdf.graph, and set headers for PUT/POST requests auto_refresh (bool): If True, refreshes resource after update. If left None, defaults to repo.default_auto_refresh
codesearchnet
def single_qubit_state_tomography(sampler: sim.Sampler, qubit: devices.GridQubit, circuit: circuits.Circuit, repetitions: int=1000) -> TomographyResult: circuit_z = (circuit + circuits.Circuit.from_ops(ops.measure(qubit, key='z'))) results = sampler.run(circuit_z, repetitions=repetitions) rho_11 = np.mean(results.measurements['z']) rho_00 = (1.0 - rho_11) circuit_x = circuits.Circuit.from_ops(circuit, (ops.X(qubit) ** 0.5), ops.measure(qubit, key='z')) results = sampler.run(circuit_x, repetitions=repetitions) rho_01_im = (np.mean(results.measurements['z']) - 0.5) circuit_y = circuits.Circuit.from_ops(circuit, (ops.Y(qubit) ** (- 0.5)), ops.measure(qubit, key='z')) results = sampler.run(circuit_y, repetitions=repetitions) rho_01_re = (0.5 - np.mean(results.measurements['z'])) rho_01 = (rho_01_re + (1j * rho_01_im)) rho_10 = np.conj(rho_01) rho = np.array([[rho_00, rho_01], [rho_10, rho_11]]) return TomographyResult(rho)
Single-qubit state tomography. The density matrix of the output state of a circuit is measured by first doing projective measurements in the z-basis, which determine the diagonal elements of the matrix. An X/2 or Y/2 rotation is then added before the z-basis measurement, which determines the imaginary and real parts of the off-diagonal matrix elements, respectively. See Vandersypen and Chuang, Rev. Mod. Phys. 76, 1037 for details. Args: sampler: The quantum engine or simulator to run the circuits. qubit: The qubit under test. circuit: The circuit to execute on the qubit before tomography. repetitions: The number of measurements for each basis rotation. Returns: A TomographyResult object that stores and plots the density matrix.
codesearchnet
def ParseZeitgeistEventRow(self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = ZeitgeistActivityEventData() event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri') timestamp = self._GetRowValue(query_hash, row, 'timestamp') date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UNKNOWN) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a zeitgeist event row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def from_file(cls, jss, filename): tree = ElementTree.parse(filename) root = tree.getroot() return cls(jss, root)
Create a new JSSObject from an external XML file. Args: jss: A JSS object. filename: String path to an XML file.
juraj-google-style
def _copy_delpoy_scripts(self, scripts): if (not os.path.exists(self.paths.scripts())): os.makedirs(self.paths.scripts()) new_scripts = [] for script in scripts: script = os.path.expandvars(script) if (not os.path.exists(script)): raise RuntimeError(('Script %s does not exist' % script)) sanitized_name = script.replace('/', '_') new_script_cur_path = os.path.expandvars(self.paths.scripts(sanitized_name)) shutil.copy(script, new_script_cur_path) new_script_init_path = os.path.join('$LAGO_PREFIX_PATH', os.path.basename(self.paths.scripts()), sanitized_name) new_scripts.append(new_script_init_path) return new_scripts
Copy the given deploy scripts to the scripts dir in the prefix Args: scripts(list of str): list of paths of the scripts to copy to the prefix Returns: list of str: list with the paths to the copied scripts, each prefixed with $LAGO_PREFIX_PATH so the full path is not hardcoded
codesearchnet
def setup(template, version=None): temple.check.is_git_ssh_path(template) temple.check.not_in_git_repo() repo_path = temple.utils.get_repo_path(template) msg = 'You will be prompted for the parameters of your new project. Please read the docs at https: print(msg) (cc_repo_dir, config) = temple.utils.get_cookiecutter_config(template, version=version) if (not version): with temple.utils.cd(cc_repo_dir): ret = temple.utils.shell('git rev-parse HEAD', stdout=subprocess.PIPE) version = ret.stdout.decode('utf-8').strip() _generate_files(repo_dir=cc_repo_dir, config=config, template=template, version=version)
Sets up a new project from a template Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'setup' during the duration of this function. Args: template (str): The git SSH path to a template version (str, optional): The version of the template to use when updating. Defaults to the latest version
codesearchnet
def create_bird_config_files(bird_configuration): for ip_version in bird_configuration: config_file = bird_configuration[ip_version]['config_file'] try: touch(config_file) except OSError as exc: raise ValueError("failed to create {f}:{e}" .format(f=config_file, e=exc)) if bird_configuration[ip_version]['keep_changes']: history_dir = os.path.join(os.path.dirname(config_file), 'history') try: os.mkdir(history_dir) except FileExistsError: pass except OSError as exc: raise ValueError("failed to make directory {d} for keeping a " "history of changes for {b}:{e}" .format(d=history_dir, b=config_file, e=exc)) else: print("{d} is created".format(d=history_dir))
Create bird configuration files per IP version. Creates bird configuration files if they don't exist. It also creates the directories where we store the history of changes, if this functionality is enabled. Arguments: bird_configuration (dict): A dictionary with settings for bird. Returns: None Raises: ValueError if we can't create bird configuration files and the directory to store the history of changes in bird configuration file.
juraj-google-style
def get_videos_for_course(course_id, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None): return _get_videos_for_filter( {'courses__course_id': six.text_type(course_id), 'courses__is_hidden': False}, sort_field, sort_dir, pagination_conf, )
Returns an iterator of videos for the given course id. Args: course_id (String) sort_field (VideoSortField) sort_dir (SortDirection) Returns: A generator expression that contains the videos found, sorted by the given field and direction, with ties broken by edx_video_id to ensure a total order.
juraj-google-style
def call(self, inputs, state): original_shape = inputs.shape if (len(original_shape) < 2): inputs = tf.reshape(inputs, [1, (- 1)]) (out, state) = self.lstm_cell(inputs, state) out = self.output_layer(out) correct_shape = tf.concat((original_shape[:(- 1)], tf.shape(input=out)[(- 1):]), 0) out = tf.reshape(out, correct_shape) loc = out[..., :self.dimensions] scale_diag = (tf.nn.softplus(out[..., self.dimensions:]) + 1e-05) return (tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag), state)
Runs the model to generate a distribution for a single timestep. This generates a batched MultivariateNormalDiag distribution using the output of the recurrent model at the current timestep to parameterize the distribution. Args: inputs: The sampled value of `z` at the previous timestep, i.e., `z_{t-1}`, of shape [..., dimensions]. `z_0` should be set to the empty matrix. state: A tuple containing the (hidden, cell) state. Returns: A tuple of a MultivariateNormalDiag distribution, and the state of the recurrent function at the end of the current timestep. The distribution will have event shape [dimensions], batch shape [...], and sample shape [sample_shape, ..., dimensions].
codesearchnet
def __init__(self, *rows, **kwargs): if not all([isinstance(r, Row) for r in rows]): raise TypeError('All elements of Grid must be Row instances') self.type = 'grid' self.rows = rows
Init method. Args: *rows (): the instances of Row. **kwargs (): not used.
juraj-google-style
def get(self, url, params={}, headers={}, auth=(), certificate_path=None): certificate_path = (certificate_path if certificate_path else False) return self.session.get(url, params=params, headers=headers, verify=certificate_path, auth=auth, timeout=self.timeout)
Returns the response payload from the request to the given URL. Args: url (str): The URL for the WEB API that the request is being made to. params (dict): Dictionary containing the query string parameters. headers (dict): HTTP Headers that may be needed for the request. auth (tuple): User ID and password for Basic Auth certificate_path (str): Path to the ssl certificate. Returns: response: (HttpResponse): Response object from requests.get api request
codesearchnet
def from_csv(input_csv, headers=None, schema_file=None): if (headers is not None): names = headers elif (schema_file is not None): with _util.open_local_or_gcs(schema_file, mode='r') as f: schema = json.load(f) names = [x['name'] for x in schema] else: raise ValueError('Either headers or schema_file is needed') all_files = _util.glob_files(input_csv) all_df = [] for file_name in all_files: with _util.open_local_or_gcs(file_name, mode='r') as f: all_df.append(pd.read_csv(f, names=names)) df = pd.concat(all_df, ignore_index=True) if (('target' not in df) or ('predicted' not in df)): raise ValueError('Cannot find "target" or "predicted" column') labels = sorted((set(df['target']) | set(df['predicted']))) cm = confusion_matrix(df['target'], df['predicted'], labels=labels) return ConfusionMatrix(cm, labels)
Create a ConfusionMatrix from a CSV file. Args: input_csv: Path to a CSV file (with no header). Can be local or GCS path. headers: CSV headers. If present, it must include 'target' and 'predicted'. schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None. If present, it must include 'target' and 'predicted' columns. Returns: A ConfusionMatrix that can be plotted. Raises: ValueError if both headers and schema_file are None, or it does not include 'target' or 'predicted' columns.
codesearchnet
def create_ingress_rule(self, app, rule): if isinstance(rule, dict): start_port = rule.get('start_port') end_port = rule.get('end_port') protocol = rule.get('protocol', 'tcp') requested_cross_account = rule.get('env', self.env) if (self.env == requested_cross_account): cross_account_env = None cross_account_vpc_id = None else: cross_account_env = requested_cross_account cross_account_vpc_id = get_vpc_id(cross_account_env, self.region) else: start_port = rule end_port = rule protocol = 'tcp' cross_account_env = None cross_account_vpc_id = None created_rule = {'app': app, 'start_port': start_port, 'end_port': end_port, 'protocol': protocol, 'cross_account_env': cross_account_env, 'cross_account_vpc_id': cross_account_vpc_id} self.log.debug('Normalized ingress rule: %s', created_rule) return created_rule
Create a normalized ingress rule. Args: app (str): Application name rule (dict or int): Allowed Security Group ports and protocols. Returns: dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id
codesearchnet
def GetUsername(self, event, default_username='-'): username = getattr(event, 'username', None) if (username and (username != '-')): return username session_identifier = event.GetSessionIdentifier() if (session_identifier is None): return default_username user_sid = getattr(event, 'user_sid', None) username = self._knowledge_base.GetUsernameByIdentifier(user_sid, session_identifier=session_identifier) return (username or default_username)
Retrieves the username related to the event. Args: event (EventObject): event. default_username (Optional[str]): default username. Returns: str: username.
codesearchnet
def GetScripts(self, dest_dir): metadata_dict = self.watcher.GetMetadata() or {} try: instance_data = metadata_dict['instance']['attributes'] except KeyError: instance_data = None self.logger.warning('Instance attributes were not found.') try: project_data = metadata_dict['project']['attributes'] except KeyError: project_data = None self.logger.warning('Project attributes were not found.') return (self._GetAttributeScripts(instance_data, dest_dir) or self._GetAttributeScripts(project_data, dest_dir))
Retrieve the scripts to execute. Args: dest_dir: string, the path to a directory for storing metadata scripts. Returns: dict, a dictionary mapping set metadata keys with associated scripts.
juraj-google-style
def _build(self, inputs): if nest.is_sequence(inputs): merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)] return nest.pack_sequence_as(inputs, merged_tensors) return self._merge(inputs)
Connects the MergeDims module into the graph. Args: inputs: Tensor or a nested list of Tensors to merge. Its rank must be greater than or equal to `start` + `size`. Returns: The merged Tensor or a nested list of merged Tensors. Raises: ValueError: If any of the `inputs` tensors has insufficient rank.
codesearchnet
def __init__(self, asynchronous_correlation_value=None): super(PollRequestPayload, self).__init__( enums.Tags.REQUEST_PAYLOAD ) self._asynchronous_correlation_value = None self.asynchronous_correlation_value = asynchronous_correlation_value
Construct a Poll request payload struct. Args: asynchronous_correlation_value (bytes): The ID of a pending operation to poll the status of, in bytes. Optional, defaults to None.
juraj-google-style
def unpackVersion(ver): major = ((ver >> (20 * 2)) & mask20) minor = ((ver >> 20) & mask20) patch = (ver & mask20) return (major, minor, patch)
Unpack a system normalized integer representing a software version into its component parts. Args: ver (int): System normalized integer value to unpack into a tuple. Returns: (int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.
codesearchnet
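A small worked example of the 20-bit packing unpackVersion assumes (mask20 = 2**20 - 1); the packVersion helper below is hypothetical, shown only to illustrate the layout.

mask20 = (1 << 20) - 1  # 0xFFFFF, the 20-bit field mask the module is assumed to define

def packVersion(major, minor, patch):
    # Hypothetical inverse of unpackVersion: shift each 20-bit field into place.
    return (major << 40) | (minor << 20) | patch

ver = packVersion(1, 2, 3)
assert unpackVersion(ver) == (1, 2, 3)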
def __init__(self, cache_folder, genome_build): self.cache = EnsemblCache(cache_folder, genome_build) self.prior_time = time.time() - 1 self.rate_limit = 0.067 server_dict = {"grch37": "grch37.", "grch38": ""} self.server = "http: self.check_ensembl_api_version()
obtain the sequence for a transcript from ensembl Args: cache_folder: path to folder for caching data requested from Ensembl genome_build: string indicating the genome build ("grch37" or "grch38")
juraj-google-style
def parse_cytoband(lines): cytobands = {} for line in lines: line = line.rstrip() splitted_line = line.split('\t') chrom = splitted_line[0].lstrip('chr') start = int(splitted_line[1]) stop = int(splitted_line[2]) name = splitted_line[3] if chrom in cytobands: cytobands[chrom][start:stop] = name else: new_tree = intervaltree.IntervalTree() new_tree[start:stop] = name cytobands[chrom] = new_tree return cytobands
Parse iterable with cytoband coordinates Args: lines(iterable): Strings in the format "chr1\t2300000\t5400000\tp36.32\tgpos25" Returns: cytobands(dict): Dictionary with chromosome names as keys and interval trees as values
juraj-google-style
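A minimal usage sketch, assuming the intervaltree package is installed; the coordinates below are illustrative only.

lines = [
    "chr1\t0\t2300000\tp36.33\tgneg",
    "chr1\t2300000\t5400000\tp36.32\tgpos25",
]
cytobands = parse_cytoband(lines)
# An intervaltree point lookup returns every interval overlapping that position.
hits = cytobands['1'][3000000]
print([iv.data for iv in hits])  # ['p36.32']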
def check_phonefy(self, query, kwargs={}): data = self.launchQueryForMode(query=query, mode="phonefy") if self._somethingFound(data, mode="phonefy"): return data return None
Verifying a phonefy query in this platform. This might be redefined in any class inheriting from Platform. The only condition is that any of this should return a dictionary as defined. Args: ----- query: The element to be searched. kwargs: Dictionary with extra parameters. Just in case. Return: ------- Returns the collected data if it exists or None if not.
juraj-google-style
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu): strides = [1] + strides + [1] rates = [1] + rates + [1] with self.cached_session(use_gpu=use_gpu): out_tensor = nn_ops.erosion2d(constant_op.constant(image), constant_op.constant(kernel), strides=strides, rates=rates, padding=padding, name='erosion2d') self.assertAllClose(out, self.evaluate(out_tensor))
Verifies the output values of the erosion function. Args: image: Input tensor with shape: [batch, in_height, in_width, channels]. kernel: Filter tensor with shape: [filter_height, filter_width, channels]. strides: Output strides, specified as [stride_height, stride_width]. rates: Atrous rates, specified as [rate_height, rate_width]. padding: Padding type. out: Expected output. use_gpu: Whether we are running on GPU.
github-repos
def export_as_file(self, file_path, cv_source): if os.path.exists(file_path): raise exceptions.UserError('{} already exists'.format(file_path)) with open(file_path, 'wb') as f: f.write(self.export_as_code(cv_source).encode('utf8'))
Export the ensemble as a single Python file and saves it to `file_path`. This is EXPERIMENTAL as putting different modules together would probably wreak havoc especially on modules that make heavy use of global variables. Args: file_path (str, unicode): Absolute/local path of place to save file in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features.
juraj-google-style
def iter_package_families(paths=None): for path in (paths or config.packages_path): repo = package_repository_manager.get_repository(path) for resource in repo.iter_package_families(): (yield PackageFamily(resource))
Iterate over package families, in no particular order. Note that multiple package families with the same name can be returned. Unlike packages, families later in the searchpath are not hidden by earlier families. Args: paths (list of str, optional): paths to search for package families, defaults to `config.packages_path`. Returns: `PackageFamily` iterator.
codesearchnet
def add_table(self, table): import astropy table_array = table.__array__() self.table_keys = table.keys() table_columns = [] for i in range(0, len(table.columns[0])): row_data = [] for item in table_array[i]: if isinstance(item, bytes): row_data.append(item.decode('utf-8')) else: row_data.append(item) table_columns.append(row_data) self.table_columns = table_columns self.table_flag = not self.table_flag
Load a VOTable (already accessible on the Python side) into the widget Args: table: votable object
juraj-google-style
def generate(self, model_len=None, model_width=None): if model_len is None: model_len = Constant.MODEL_LEN if model_width is None: model_width = Constant.MODEL_WIDTH if isinstance(model_width, list) and not len(model_width) == model_len: raise ValueError("The length of 'model_width' does not match 'model_len'") elif isinstance(model_width, int): model_width = [model_width] * model_len graph = Graph(self.input_shape, False) output_node_id = 0 n_nodes_prev_layer = self.input_shape[0] for width in model_width: output_node_id = graph.add_layer( StubDense(n_nodes_prev_layer, width), output_node_id ) output_node_id = graph.add_layer( StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id ) output_node_id = graph.add_layer(StubReLU(), output_node_id) n_nodes_prev_layer = width graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id) return graph
Generates a Multi-Layer Perceptron. Args: model_len: An integer. Number of hidden layers. model_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the number of nodes in each hidden layer. If it is an integer, all hidden layers have nodes equal to this value. Returns: An instance of the class Graph. Represents the neural architecture graph of the generated model.
juraj-google-style
def delete_course_completion(self, user_id, payload): return self._delete(urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.completion_status_api_path), payload, self.COMPLETION_PROVIDER_SCOPE)
Delete a completion status previously sent to the Degreed Completion Status endpoint Args: user_id: Unused. payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit) containing the required completion status fields for deletion per Degreed documentation. Returns: A tuple containing the status code and the body of the response. Raises: HTTPError: if we received a failure response code from Degreed
codesearchnet
def _list_all_concrete_functions_for_serialization(self): seen_signatures = [] if self.input_signature is not None: seen_signatures.append((self.input_signature, {})) else: concrete_functions = self._list_all_concrete_functions() for concrete_function in concrete_functions: signature = concrete_function.structured_input_signature flattened = nest.flatten(signature) if any((isinstance(arg, func_graph_module.UnknownArgument) for arg in flattened)): logging.info('Unsupported signature for serialization: %s.', signature) continue equal_to_signature = functools.partial(function_type_utils.is_same_structure, signature, check_values=True) if not any((equal_to_signature(s) for s in seen_signatures)): seen_signatures.append(signature) concrete_functions = [] for args, kwargs in seen_signatures: concrete_functions.append(self.get_concrete_function(*args, **kwargs)) return concrete_functions
Returns all concrete functions for serialization. Returns: A list of instances of `ConcreteFunction`.
github-repos
def whoami(self) -> dict: if not self.access_token: return {} self._try_refresh_access_token() return self.session.get(self.WHOAMI_URL).json()
Returns the basic information about the authenticated character. Obviously doesn't do anything if this Preston instance is not authenticated, so it returns an empty dict. Args: None Returns: character info if authenticated, otherwise an empty dict
juraj-google-style
def clean(self, force: bool=False): with (yield from self._lock): for connection in tuple(self.ready): if force or connection.closed(): connection.close() self.ready.remove(connection)
Clean closed connections. Args: force: Clean connected and idle connections too. Coroutine.
juraj-google-style
def markdown_to_safe_html(markdown_string): warning = '' if isinstance(markdown_string, six.binary_type): markdown_string_decoded = markdown_string.decode('utf-8') markdown_string = markdown_string_decoded.replace(u'\x00', u'') num_null_bytes = (len(markdown_string_decoded) - len(markdown_string)) if num_null_bytes: warning = ('<!-- WARNING: discarded %d null bytes in markdown string after UTF-8 decoding -->\n' % num_null_bytes) string_html = markdown.markdown(markdown_string, extensions=['markdown.extensions.tables']) string_sanitized = bleach.clean(string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES) return (warning + string_sanitized)
Convert Markdown to HTML that's safe to splice into the DOM. Arguments: markdown_string: A Unicode string or UTF-8--encoded bytestring containing Markdown source. Markdown tables are supported. Returns: A string containing safe HTML.
codesearchnet
def __intervals_from_tops(self, tops, values, basis, components, field=None, ignore_nan=True): length = float(basis.size) start, stop = basis[0], basis[-1] tops = [start + (p/(length-1)) * (stop-start) for p in tops] bases = tops[1:] + [stop] list_of_Intervals = [] for i, t in enumerate(tops): v, c, d = values[i], [], {} if ignore_nan and np.isnan(v): continue if (field is not None): d = {field: v} if components is not None: try: c = [deepcopy(components[int(v)])] except IndexError: c = [] if c and (c[0] is None): c = [] interval = Interval(t, bases[i], data=d, components=c) list_of_Intervals.append(interval) return list_of_Intervals
Private method. Take a sequence of tops in an arbitrary dimension, and provide a list of intervals from which a striplog can be made. This is only intended to be used by ``from_image()``. Args: tops (iterable). A list of floats. values (iterable). A list of values to look up. basis (iterable). The depth or position basis the tops are mapped onto. components (iterable). A list of Components. Returns: List. A list of Intervals.
juraj-google-style
def __str__(self): name = self.__class__.__name__ return '%s(Handle %d, Address %d)' % (name, self.Handle, self.Addr)
Returns a formatted string describing the breakpoint. Args: self (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance Returns: String representation of the breakpoint.
juraj-google-style
def put(self, rid, data, raise_on_error=True): response_data = None headers = {'Content-Type': 'application/json', 'DB-Method': 'PUT'} url = '/v2/exchange/db/{}/{}/{}'.format(self.domain, self.data_type, rid) r = self.tcex.session.post(url, json=data, headers=headers) self.tcex.log.debug('datastore put status code: {}'.format(r.status_code)) if r.ok and 'application/json' in r.headers.get('content-type', ''): response_data = r.json() else: error = r.text or r.reason self.tcex.handle_error(805, ['put', r.status_code, error], raise_on_error) return response_data
Update the data for the provided Id. Args: rid (str): The record identifier. data (dict): A search query raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError. Returns: object : Python request response.
juraj-google-style
def sunrise(self, date=None, zenith=None): return (segment.sunrise(date, zenith) for segment in self)
Calculate sunrise times for locations. Args: date (datetime.date): Calculate rise or set for given date zenith (str): Calculate sunrise events, or end of twilight Returns: list of list of datetime.datetime: The time for the sunrise for each point in each segment
codesearchnet
def clean_df(df, header=None, **read_csv_kwargs): df = read_csv(df, header=header, **read_csv_kwargs) df = df.fillna(' ') for col in df.columns: df[col] = df[col].apply(unicode2ascii) return df
Convert UTF8 characters in a CSV file or dataframe into ASCII Args: df (DataFrame or str): DataFrame or path or url to CSV
juraj-google-style
def register_handler(self, handler, event_name, args): if self.started: raise IllegalStateError("Can't register service after polling is started") self.lock.acquire() try: if (event_name in self.handlers): raise DuplicateError('A handler for {} already exists'.format(event_name)) self.handlers[event_name] = (handler, args) finally: self.lock.release()
Registers an event handler. One type of event can only have one event handler associated with it. Args: handler: The event handler function to be registered. event_name: Name of the event the handler is for. args: User arguments to be passed to the handler when it's called. Raises: IllegalStateError: Raised if attempts to register a handler after the dispatcher starts running. DuplicateError: Raised if attempts to register more than one handler for one type of event.
codesearchnet
def change_extension(self, filepath, new_extension): (filename, ext) = os.path.splitext(filepath) return '.'.join([filename, new_extension])
Change final filename extension. Args: filepath (str): A file path (relative or absolute). new_extension (str): New extension name (without leading dot) to apply. Returns: str: Filepath with new extension.
codesearchnet
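A standalone, runnable restatement of the same os.path logic with two example calls.

import os

def change_extension(filepath, new_extension):
    # os.path.splitext('docs/report.txt') -> ('docs/report', '.txt')
    filename, ext = os.path.splitext(filepath)
    return '.'.join([filename, new_extension])

print(change_extension('docs/report.txt', 'md'))   # docs/report.md
print(change_extension('archive.tar.gz', 'bz2'))   # archive.tar.bz2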
def from_json(data): memfiles = InMemoryFiles() memfiles.files = json.loads(data) return memfiles
Convert JSON into a in memory file storage. Args: data (str): valid JSON with path and filenames and the base64 encoding of the file content. Returns: InMemoryFiles: in memory file storage
juraj-google-style
def write(self, name, **data): data['name'] = name if (not ('timestamp' in data)): data['timestamp'] = datetime.utcnow() try: self.producer.send(topic=self.topic, value=data) self.producer.flush() except (KafkaTimeoutError, NoBrokersAvailable) as exc: logger.warning('writing metric %r failure %r', data, exc)
Write the metric to kafka Args: name (str): The name of the metric to write data (dict): Additional data to store with the metric
codesearchnet
def tap(self, locator, x_offset=None, y_offset=None, count=1): driver = self._current_application() el = self._element_find(locator, True, True) action = TouchAction(driver) action.tap(el, x_offset, y_offset, count).perform()
Tap element identified by ``locator``. Args: - ``x_offset`` - (optional) x coordinate to tap, relative to the top left corner of the element. - ``y_offset`` - (optional) y coordinate. If y is used, x must also be set, and vice versa - ``count`` - can be used for multiple times of tap on that element
juraj-google-style
def subscribe_registration_ids_to_topic(self, registration_ids, topic_name): url = 'https: payload = {'to': ('/topics/' + topic_name), 'registration_tokens': registration_ids} response = self.requests_session.post(url, json=payload) if (response.status_code == 200): return True elif (response.status_code == 400): error = response.json() raise InvalidDataError(error['error']) else: raise FCMError()
Subscribes a list of registration ids to a topic Args: registration_ids (list): ids to be subscribed topic_name (str): name of topic Returns: True: if operation succeeded Raises: InvalidDataError: data sent to server was incorrectly formatted FCMError: an error occurred on the server
codesearchnet
def to_dataframe(self, start_row=0, max_rows=None): fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows) count = 0 page_token = None df = None while True: page_rows, page_token = fetcher(page_token, count) if len(page_rows): count += len(page_rows) if df is None: df = pandas.DataFrame.from_records(page_rows) else: df = df.append(page_rows, ignore_index=True) if not page_token: break ordered_fields = [field.name for field in self.schema] return df[ordered_fields] if df is not None else pandas.DataFrame()
Exports the table to a Pandas dataframe. Args: start_row: the row of the table at which to start the export (default 0) max_rows: an upper limit on the number of rows to export (default None) Returns: A Pandas dataframe containing the table data.
juraj-google-style
def is_value_type_valid_for_exact_conditions(self, value): if (isinstance(value, string_types) or isinstance(value, (numbers.Integral, float))): return True return False
Method to validate if the value is valid for exact match type evaluation. Args: value: Value to validate. Returns: Boolean: True if value is a string, boolean, or number. Otherwise False.
codesearchnet
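Illustrative results from the same check, restated standalone; note that Python booleans satisfy the numbers.Integral test, which is why the docstring counts booleans as valid.

import numbers
from six import string_types

def is_valid_for_exact(value):
    return isinstance(value, string_types) or isinstance(value, (numbers.Integral, float))

print(is_valid_for_exact('us'))    # True
print(is_valid_for_exact(True))    # True  (bool is a subclass of numbers.Integral)
print(is_valid_for_exact(3.5))     # True
print(is_valid_for_exact([1, 2]))  # False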
def get_end_time_metric(result: PipelineResult, namespace: str, name: str) -> int: distributions = result.metrics().query(MetricsFilter().with_namespace(namespace).with_name(name))['distributions'] max_list = list(map(lambda m: m.result.max, distributions)) return max(max_list) if len(max_list) > 0 else -1
get the end time out of all times recorded by the specified distribution metric Args: result: the PipelineResult which metrics are read from namespace: a string representing the namespace of wanted metric name: a string representing the name of the wanted metric Returns: the largest time in the metric or -1 if it doesn't exist
github-repos
def as_tuning_range(self, name): return {'Name': name, 'MinValue': to_str(self.min_value), 'MaxValue': to_str(self.max_value), 'ScalingType': self.scaling_type}
Represent the parameter range as a dictionary suitable for a request to create an Amazon SageMaker hyperparameter tuning job. Args: name (str): The name of the hyperparameter. Returns: dict[str, str]: A dictionary that contains the name and values of the hyperparameter.
juraj-google-style
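Assuming a parameter object with min_value=0.01, max_value=0.2 and scaling_type='Logarithmic' (illustrative values), a call like as_tuning_range('learning_rate') would return:

{
    'Name': 'learning_rate',
    'MinValue': '0.01',
    'MaxValue': '0.2',
    'ScalingType': 'Logarithmic',
}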
def clean_channel_worker_username(self): channel_worker_username = self.cleaned_data['channel_worker_username'].strip() try: User.objects.get(username=channel_worker_username) except User.DoesNotExist: raise ValidationError(ValidationMessages.INVALID_CHANNEL_WORKER.format(channel_worker_username=channel_worker_username)) return channel_worker_username
Clean enterprise channel worker user form field Returns: str: the cleaned value of channel user username for transmitting courses metadata.
codesearchnet
def _build_trial_meta(cls, expr_dir): meta_file = os.path.join(expr_dir, EXPR_META_FILE) meta = parse_json(meta_file) if not meta: job_id = expr_dir.split("/")[-2] trial_id = expr_dir[-8:] params = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE)) meta = { "trial_id": trial_id, "job_id": job_id, "status": "RUNNING", "type": "TUNE", "start_time": os.path.getctime(expr_dir), "end_time": None, "progress_offset": 0, "result_offset": 0, "params": params } if not meta.get("start_time", None): meta["start_time"] = os.path.getctime(expr_dir) if isinstance(meta["start_time"], float): meta["start_time"] = timestamp2date(meta["start_time"]) if meta.get("end_time", None): meta["end_time"] = timestamp2date(meta["end_time"]) meta["params"] = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE)) return meta
Build meta file for trial. Args: expr_dir (str): Directory path of the experiment. Return: A dict of trial meta info.
juraj-google-style
def parse_expression(src): src = STANDARD_PREAMBLE + src.strip() node = parse(src, preamble_len=STANDARD_PREAMBLE_LEN, single_node=True) if __debug__: if not isinstance(node, gast.Expr): raise ValueError('expected exactly one node of type Expr, got {}'.format(node)) return node.value
Returns the AST of given identifier. Args: src: A piece of code that represents a single Python expression Returns: A gast.AST object. Raises: ValueError: if src does not consist of a single Expression.
github-repos
def transpose(vari): if isinstance(vari, Poly): core = vari.A.copy() for key in vari.keys: core[key] = transpose(core[key]) return Poly(core, vari.dim, vari.shape[::-1], vari.dtype) return numpy.transpose(vari)
Transpose a shapeable quantity. Args: vari (chaospy.poly.base.Poly, numpy.ndarray): Quantity of interest. Returns: (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari``. Examples: >>> P = chaospy.reshape(chaospy.prange(4), (2,2)) >>> print(P) [[1, q0], [q0^2, q0^3]] >>> print(chaospy.transpose(P)) [[1, q0^2], [q0, q0^3]]
juraj-google-style
def read_nose(in_file): suites = {} doc_xml = minidom.parse(in_file) suite_xml = doc_xml.getElementsByTagName('testsuite')[0] for case_xml in suite_xml.getElementsByTagName('testcase'): classname = case_xml.getAttribute('classname') if (classname not in suites): suites[classname] = [] case = {'name': case_xml.getAttribute('name'), 'time': float(case_xml.getAttribute('time'))} skipped_xml = case_xml.getElementsByTagName('skipped') if skipped_xml: if skipped_xml[0].hasAttribute('type'): type = skipped_xml[0].getAttribute('type') else: type = '' case['skipped'] = {'type': type, 'message': skipped_xml[0].getAttribute('message'), 'text': ''.join([child.nodeValue for child in skipped_xml[0].childNodes])} failure_xml = case_xml.getElementsByTagName('failure') if failure_xml: if failure_xml[0].hasAttribute('type'): type = failure_xml[0].getAttribute('type') else: type = '' case['failure'] = {'type': type, 'message': failure_xml[0].getAttribute('message'), 'text': ''.join([child.nodeValue for child in failure_xml[0].childNodes])} error_xml = case_xml.getElementsByTagName('error') if error_xml: if error_xml[0].hasAttribute('type'): type = error_xml[0].getAttribute('type') else: type = '' case['error'] = {'type': type, 'message': error_xml[0].getAttribute('message'), 'text': ''.join([child.nodeValue for child in error_xml[0].childNodes])} suites[classname].append(case) return suites
Parse nose-style test reports into a `dict` Args: in_file (:obj:`str`): path to nose-style test report Returns: :obj:`dict`: dictionary of test suites
codesearchnet
def add(self, index): if ((index - self.flush_at) < self.interval): return now = time.time() elapsed = (now - self.lap) elapsed_total = (now - self.start) it = (index - self.flush_at) self.lap = now if self.verbose: logger.info('iter={} {{{}}}={}[sec/{}iter] {}[sec]'.format(index, self.name, elapsed, it, elapsed_total)) if (self.fd is not None): print('{} {} {} {}'.format(index, elapsed, it, elapsed_total), file=self.fd) self.flush_at = index
Calculate the time elapsed from the point this method was previously called (or this object was created) to when it is called now. Args: index (int): Index to be displayed, and used to take intervals.
codesearchnet
def _convert_and_export_metrics(self, convert_func, *args, **kwargs): self._increase_conversion_attempt_metric() self._save_conversion_params_metric() start_time = time.process_time() result = convert_func(self, *args, **kwargs) elapsed_time_ms = (time.process_time() - start_time) * 1000 if result: self._increase_conversion_success_metric() self._set_conversion_latency_metric(round(elapsed_time_ms)) self._tflite_metrics.export_metrics() if self.exclude_conversion_metadata or self._experimental_use_buffer_offset: return result model_object = flatbuffer_utils.convert_bytearray_to_object(result) if _check_model_use_buffer_offset(model_object): return result sparsity_modes = _get_sparsity_modes(model_object) model_hash = _get_model_hash(model_object) self._metadata.options.modelOptimizationModes.extend(sparsity_modes) self._metadata.environment.modelHash = model_hash model_object = _populate_conversion_metadata(model_object, self._metadata) return flatbuffer_utils.convert_object_to_bytearray(model_object)
Wraps around convert function to export metrics. Args: convert_func: The convert function to wrap. *args: Positional arguments of the convert function. **kwargs: The keyword arguments of the convert function. Returns: The decorator to wrap the convert function.
github-repos
def values(self, *args): return [dict(zip(args, values_list)) for values_list in self.values_list(flatten=False, *args)]
Returns list of dicts (field names as keys) for given fields. Args: \*args: List of fields to be returned as dict. Returns: list of dicts for given fields. Example: >>> Person.objects.filter(age__gte=16, name__startswith='jo').values('name', 'lastname')
juraj-google-style
def _get_path_for_op_id(self, id: str) -> Optional[str]: for path_key, path_value in self._get_spec()['paths'].items(): for method in self.METHODS: if method in path_value: if self.OPERATION_ID_KEY in path_value[method]: if path_value[method][self.OPERATION_ID_KEY] == id: return path_key return None
Searches the spec for a path matching the operation id. Args: id: operation id Returns: path to the endpoint, or None if not found
juraj-google-style
def guid(valu=None): if valu is None: return binascii.hexlify(os.urandom(16)).decode('utf8') byts = s_msgpack.en(valu) return hashlib.md5(byts).hexdigest()
Get a 16 byte guid value. By default, this is a random guid value. Args: valu: Object used to construct the guid valu from. This must be able to be msgpack'd. Returns: str: 32 character, lowercase ascii string.
juraj-google-style
def get_preprocessor(model_name: str) -> Optional[Union['AutoTokenizer', 'AutoFeatureExtractor', 'AutoProcessor']]: from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer try: return AutoProcessor.from_pretrained(model_name) except (ValueError, OSError, KeyError): tokenizer, feature_extractor = (None, None) try: tokenizer = AutoTokenizer.from_pretrained(model_name) except (OSError, KeyError): pass try: feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) except (OSError, KeyError): pass if tokenizer is not None and feature_extractor is not None: raise ValueError(f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor.") elif tokenizer is None and feature_extractor is None: return None elif tokenizer is not None: return tokenizer else: return feature_extractor
Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor are loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found.
github-repos
def _maybe_repeat(self, x): if isinstance(x, list): assert len(x) == self.n return x else: return [x] * self.n
Utility function for processing arguments that are singletons or lists. Args: x: either a list of self.n elements, or not a list. Returns: a list of self.n elements.
juraj-google-style
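A tiny standalone sketch of the same normalization, with self.n hard-coded to 3 for illustration.

n = 3

def maybe_repeat(x):
    # Lists must already have length n; singletons are broadcast.
    if isinstance(x, list):
        assert len(x) == n
        return x
    return [x] * n

print(maybe_repeat(7))          # [7, 7, 7]
print(maybe_repeat([1, 2, 3]))  # [1, 2, 3]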
def __set_unkown_effect(self, hgvs_string): unknown_effect_list = ['?', '(=)', '='] if hgvs_string in unknown_effect_list: self.unknown_effect = True elif "(" in hgvs_string: self.unknown_effect = True else: self.unknown_effect = False if "?" in hgvs_string: self.is_missing_info = True else: self.is_missing_info = False
Sets a flag for unknown effect according to HGVS syntax. The COSMIC database also uses unconventional question marks to denote missing information. Args: hgvs_string (str): hgvs syntax with "p." removed
juraj-google-style
def CreateKey(self, private_key=None): if (private_key is None): private_key = bytes(Random.get_random_bytes(32)) key = KeyPair(priv_key=private_key) self._keys[key.PublicKeyHash.ToBytes()] = key return key
Create a KeyPair Args: private_key (iterable_of_ints): (optional) 32 byte private key Returns: KeyPair: a KeyPair instance
codesearchnet
def transform_tensor(self, tensor): dim = tensor.shape rank = len(dim) assert all([i == 3 for i in dim]) lc = string.ascii_lowercase indices = lc[:rank], lc[rank:2 * rank] einsum_string = ','.join([a + i for a, i in zip(*indices)]) einsum_string += ',{}->{}'.format(*indices[::-1]) einsum_args = [self.rotation_matrix] * rank + [tensor] return np.einsum(einsum_string, *einsum_args)
Applies rotation portion to a tensor. Note that tensor has to be in full form, not the Voigt form. Args: tensor (numpy array): a rank n tensor Returns: Transformed tensor.
juraj-google-style
def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT): pids = [] for line in in_stream: pid = line.strip() if (pid == '') or pid.startswith('#'): continue pids.append(pid) if (len(pids) < 2): raise ValueError('Insufficient numbers of identifiers provided.') logging.info('Read {} identifiers'.format(len(pids))) ore = ResourceMap(base_url=base_url) logging.info('ORE PID = {}'.format(pids[0])) ore.initialize(pids[0]) logging.info('Metadata PID = {}'.format(pids[1])) ore.addMetadataDocument(pids[1]) ore.addDataDocuments(pids[2:], pids[1]) return ore
Create a simple OAI-ORE Resource Map with one Science Metadata document and any number of Science Data objects, using a stream of PIDs. Args: in_stream: The first non-blank line is the PID of the resource map itself. Second line is the science metadata PID and remaining lines are science data PIDs. Example stream contents: :: PID_ORE_value sci_meta_pid_value data_pid_1 data_pid_2 data_pid_3 base_url : str Root of the DataONE environment in which the Resource Map will be used. Returns: ResourceMap : OAI-ORE Resource Map
codesearchnet
def from_rotation_and_translation_and_time_reversal(rotation_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), translation_vec=(0, 0, 0), time_reversal=1, tol=0.1): symmop = SymmOp.from_rotation_and_translation(rotation_matrix=rotation_matrix, translation_vec=translation_vec, tol=tol) return MagSymmOp.from_symmop(symmop, time_reversal)
Creates a symmetry operation from a rotation matrix, translation vector and time reversal operator. Args: rotation_matrix (3x3 array): Rotation matrix. translation_vec (3x1 array): Translation vector. time_reversal (int): Time reversal operator, +1 or -1. tol (float): Tolerance to determine if rotation matrix is valid. Returns: MagSymmOp object
codesearchnet
def _ParsePropertiesXMLFile(self, xml_data): xml_root = ElementTree.fromstring(xml_data) properties = {} for xml_element in xml_root.iter(): if (not xml_element.text): continue (_, _, name) = xml_element.tag.partition('}') if (name == 'lpstr'): continue property_name = self._PROPERTY_NAMES.get(name, None) if (not property_name): property_name = self._FormatPropertyName(name) properties[property_name] = xml_element.text return properties
Parses a properties XML file. Args: xml_data (bytes): data of a properties XML file. Returns: dict[str, object]: properties. Raises: zipfile.BadZipfile: if the properties XML file cannot be read.
codesearchnet
def __eq__(self, rhs): return (isinstance(rhs, MockMethod) and self._name == rhs._name and self._params == rhs._params and self._named_params == rhs._named_params)
Test whether this MockMethod is equivalent to another MockMethod. Args: # rhs: the right hand side of the test rhs: MockMethod
juraj-google-style
def load_glossary(file_path: str, read_json=False) -> List[str]: if read_json: if file_path.endswith('.gz'): return json.load(gzip.open(file_path)) return json.load(open(file_path)) return open(file_path).read().splitlines()
A glossary is a text file, one entry per line. Args: file_path (str): path to a text file containing a glossary. read_json (bool): set True if the glossary is in json format Returns: List of the strings in the glossary.
codesearchnet
def wait(fs, timeout=None, return_when=ALL_COMPLETED): with _AcquireFutures(fs): done = set((f for f in fs if (f._state in [CANCELLED_AND_NOTIFIED, FINISHED]))) not_done = (set(fs) - done) if ((return_when == FIRST_COMPLETED) and done): return DoneAndNotDoneFutures(done, not_done) elif ((return_when == FIRST_EXCEPTION) and done): if any((f for f in done if ((not f.cancelled()) and (f.exception() is not None)))): return DoneAndNotDoneFutures(done, not_done) if (len(done) == len(fs)): return DoneAndNotDoneFutures(done, not_done) waiter = _create_and_install_waiters(fs, return_when) waiter.event.wait(timeout) for f in fs: with f._condition: f._waiters.remove(waiter) done.update(waiter.finished_futures) return DoneAndNotDoneFutures(done, (set(fs) - done))
Wait for the futures in the given sequence to complete. Args: fs: The sequence of Futures (possibly created by different Executors) to wait upon. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. return_when: Indicates when this function should return. The options are: FIRST_COMPLETED - Return when any future finishes or is cancelled. FIRST_EXCEPTION - Return when any future finishes by raising an exception. If no future raises an exception then it is equivalent to ALL_COMPLETED. ALL_COMPLETED - Return when all futures finish or are cancelled. Returns: A named 2-tuple of sets. The first set, named 'done', contains the futures that completed (is finished or cancelled) before the wait completed. The second set, named 'not_done', contains uncompleted futures.
codesearchnet
def ellipse_distance(item_a, time_a, item_b, time_b, max_value): ts = np.array([0, np.pi]) ell_a = item_a.get_ellipse_model(time_a) ell_b = item_b.get_ellipse_model(time_b) ends_a = ell_a.predict_xy(ts) ends_b = ell_b.predict_xy(ts) distances = np.sqrt((((ends_a[:, 0:1] - ends_b[:, 0:1].T) ** 2) + ((ends_a[:, 1:] - ends_b[:, 1:].T) ** 2))) return (np.minimum(distances[(0, 1)], max_value) / float(max_value))
Calculate differences in the properties of ellipses fitted to each object. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
codesearchnet
def get_subscribers(object_type: str) -> List[str]: return DB.get_list(_keys.subscribers(object_type))
Get the list of subscribers to events of the object type. Args: object_type (str): Type of object. Returns: List[str], list of subscriber names.
codesearchnet
def expand_recurring(number, repeat=5): if "[" in number: pattern_index = number.index("[") pattern = number[pattern_index + 1:-1] number = number[:pattern_index] number = number + pattern * (repeat + 1) return number
Expands a recurring pattern within a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) repeat: the number of times to expand the pattern. Returns: The original number with recurring pattern expanded. Example: >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3) (1, '.', 0, 9, 9, 9, 9)
juraj-google-style
def result_code(self, value): if value == self._defaults['resultCode'] and 'resultCode' in self._values: del self._values['resultCode'] else: self._values['resultCode'] = value
The result_code property. Args: value (string). the property value.
juraj-google-style
def __init__(self, name, number, aliases=None, description=None): super(EnumerationValue, self).__init__() self.aliases = aliases or [] self.description = description self.name = name self.number = number
Initializes an enumeration value. Args: name (str): name. number (int): number. aliases (Optional[list[str]]): aliases. description (Optional[str]): description.
juraj-google-style
def __delitem__(self, obj, sync=True): if self._is_item: raise TypeError("This an item of the parent ListNode") list(self._generate_instances()) _lnk_key = None if isinstance(obj, six.string_types): _lnk_key = obj _obj = self.node_dict[obj] elif not isinstance(obj, self.__class__): _lnk_key = obj.key _obj = self.node_dict[obj.key] del self.node_dict[obj.key] else: _obj = obj self.node_stack.remove(_obj) if _lnk_key and sync: rel_name = "%s.%s" % (_obj.__class__.__name__, _obj.get_link()['field']) remote_node_name = self._root_node.get_link(field=rel_name)['reverse'] _lnk_obj = getattr(_obj, _obj.get_link()['field']) getattr(_lnk_obj, remote_node_name).__delitem__(self._root_node.key, sync=False) self._root_node.on_save.append(_lnk_obj.save)
Allow usage of "del" statement on ListNodes with bracket notation. Args: obj: ListNode item or relation key. Raises: TypeError: If it's called on a ListNode item (instead of the ListNode itself)
juraj-google-style
def parse_rfc3339_utc_string(rfc3339_utc_string): m = re.match('(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):(\\d{2}).?(\\d*)Z', rfc3339_utc_string) if (not m): return None groups = m.groups() if (len(groups[6]) not in (0, 3, 6, 9)): return None g = [int(val) for val in groups[:6]] fraction = groups[6] if (not fraction): micros = 0 elif (len(fraction) == 3): micros = (int(fraction) * 1000) elif (len(fraction) == 6): micros = int(fraction) elif (len(fraction) == 9): micros = int(round((int(fraction) / 1000))) else: assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction)) try: return datetime(g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc) except ValueError as e: assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(rfc3339_utc_string, e)
Converts a datestamp from RFC3339 UTC to a datetime. Args: rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format Returns: A datetime.
codesearchnet
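Example inputs showing the fraction handling (0, 3, 6 or 9 fractional digits are accepted), assuming pytz is available.

print(parse_rfc3339_utc_string('2017-03-21T05:12:48Z'))
# 2017-03-21 05:12:48+00:00
print(parse_rfc3339_utc_string('2017-03-21T05:12:48.123Z'))
# 2017-03-21 05:12:48.123000+00:00  (3 digits are scaled to microseconds)
print(parse_rfc3339_utc_string('2017-03-21T05:12:48.12Z'))
# None  (any other fraction length is rejected)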
def output(self, filename): info = 'Inheritance\n' if (not self.contracts): return info += (blue('Child_Contract -> ') + green('Immediate_Base_Contracts')) info += green(' [Not_Immediate_Base_Contracts]') for child in self.contracts: info += blue(f'\n+ {child.name}') if child.inheritance: immediate = child.immediate_inheritance not_immediate = [i for i in child.inheritance if (i not in immediate)] info += (' -> ' + green(', '.join(map(str, immediate)))) if not_immediate: info += ((', [' + green(', '.join(map(str, not_immediate)))) + ']') info += (green('\n\nBase_Contract -> ') + blue('Immediate_Child_Contracts')) info += blue(' [Not_Immediate_Child_Contracts]') for base in self.contracts: info += green(f'\n+ {base.name}') children = list(self._get_child_contracts(base)) if children: immediate = [child for child in children if (base in child.immediate_inheritance)] not_immediate = [child for child in children if (not (child in immediate))] info += (' -> ' + blue(', '.join(map(str, immediate)))) if not_immediate: info += ((', [' + blue(', '.join(map(str, not_immediate)))) + ']') self.info(info)
Output the inheritance relation. _filename is not used. Args: _filename (string)
codesearchnet
def build_sanitiser_node_dict( cfg, sinks_in_file ): sanitisers = list() for sink in sinks_in_file: sanitisers.extend(sink.sanitisers) sanitisers_in_file = list() for sanitiser in sanitisers: for cfg_node in cfg.nodes: if sanitiser in cfg_node.label: sanitisers_in_file.append(Sanitiser(sanitiser, cfg_node)) sanitiser_node_dict = dict() for sanitiser in sanitisers: sanitiser_node_dict[sanitiser] = list(find_sanitiser_nodes( sanitiser, sanitisers_in_file )) return sanitiser_node_dict
Build a dict of string -> TriggerNode pairs, where the string is the sanitiser and the TriggerNode is a TriggerNode of the sanitiser. Args: cfg(CFG): cfg to traverse. sinks_in_file(list[TriggerNode]): list of TriggerNodes containing the sinks in the file. Returns: A string -> TriggerNode dict.
juraj-google-style
def most_exposes(python_type): _exposes = set() try: do_not_expose = set((python_type.__dir__(object) + ['__slots__', '__module__', '__weakref__'])) empty = python_type.__new__(python_type) except AttributeError: try: _exposes = python_type.__slots__ except AttributeError: pass except TypeError: for _workaround in storable_workarounds: try: _exposes = _workaround(python_type) except (SystemExit, KeyboardInterrupt): raise except: pass else: break else: all_members = empty.__dir__() for attr in all_members: if (attr in do_not_expose): continue try: getattr(empty, attr) except AttributeError as e: if e.args: msg = e.args[0] if ((msg == attr) or msg.endswith("' object has no attribute '{}'".format(attr))): _exposes.add(attr) except (SystemExit, KeyboardInterrupt): raise except: pass for attr in ('__dict__',): if (attr in all_members): _exposes.add(attr) return list(_exposes)
Core engine for the automatic generation of storable instances. Finds the attributes exposed by the objects of a given type. Mostly Python3-only. Does not handle types whose `__new__` method requires extra arguments either. Arguments: python_type (type): object type. Returns: list: attributes exposed.
codesearchnet
def InspectZipFile(self, parser_mediator, zip_file): try: xml_data = zip_file.read('_rels/.rels') property_files = self._ParseRelationshipsXMLFile(xml_data) except (IndexError, IOError, KeyError, OverflowError, ValueError, zipfile.BadZipfile) as exception: parser_mediator.ProduceExtractionWarning(( 'Unable to parse relationships XML file: _rels/.rels with error: ' '{0!s}').format(exception)) return metadata = {} for path in property_files: try: xml_data = zip_file.read(path) properties = self._ParsePropertiesXMLFile(xml_data) except (IndexError, IOError, KeyError, OverflowError, ValueError, zipfile.BadZipfile) as exception: parser_mediator.ProduceExtractionWarning(( 'Unable to parse properties XML file: {0:s} with error: ' '{1!s}').format(path, exception)) continue metadata.update(properties) event_data = OpenXMLEventData() event_data.app_version = self._GetPropertyValue( parser_mediator, metadata, 'app_version') event_data.app_version = self._GetPropertyValue( parser_mediator, metadata, 'app_version') event_data.author = self._GetPropertyValue( parser_mediator, metadata, 'author') event_data.creating_app = self._GetPropertyValue( parser_mediator, metadata, 'creating_app') event_data.doc_security = self._GetPropertyValue( parser_mediator, metadata, 'doc_security') event_data.hyperlinks_changed = self._GetPropertyValue( parser_mediator, metadata, 'hyperlinks_changed') event_data.i4 = self._GetPropertyValue( parser_mediator, metadata, 'i4') event_data.last_saved_by = self._GetPropertyValue( parser_mediator, metadata, 'last_saved_by') event_data.links_up_to_date = self._GetPropertyValue( parser_mediator, metadata, 'links_up_to_date') event_data.number_of_characters = self._GetPropertyValue( parser_mediator, metadata, 'number_of_characters') event_data.number_of_characters_with_spaces = self._GetPropertyValue( parser_mediator, metadata, 'number_of_characters_with_spaces') event_data.number_of_lines = self._GetPropertyValue( parser_mediator, metadata, 'number_of_lines') event_data.number_of_pages = self._GetPropertyValue( parser_mediator, metadata, 'number_of_pages') event_data.number_of_paragraphs = self._GetPropertyValue( parser_mediator, metadata, 'number_of_paragraphs') event_data.number_of_words = self._GetPropertyValue( parser_mediator, metadata, 'number_of_words') event_data.revision_number = self._GetPropertyValue( parser_mediator, metadata, 'revision_number') event_data.scale_crop = self._GetPropertyValue( parser_mediator, metadata, 'scale_crop') event_data.shared_doc = self._GetPropertyValue( parser_mediator, metadata, 'shared_doc') event_data.template = self._GetPropertyValue( parser_mediator, metadata, 'template') event_data.total_time = self._GetPropertyValue( parser_mediator, metadata, 'total_time') self._ProduceEvent( parser_mediator, event_data, metadata, 'created', definitions.TIME_DESCRIPTION_CREATION, 'creation time') self._ProduceEvent( parser_mediator, event_data, metadata, 'modified', definitions.TIME_DESCRIPTION_MODIFICATION, 'modification time') self._ProduceEvent( parser_mediator, event_data, metadata, 'last_printed', definitions.TIME_DESCRIPTION_LAST_PRINTED, 'last printed time')
Parses an OXML file-like object.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    zip_file (zipfile.ZipFile): the zip file containing OXML content. It is
        not closed in this method, but will be closed by the parser logic
        in czip.py.

Raises:
    UnableToParseFile: when the file cannot be parsed.
juraj-google-style
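For context, the relationship and property parts that the parser above walks are reachable with just `zipfile` and `xml.etree.ElementTree`. This is a hedged sketch rather than the plaso parser itself, and 'example.docx' is a placeholder path.

import zipfile
import xml.etree.ElementTree as ET

with zipfile.ZipFile('example.docx') as zf:
    # The package-level relationships part lists the property XML files.
    rels = ET.fromstring(zf.read('_rels/.rels'))
    targets = [rel.get('Target') for rel in rels
               if 'properties' in rel.get('Type', '').lower()]

    for target in targets:
        part = ET.fromstring(zf.read(target.lstrip('/')))
        for element in part:
            # Strip the XML namespace to get the bare property name.
            name = element.tag.rsplit('}', 1)[-1]
            print(name, element.text)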
def GetUserByEmail(self, email): user = self.rpc_helper.GetAccountInfoByEmail(email) return GitkitUser.FromApiResponse(user)
Gets user info by email. Args: email: string, the user email. Returns: GitkitUser, containing the user info.
juraj-google-style
def deserialize_subject_info(subject_info_xml): try: return d1_common.xml.deserialize(subject_info_xml) except ValueError as e: raise d1_common.types.exceptions.InvalidToken(0, 'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format(subject_info_xml, str(e)))
Deserialize SubjectInfo XML doc to native object. Args: subject_info_xml: str SubjectInfo XML doc Returns: SubjectInfo PyXB object
codesearchnet
def url_is_project(url, default='not_a_func'): try: u = resolve(url) if u and u.func != default: return True except Resolver404: static_url = settings.STATIC_URL static_url_wd = static_url.lstrip('/') if url.startswith(static_url): url = url[len(static_url):] elif url.startswith(static_url_wd): url = url[len(static_url_wd):] else: return False if finders.find(url): return True return False
Check if URL is part of the current project's URLs.

Args:
    url (str): URL to check.
    default (callable): value compared against the resolved view function;
        URLs that resolve to this default are not counted as project URLs.

Returns:
    bool: True if the URL resolves to a project view or to a file served by
    the static files finders, False otherwise.
juraj-google-style
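A hedged usage sketch for the function above. It only runs inside a configured Django project, and the URLs shown are placeholders.

from django.conf import settings  # settings must already be configured

internal = url_is_project('/admin/login/')           # placeholder project URL
static_asset = url_is_project(settings.STATIC_URL + 'css/site.css')
external = url_is_project('/no-such-path/')          # neither resolvable nor static

print(internal, static_asset, external)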
def ParseLSQuarantineRow( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = LsQuarantineEventData() event_data.agent = self._GetRowValue(query_hash, row, 'Agent') event_data.data = self._GetRowValue(query_hash, row, 'Data') event_data.query = query event_data.url = self._GetRowValue(query_hash, row, 'URL') timestamp = self._GetRowValue(query_hash, row, 'Time') date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a launch services quarantine event row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
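The CocoaTime values consumed above count seconds since 2001-01-01 00:00:00 UTC; here is a small standard-library check of that convention (the sample value is made up).

from datetime import datetime, timedelta, timezone

COCOA_EPOCH = datetime(2001, 1, 1, tzinfo=timezone.utc)

def cocoa_to_datetime(cocoa_seconds):
    """Convert a Cocoa timestamp (seconds) to an aware datetime."""
    return COCOA_EPOCH + timedelta(seconds=cocoa_seconds)

print(cocoa_to_datetime(0))            # 2001-01-01 00:00:00+00:00
print(cocoa_to_datetime(378691200.0))  # twelve years later: 2013-01-01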
def get_tracks_for_album(self, artist, album, full_album_art_uri=False): subcategories = [artist, album] result = self.get_album_artists(full_album_art_uri=full_album_art_uri, subcategories=subcategories, complete_result=True) result._metadata['search_type'] = 'tracks_for_album' return result
Get the tracks of an artist's album.

Args:
    artist (str): an artist's name.
    album (str): an album name.
    full_album_art_uri (bool): whether the album art URI should be absolute
        (i.e. including the IP address). Default `False`.

Returns:
    A `SearchResult` instance.
codesearchnet
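A hedged usage sketch following SoCo conventions; the discovery helper and attribute names may differ between SoCo versions, and the artist and album names are placeholders.

import soco

device = soco.discovery.any_soco()          # pick any Sonos speaker on the LAN
result = device.music_library.get_tracks_for_album(
    'Miles Davis', 'Kind of Blue', full_album_art_uri=True)

for track in result:
    print(track.title, track.album_art_uri)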
def port(self, value): self._port = value if value is None: try: del self._connectionXML.attrib['port'] except KeyError: pass else: self._connectionXML.set('port', value)
Set the connection's port property. Args: value: New port value. String. Returns: Nothing.
juraj-google-style
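The set-or-delete attribute pattern above can be illustrated with the standard library alone; the `<connection>` element below is a stand-in for the Tableau connection XML, and `set_port` is a hypothetical helper.

import xml.etree.ElementTree as ET

conn = ET.fromstring('<connection server="db.example.com" port="5432"/>')

def set_port(element, value):
    # Setting a value writes the attribute; None removes it if present.
    if value is None:
        element.attrib.pop('port', None)   # no KeyError if already absent
    else:
        element.set('port', value)

set_port(conn, '5433')
print(ET.tostring(conn, encoding='unicode'))
set_port(conn, None)
print(ET.tostring(conn, encoding='unicode'))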
def _parse_query_key(self, key, val, is_escaped): if key.endswith('__contains'): key = key[:(- 10)] val = self._parse_query_modifier('contains', val, is_escaped) elif key.endswith('__range'): key = key[:(- 7)] val = self._parse_query_modifier('range', val, is_escaped) elif key.endswith('__startswith'): key = key[:(- 12)] val = self._parse_query_modifier('startswith', val, is_escaped) elif key.endswith('__endswith'): key = key[:(- 10)] val = self._parse_query_modifier('endswith', val, is_escaped) elif key.endswith('__lt'): key = key[:(- 4)] val = self._parse_query_modifier('lt', val, is_escaped) elif key.endswith('__gt'): key = key[:(- 4)] val = self._parse_query_modifier('gt', val, is_escaped) elif key.endswith('__lte'): key = key[:(- 5)] val = self._parse_query_modifier('lte', val, is_escaped) elif key.endswith('__gte'): key = key[:(- 5)] val = self._parse_query_modifier('gte', val, is_escaped) elif ((key != 'NOKEY') and (not is_escaped)): val = self._escape_query(val) return (key, val)
Strips the query modifier from the key and calls the appropriate value
modifier.

Args:
    key (str): Query key.
    val: Query value.
    is_escaped (bool): True if the value is already escaped and should not
        be escaped again.

Returns:
    Parsed query key and value.
codesearchnet
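A minimal sketch of the `field__modifier` suffix convention the method above handles; the modifier list here is illustrative, not the full set supported by the original backend.

def split_query_key(key):
    modifiers = ('contains', 'range', 'startswith', 'endswith',
                 'lte', 'gte', 'lt', 'gt')
    for modifier in modifiers:
        suffix = '__' + modifier
        if key.endswith(suffix):
            return key[:-len(suffix)], modifier
    return key, None

print(split_query_key('age__gte'))        # ('age', 'gte')
print(split_query_key('name__contains'))  # ('name', 'contains')
print(split_query_key('name'))            # ('name', None)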
def run(self):
    while self.should_run:
        try:
            # last_sequence is assumed to be maintained elsewhere (e.g. a
            # module-level value updated by the gateway event handler).
            self.logger.debug(f'Sending heartbeat, seq {last_sequence}')
            self.ws.send(json.dumps({'op': 1, 'd': last_sequence}))
        except Exception as e:
            self.logger.error(f'Got error in heartbeat: {str(e)}')
        finally:
            elapsed = 0.0
            while elapsed < self.interval and self.should_run:
                time.sleep(self.TICK_INTERVAL)
                elapsed += self.TICK_INTERVAL
Runs the thread This method handles sending the heartbeat to the Discord websocket server, so the connection can remain open and the bot remain online for those commands that require it to be. Args: None
codesearchnet
def Erase(self, partition, timeout_ms=None): self._SimpleCommand(b'erase', arg=partition, timeout_ms=timeout_ms)
Erases the given partition.

Args:
    partition: Partition to clear.
    timeout_ms: Optional timeout in milliseconds to wait for the erase to
        complete.
juraj-google-style
def stage_tc_batch_xid(xid_type, xid_value, owner): xid_string = '{}-{}-{}'.format(xid_type, xid_value, owner) hash_object = hashlib.sha256(xid_string.encode('utf-8')) return hash_object.hexdigest()
Create an xid for a batch job.

Args:
    xid_type (str): The type portion of the xid.
    xid_value (str): The value portion of the xid.
    owner (str): The owner name.

Returns:
    str: A SHA-256 hex digest of '<xid_type>-<xid_value>-<owner>' to use as
    the xid.
juraj-google-style
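Since the xid above is just a SHA-256 over a 'type-value-owner' string, equal inputs always map to the same xid; a self-contained check with made-up values.

import hashlib

def batch_xid(xid_type, xid_value, owner):
    raw = '{}-{}-{}'.format(xid_type, xid_value, owner)
    return hashlib.sha256(raw.encode('utf-8')).hexdigest()

a = batch_xid('address', '1.1.1.1', 'Example Org')
b = batch_xid('address', '1.1.1.1', 'Example Org')
print(a == b)   # True: deterministic, so re-runs address the same object
print(len(a))   # 64 hex characters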
def set_save_handler(save_handler: Optional[Callable[..., Any]]) -> Optional[Callable[..., Any]]: if save_handler and (not callable(save_handler)): raise ValueError('`save_handler` must be callable.') global _SAVE_HANDLER old_handler = _SAVE_HANDLER _SAVE_HANDLER = save_handler return old_handler
Sets global save handler. Args: save_handler: A callable object that takes at least one argument as value to save. `symbolic.save` method will pass through all arguments to this handler and return its return value. Returns: Previous global save handler.
github-repos
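A hedged sketch of the handler-swap pattern above, outside any specific library: install a handler, route saves through it, then restore the previous one. The `save` function and the handler are illustrative stand-ins.

from typing import Any, Callable, Optional

_SAVE_HANDLER: Optional[Callable[..., Any]] = None

def set_save_handler(handler):
    global _SAVE_HANDLER
    if handler is not None and not callable(handler):
        raise ValueError('`save_handler` must be callable.')
    previous, _SAVE_HANDLER = _SAVE_HANDLER, handler
    return previous

def save(value, **kwargs):
    if _SAVE_HANDLER is None:
        raise RuntimeError('no save handler installed')
    return _SAVE_HANDLER(value, **kwargs)

old = set_save_handler(lambda value, **kw: print('saving', value, kw))
save({'a': 1}, path='demo.json')
set_save_handler(old)   # restore whatever was installed before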
def _SetHeader(self, new_values): row = self.row_class() row.row = 0 for v in new_values: row[v] = v self._table[0] = row
Sets header of table to the given tuple. Args: new_values: Tuple of new header values.
juraj-google-style
def estimate_motion(self, time, intensity_grid, max_u, max_v): ti = np.where((time == self.times))[0][0] mask_vals = np.where((self.masks[ti].ravel() == 1)) i_vals = self.i[ti].ravel()[mask_vals] j_vals = self.j[ti].ravel()[mask_vals] obj_vals = self.timesteps[ti].ravel()[mask_vals] u_shifts = np.arange((- max_u), (max_u + 1)) v_shifts = np.arange((- max_v), (max_v + 1)) min_error = 99999999999.0 best_u = 0 best_v = 0 for u in u_shifts: j_shift = (j_vals - u) for v in v_shifts: i_shift = (i_vals - v) if np.all(((((0 <= i_shift) & (i_shift < intensity_grid.shape[0])) & (0 <= j_shift)) & (j_shift < intensity_grid.shape[1]))): shift_vals = intensity_grid[(i_shift, j_shift)] else: shift_vals = np.zeros(i_shift.shape) error = np.abs((shift_vals - obj_vals)).mean() if (error < min_error): min_error = error best_u = (u * self.dx) best_v = (v * self.dx) self.u[ti] = best_u self.v[ti] = best_v return (best_u, best_v, min_error)
Estimate the motion of the object with cross-correlation on the intensity values from the previous time step. Args: time: time being evaluated. intensity_grid: 2D array of intensities used in cross correlation. max_u: Maximum x-component of motion. Used to limit search area. max_v: Maximum y-component of motion. Used to limit search area Returns: u, v, and the minimum error.
codesearchnet
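A tiny, self-contained version of the shift search above: slide the previous field over a search window and keep the displacement with the smallest mean absolute error. It uses `np.roll` instead of the masked indexing in the original, and the grids and true shift are made up.

import numpy as np

prev_field = np.zeros((20, 20))
prev_field[8:11, 5:8] = 1.0            # a small object

curr_field = np.zeros((20, 20))
curr_field[8:11, 9:12] = 1.0           # same object moved 4 columns right

best, best_err = (0, 0), np.inf
for dv in range(-6, 7):                # row displacement
    for du in range(-6, 7):            # column displacement
        shifted = np.roll(np.roll(prev_field, dv, axis=0), du, axis=1)
        err = np.abs(shifted - curr_field).mean()
        if err < best_err:
            best, best_err = (du, dv), err

print(best)   # (4, 0): recovered column shift of 4, no row shift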
def ipv4_is_defined(address): query_ip = IPv4Address(str(address)) results = namedtuple('ipv4_is_defined_results', 'is_defined, ietf_name, ' 'ietf_rfc') if query_ip in IPv4Network('0.0.0.0/8'): return results(True, 'This Network', 'RFC 1122, Section 3.2.1.3') elif query_ip.is_loopback: return results(True, 'Loopback', 'RFC 1122, Section 3.2.1.3') elif query_ip.is_link_local: return results(True, 'Link Local', 'RFC 3927') elif query_ip in IPv4Network('192.0.0.0/24'): return results(True, 'IETF Protocol Assignments', 'RFC 5736') elif query_ip in IPv4Network('192.0.2.0/24'): return results(True, 'TEST-NET-1', 'RFC 5737') elif query_ip in IPv4Network('192.88.99.0/24'): return results(True, '6to4 Relay Anycast', 'RFC 3068') elif query_ip in IPv4Network('198.18.0.0/15'): return (results(True, 'Network Interconnect Device Benchmark Testing', 'RFC 2544')) elif query_ip in IPv4Network('198.51.100.0/24'): return results(True, 'TEST-NET-2', 'RFC 5737') elif query_ip in IPv4Network('203.0.113.0/24'): return results(True, 'TEST-NET-3', 'RFC 5737') elif query_ip.is_multicast: return results(True, 'Multicast', 'RFC 3171') elif query_ip in IPv4Network('255.255.255.255/32'): return results(True, 'Limited Broadcast', 'RFC 919, Section 7') elif query_ip.is_private: return results(True, 'Private-Use Networks', 'RFC 1918') elif query_ip in IPv4Network('198.97.38.0/24'): return results(True, 'IANA Reserved', '') return results(False, '', '')
The function for checking if an IPv4 address is defined (does not need to be resolved). Args: address (:obj:`str`): An IPv4 address. Returns: namedtuple: :is_defined (bool): True if given address is defined, otherwise False :ietf_name (str): IETF assignment name if given address is defined, otherwise '' :ietf_rfc (str): IETF assignment RFC if given address is defined, otherwise ''
juraj-google-style
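Several of the checks above are also exposed directly by the standard `ipaddress` module; a quick comparison on a few sample addresses. Note that the more specific ranges such as TEST-NET-1 have to be tested before the broader `is_private` check, which mirrors the ordering in the function above.

from ipaddress import IPv4Address, IPv4Network

for addr in ('127.0.0.1', '192.0.2.44', '10.0.0.5', '8.8.8.8'):
    ip = IPv4Address(addr)
    print(addr,
          'loopback' if ip.is_loopback else
          'TEST-NET-1' if ip in IPv4Network('192.0.2.0/24') else
          'private' if ip.is_private else
          'unreserved')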
def fit_gaussian(samples, ddof=0): if (len(samples.shape) == 1): return (np.mean(samples), np.std(samples, ddof=ddof)) return (np.mean(samples, axis=1), np.std(samples, axis=1, ddof=ddof))
Calculates the mean and the standard deviation of the given samples.

Args:
    samples (ndarray): a one or two dimensional array. If one dimensional we
        calculate the fit using all values. If two dimensional, we fit the
        Gaussian for every set of samples over the first dimension.
    ddof (int): the difference degrees of freedom in the std calculation.
        See numpy.

Returns:
    tuple: the mean and the standard deviation; scalars for a 1D input, or
    one value per set of samples for a 2D input.
codesearchnet
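A quick look at the per-row behaviour of the 2D branch above; with `ddof=1` the second value would be the unbiased sample standard deviation instead.

import numpy as np

samples = np.array([[1.0, 2.0, 3.0, 4.0],
                    [10.0, 10.0, 10.0, 10.0]])

mean = np.mean(samples, axis=1)          # [ 2.5, 10. ]
std = np.std(samples, axis=1, ddof=0)    # [~1.118, 0. ]
print(mean, std)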
def timestamp(value, fmt=None): if fmt: return _timestamp_formats.get(fmt, (lambda v: timestamp_fmt(v, fmt)))(value) l = len(value) if ((19 <= l <= 24) and (value[3] == ' ')): try: return timestamp_d_b_Y_H_M_S(value) except (KeyError, ValueError, OverflowError): pass if (30 <= l <= 31): try: return timestamp_a__d_b_Y_H_M_S_z(value) except (KeyError, ValueError, OverflowError): pass if (l == 14): try: return timestamp_YmdHMS(value) except (ValueError, OverflowError): pass try: return timestamp_epoch(value) except ValueError: pass return timestamp_any(value)
Parse a datetime to a unix timestamp.

Uses fast custom parsing for common datetime formats or the slow dateutil
parser for other formats. This is a trade-off between ease of use and speed
and is very useful for fast parsing of timestamp strings whose format may be
standard but varied or unknown prior to parsing.

Common formats include:

    1 Feb 2010 12:00:00 GMT
    Mon, 1 Feb 2010 22:00:00 +1000
    20100201120000
    1383470155 (seconds since epoch)

See the other timestamp_*() functions for more details.

Args:
    value: A string representing a datetime.
    fmt: A timestamp format string like for time.strptime().

Returns:
    The time in seconds since epoch as an integer for the value specified.
codesearchnet
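A few of the formats listed above, parsed here with the standard library for comparison; treating the compact YmdHMS value as UTC is an assumption rather than something the docstring states.

import calendar
import time
from email.utils import parsedate_to_datetime

rfc2822 = parsedate_to_datetime('Mon, 1 Feb 2010 22:00:00 +1000')
print(int(rfc2822.timestamp()))                      # 1265025600

compact = time.strptime('20100201120000', '%Y%m%d%H%M%S')
print(calendar.timegm(compact))                      # treated as UTC: 1265025600

print(int('1383470155'))                             # already an epoch value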
def has_relationship(self, left_id, left_type, right_id, right_type, rel_type='Related To'): data = self.get_object(left_id, left_type) if not data: raise CRITsOperationalError('Crits Object not found with id {}' 'and type {}'.format(left_id, left_type)) if 'relationships' not in data: return False for relationship in data['relationships']: if relationship['relationship'] != rel_type: continue if relationship['value'] != right_id: continue if relationship['type'] != right_type: continue return True return False
Checks if the two objects are related Args: left_id: The CRITs ID of the first indicator left_type: The CRITs TLO type of the first indicator right_id: The CRITs ID of the second indicator right_type: The CRITs TLO type of the second indicator rel_type: The relationships type ("Related To", etc) Returns: True or False if the relationship exists or not.
juraj-google-style
def __init__(self, identifier): super(FormatSpecification, self).__init__() self.identifier = identifier self.signatures = []
Initializes a specification. Args: identifier (str): unique name for the format.
juraj-google-style
class Mean(Metric): def __init__(self, name='mean', dtype=None): super().__init__(name=name, dtype=dtype) self.total = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='total') self.count = self.add_variable(shape=(), initializer=initializers.Zeros(), dtype=self.dtype, name='count') def update_state(self, values, sample_weight=None): values, sample_weight = reduce_to_samplewise_values(values, sample_weight, reduce_fn=ops.mean, dtype=self.dtype) self.total.assign_add(ops.sum(values)) if sample_weight is not None: num_samples = ops.sum(sample_weight) elif len(values.shape) >= 1: num_samples = ops.shape(values)[0] else: num_samples = 1 self.count.assign_add(ops.cast(num_samples, dtype=self.dtype)) def reset_state(self): self.total.assign(0) self.count.assign(0) def result(self): return ops.divide_no_nan(self.total, ops.cast(self.count, dtype=self.dtype))
Compute the (weighted) mean of the given values. For example, if values is `[1, 3, 5, 7]` then the mean is 4. If `sample_weight` was specified as `[1, 1, 0, 0]` then the mean would be 2. This metric creates two variables, `total` and `count`. The mean value returned is simply `total` divided by `count`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Example: >>> m = Mean() >>> m.update_state([1, 3, 5, 7]) >>> m.result() 4.0 >>> m.reset_state() >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0]) >>> m.result() 2.0
github-repos
def end_before(self, document_fields): return self._cursor_helper(document_fields, before=True, start=False)
End query results before a particular document value. The result set will **exclude** the document specified by ``document_fields``. If the current query already has specified an end cursor -- either via this method or :meth:`~.firestore_v1beta1.query.Query.end_at` -- this will overwrite it. When the query is sent to the server, the ``document_fields`` will be used in the order given by fields set by :meth:`~.firestore_v1beta1.query.Query.order_by`. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. Acts as a copy of the current query, modified with the newly added "end before" cursor.
codesearchnet
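A hedged usage sketch following google-cloud-firestore client conventions (method names may differ between the v1beta1 and v1 clients); the collection, field, and cursor values are placeholders.

from google.cloud import firestore

client = firestore.Client()
cities = client.collection('cities')

query = (cities
         .order_by('population')
         .end_before({'population': 1000000}))   # exclude this cursor document

for snapshot in query.stream():
    print(snapshot.id, snapshot.get('population'))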
def request(self, request): url = "{}{}".format(self._base_url, request.path) timeout = self.poll_timeout if request.stream is True: timeout = self.stream_timeout try: http_response = self._session.request( request.method, url, headers=self._headers, params=request.params, data=request.body, stream=request.stream, timeout=timeout ) except requests.exceptions.ConnectionError: raise V20ConnectionError(url) except requests.exceptions.ConnectTimeout: raise V20Timeout(url, "connect") except requests.exceptions.ReadTimeout: raise V20Timeout(url, "read") request.headers = http_response.request.headers response = Response( request, request.method, http_response.url, http_response.status_code, http_response.reason, http_response.headers ) if request.stream: response.set_line_parser( request.line_parser ) response.set_lines( http_response.iter_lines( self.stream_chunk_size ) ) else: response.set_raw_body(http_response.text) return response
Perform an HTTP request through the context Args: request: A v20.request.Request object Returns: A v20.response.Response object
juraj-google-style
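Not the v20 API itself, just a sketch of the underlying `requests` mechanics the method above relies on: separate timeouts for polling and streaming, and line-wise iteration for streamed responses. The URL and sizes are placeholders.

import requests

session = requests.Session()
POLL_TIMEOUT, STREAM_TIMEOUT, CHUNK = 10, 60, 512

def fetch(url, stream=False):
    response = session.get(url, stream=stream,
                           timeout=STREAM_TIMEOUT if stream else POLL_TIMEOUT)
    if stream:
        for line in response.iter_lines(CHUNK):
            if line:
                print(line.decode('utf-8'))
    else:
        print(response.text[:200])

fetch('https://example.com/')                 # plain poll-style request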