Columns: code (string, lengths 20–4.93k), docstring (string, lengths 33–1.27k), source (string, 3 classes)
def __add_kickoff_task(cls, job_config, mapreduce_spec):
    params = {"mapreduce_id": job_config.job_id}
    kickoff_task = taskqueue.Task(
        url=job_config._base_path + "/kickoffjob_callback/" + job_config.job_id,
        headers=util._get_task_headers(job_config.job_id),
        params=params)
    if job_config._hooks_cls:
        hooks = job_config._hooks_cls(mapreduce_spec)
        try:
            hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name)
            return
        except NotImplementedError:
            pass
    kickoff_task.add(job_config.queue_name, transactional=True)
Add kickoff task to taskqueue. Args: job_config: map_job.JobConfig. mapreduce_spec: model.MapreduceSpec.
juraj-google-style
def update(self, data):
    for key, value in data.items():
        setattr(self, key, value)
Update the current memory record with the given data dict. Args: data (dict): Data dictionary to update the record attributes with.
juraj-google-style
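A runnable sketch of update() in use; the minimal record class here is a hypothetical stand-in for the real one:

import types

class _Record:  # hypothetical stand-in for the real memory record class
    def __init__(self):
        self.name = None
        self.score = None

    def update(self, data):
        for key, value in data.items():
            setattr(self, key, value)

r = _Record()
r.update({"name": "alpha", "score": 42})
assert r.name == "alpha" and r.score == 42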
def InternalSendApdu(self, apdu_to_send):
    response = None
    if not self.use_legacy_format:
        response = apdu.ResponseApdu(self.transport.SendMsgBytes(
            apdu_to_send.ToByteArray()))
        if response.sw1 == 0x67 and response.sw2 == 0x00:
            self.use_legacy_format = True
            return self.InternalSendApdu(apdu_to_send)
    else:
        response = apdu.ResponseApdu(self.transport.SendMsgBytes(
            apdu_to_send.ToLegacyU2FByteArray()))
    return response
Send an APDU to the device. Sends an APDU to the device, possibly falling back to the legacy encoding format that is not ISO7816-4 compatible. Args: apdu_to_send: The CommandApdu object to send. Returns: The ResponseApdu object constructed out of the device's reply.
juraj-google-style
def from_string(rxn_string):
    rct_str, prod_str = rxn_string.split("->")

    def get_comp_amt(comp_str):
        return {Composition(m.group(2)): float(m.group(1) or 1)
                for m in re.finditer(
                    r"([\d\.]*(?:[eE]-?[\d\.]+)?)\s*([A-Z][\w\.\(\)]*)",
                    comp_str)}

    return BalancedReaction(get_comp_amt(rct_str), get_comp_amt(prod_str))
Generates a balanced reaction from a string. The reaction must already be balanced. Args: rxn_string: The reaction string. For example, "4 Li + O2 -> 2Li2O" Returns: BalancedReaction
juraj-google-style
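A hedged usage sketch, assuming pymatgen's Composition and BalancedReaction are importable and that this parser is exposed as from_string():

# Hypothetical usage; requires pymatgen in the environment.
rxn = BalancedReaction.from_string("4 Li + O2 -> 2 Li2O")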
def assert_not_present(self, selector, testid=None, **kwargs):
    self.info_log('Assert not present selector(%s) testid(%s)' % (selector, testid))
    wait_until_not_present = kwargs.get(
        'wait_until_not_present',
        BROME_CONFIG['proxy_driver']['wait_until_not_present_before_assert_not_present'])
    self.debug_log('effective wait_until_not_present: %s' % wait_until_not_present)
    if wait_until_not_present:
        ret = self.wait_until_not_present(selector, raise_exception=False)
    else:
        ret = not self.is_present(selector)
    if ret:
        if testid is not None:
            self.create_test_result(testid, True)
        return True
    else:
        if testid is not None:
            self.create_test_result(testid, False)
        return False
Assert that the element is not present in the dom. Args: selector (str): the selector used to find the element. testid (str): the test id. Kwargs: wait_until_not_present (bool) Returns: bool: True if the assertion succeeds; False otherwise.
codesearchnet
def get_maybe_abstract_instance(self, data):
    if data.is_concrete:
        data_type = type(data.pyval)
        if data_type in self.primitive_instances:
            return self.primitive_instances[data_type]
    return data
Get an instance of the same type as the given data, abstract if possible. Get an abstract instance of primitive data stored as a ConcreteValue. Return any other data as-is. This is used by constant_to_var to discard concrete values that have been kept around for InterpreterFunction. This method intentionally does not descend into containers, as doing so causes new timeouts. If you need to discard concrete values inside containers, use abstract_utils.abstractify_variable instead. Arguments: data: The data. Returns: An instance of the same type as the data, abstract if possible.
github-repos
def _get_api_version(self):
    url = '{base_url}/api/server_info'.format(base_url=self._base_url())
    server_info = self._make_request(url=url, method='get')
    return server_info['latest_api_version']
Fetches the most recent API version Returns: str
codesearchnet
def _ConvertValueBinaryDataToUBInt64(self, value):
    if not value:
        return None
    integer_map = self._GetDataTypeMap('uint64be')
    try:
        return self._ReadStructureFromByteStream(value, 0, integer_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse integer value with error: {0!s}'.format(exception))
Converts a binary data value into an integer. Args: value (bytes): binary data value containing an unsigned 64-bit big-endian integer. Returns: int: integer representation of binary data value or None if value is not set. Raises: ParseError: if the integer value cannot be parsed.
juraj-google-style
def get_opt_val(obj_pyxb, attr_str, default_val=None):
    try:
        return get_req_val(getattr(obj_pyxb, attr_str))
    except (ValueError, AttributeError):
        return default_val
Get an optional Simple Content value from a PyXB element. The attributes for elements that are optional according to the schema and not set in the PyXB object are present and set to None. PyXB validation will fail if required elements are missing. Args: obj_pyxb: PyXB object attr_str: str Name of an attribute that the PyXB object may contain. default_val: any object Value to return if the attribute is not present. Returns: str : Value of the attribute if present, else ``default_val``.
codesearchnet
def output(self, _filename):
    txt = ""
    for contract in self.contracts:
        print('Contract {}'.format(contract.name))
        for function in contract.functions:
            if function.contract == contract:
                print('\tFunction {}'.format(function.full_name))
                for node in function.nodes:
                    if node.expression:
                        print('\t\tExpression: {}'.format(node.expression))
                        print('\t\tIRs:')
                        for ir in node.irs:
                            print('\t\t\t{}'.format(ir))
                    elif node.irs:
                        print('\t\tIRs:')
                        for ir in node.irs:
                            print('\t\t\t{}'.format(ir))
        for modifier in contract.modifiers:
            if modifier.contract == contract:
                print('\tModifier {}'.format(modifier.full_name))
                for node in modifier.nodes:
                    print(node)
                    if node.expression:
                        print('\t\tExpression: {}'.format(node.expression))
                        print('\t\tIRs:')
                        for ir in node.irs:
                            print('\t\t\t{}'.format(ir))
    self.info(txt)
_filename is not used Args: _filename(string)
juraj-google-style
def write_to_file(path, contents, file_type='text'):
    FILE_TYPES = ('json', 'text', 'binary')
    if file_type not in FILE_TYPES:
        raise ScriptWorkerException(
            'Unknown file_type {} not in {}!'.format(file_type, FILE_TYPES))
    if file_type == 'json':
        contents = format_json(contents)
    if file_type == 'binary':
        with open(path, 'wb') as fh:
            fh.write(contents)
    else:
        with open(path, 'w') as fh:
            print(contents, file=fh, end='')
Write ``contents`` to ``path`` with optional formatting. Small helper function to write ``contents`` to ``file`` with optional formatting. Args: path (str): the path to write to contents (str, object, or bytes): the contents to write to the file file_type (str, optional): the type of file. Currently accepts ``text`` or ``binary`` (contents are unchanged) or ``json`` (contents are formatted). Defaults to ``text``. Raises: ScriptWorkerException: with an unknown ``file_type`` TypeError: if ``file_type`` is ``json`` and ``contents`` isn't JSON serializable
codesearchnet
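A hedged usage sketch of the three file_type branches, assuming write_to_file and its format_json helper are in scope:

write_to_file("notes.txt", "hello")                         # text: written verbatim
write_to_file("blob.bin", b"\x00\x01", file_type="binary")  # binary: bytes written as-is
write_to_file("data.json", {"a": 1}, file_type="json")      # json: formatted before writing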
def get_fleet(self, airline_key):
    url = AIRLINE_FLEET_BASE.format(airline_key)
    return self._fr24.get_airline_fleet_data(url, self.AUTH_TOKEN != '')
Get the fleet for a particular airline. Given an airline code from the get_airlines() method output, this method returns the fleet for the airline. Args: airline_key (str): The code for the airline on flightradar24 Returns: A list of dicts, one for each aircraft in the airline's fleet Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_fleet('ai-aic')
codesearchnet
def HasExactlyCalls(self, *calls):
    if (len(calls) == 1 and _IsIterable(calls[0])
            and not isinstance(calls[0], mock._Call)):
        calls = calls[0]
    return AssertThat(self._actual.mock_calls).ContainsExactlyElementsIn(calls)
Assert that the mocked function was called with exactly the given calls. Args: *calls: iterable of mock.call objects. Developers may also pass a single iterable of mock.call objects, for compatibility with mock's assert_has_calls() method, although this form is not preferred. Returns: If the mocked function was called exactly with the expected calls, returns an _Ordered predicate on which .InOrder() can be subsequently called. Raises: TruthAssertionError: the mocked function is missing any of the expected calls, or it contains any call not in the expected calls.
github-repos
def main(argv=None):
    args = None
    cmd = None
    try:
        args = parse_args(argv)
        if args.quiet:
            logger.setLevel(logging.CRITICAL)
        elif args.verbose:
            logger.setLevel(logging.DEBUG)
        cmd = args.func(args)
        ret = cmd.run_cmd()
    except KeyboardInterrupt:
        logger.exception("interrupted by the user")
        ret = 252
    except NotDvcRepoError:
        logger.exception("")
        ret = 253
    except DvcParserError:
        ret = 254
    except Exception:
        logger.exception("unexpected error")
        ret = 255
    Analytics().send_cmd(cmd, args, ret)
    return ret
Run dvc CLI command. Args: argv: optional list of arguments to parse. sys.argv is used by default. Returns: int: command's return code.
juraj-google-style
def generate_sb(date: datetime.datetime, project: str, programme_block: str) -> dict:
    date = date.strftime('%Y%m%d')
    instance_id = randint(0, 9999)
    sb_id = 'SB-{}-{}-{:04d}'.format(date, project, instance_id)
    return dict(id=sb_id, project=project, programme_block=programme_block)
Generate a Scheduling Block data object. Args: date (datetime.datetime): UTC date of the SBI project (str): Project Name programme_block (str): Programme Returns: dict: Scheduling Block data object, including the Scheduling Block Instance (SBI) ID.
juraj-google-style
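A runnable sketch, assuming generate_sb and its randint import are in scope:

import datetime

sb = generate_sb(datetime.datetime(2023, 1, 15), "demo_project", "block_a")
assert sb["id"].startswith("SB-20230115-demo_project-")
assert sb["project"] == "demo_project"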
def get_releasenotes(repo_path, from_commit=None, bugtracker_url=''):
    repo = dulwich.repo.Repo(repo_path)
    tags = get_tags(repo)
    refs = get_refs(repo)
    maj_version = 0
    feat_version = 0
    fix_version = 0
    start_including = False
    release_notes_per_major = OrderedDict()
    cur_line = ''
    if from_commit is None:
        start_including = True
    prev_version = (maj_version, feat_version, fix_version)
    prev_version_str = '%s.%s.%s' % prev_version
    bugs = []
    features = []
    api_break_changes = []
    for commit_sha, children in reversed(
            get_children_per_first_parent(repo_path).items()):
        commit = get_repo_object(repo, commit_sha)
        maj_version, feat_version, fix_version = get_version(
            commit=commit, tags=tags, maj_version=maj_version,
            feat_version=feat_version, fix_version=fix_version,
            children=children)
        version = (maj_version, feat_version, fix_version)
        version_str = '%s.%s.%s' % version
        if (start_including or commit_sha.startswith(from_commit)
                or fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))):
            start_including = True
            parent_commit_type = get_commit_type(
                commit=commit, children=children, tags=tags,
                prev_version=prev_version)
            cur_line = pretty_commit(
                commit=commit, version=version_str,
                bugtracker_url=bugtracker_url, commit_type=parent_commit_type)
            for child in children:
                commit_type = get_commit_type(
                    commit=commit, tags=tags, prev_version=prev_version)
                cur_line += pretty_commit(
                    commit=child, version=None, commit_type=commit_type,
                    bugtracker_url=bugtracker_url)
            if parent_commit_type == 'api_break':
                release_notes_per_major[prev_version_str] = (
                    api_break_changes, features, bugs)
                bugs, features, api_break_changes = [], [], []
                api_break_changes.append(cur_line)
            elif parent_commit_type == 'feature':
                features.append(cur_line)
            else:
                bugs.append(cur_line)
        prev_version = version
        prev_version_str = version_str
    release_notes_per_major[prev_version_str] = (
        api_break_changes, features, bugs)
    releasenotes = ''
    for major_version, lines in reversed(release_notes_per_major.items()):
        api_break_changes, features, bugs = lines
        releasenotes += (
            u'New changes for version %s\n'
            u'=================================\n\n'
            u'API Breaking changes\n'
            u'--------------------\n%s\n'
            u'New features\n'
            u'------------\n%s\n'
            u'Bugfixes and minor changes\n'
            u'--------------------------\n%s\n\n' % (
                major_version,
                '\n'.join(reversed(api_break_changes)) or 'No new API breaking changes\n',
                '\n'.join(reversed(features)) or 'No new features\n',
                '\n'.join(reversed(bugs)) or 'No new bugs\n'))
    return releasenotes.strip()
Given a repo and optionally a base revision to start from, will return a text suitable for the release notes announcement, grouping the bugs, the features and the api-breaking changes. Args: repo_path(str): Path to the code git repository. from_commit(str): Refspec of the commit to start aggregating the authors from. bugtracker_url(str): URL to be prepended to any bug ids found in the commits. Returns: str: Release notes text.
codesearchnet
def set_custom_getter_compose(custom_getter):
    tf.get_variable_scope().set_custom_getter(
        _compose_custom_getters(tf.get_variable_scope().custom_getter,
                                custom_getter))
Set a custom getter in the current variable scope. Do not overwrite the existing custom getter - rather compose with it. Args: custom_getter: a custom getter.
juraj-google-style
def swap(self, left, right):
    if type(left) is not type(right):
        raise LayoutError('The method swap only works with elements of the same type.')
    temp = self[left]
    self[left] = self[right]
    self[right] = temp
Swaps the map between left and right. Args: left (tuple or int): Item to swap with right. right (tuple or int): Item to swap with left. Raises: LayoutError: If left and right do not have the same type.
juraj-google-style
def get_mask(self, layers=None, output='vector', in_global_mask=True):
    if in_global_mask:
        output = 'vector'
    if layers is None:
        layers = self.layers.keys()
    elif not isinstance(layers, list):
        layers = [layers]
    layers = map(lambda x: x if isinstance(x, string_types) else self.stack[x],
                 layers)
    layers = [self.layers[l] for l in layers if l in self.layers]
    layers.append(self.full)
    layers = np.vstack(layers).T.astype(bool)
    mask = layers.all(axis=1)
    mask = self.get_image(mask, output)
    return mask[self.global_mask] if in_global_mask else mask
Set the current mask by taking the conjunction of all specified layers. Args: layers: Which layers to include. See documentation for add() for format. output: Format of the returned mask. in_global_mask: Whether or not to return the mask restricted to the global mask (i.e., self.volume).
juraj-google-style
def CreateTaskCompletion(self):
    self.completion_time = int(
        time.time() * definitions.MICROSECONDS_PER_SECOND)
    task_completion = TaskCompletion()
    task_completion.aborted = self.aborted
    task_completion.identifier = self.identifier
    task_completion.session_identifier = self.session_identifier
    task_completion.timestamp = self.completion_time
    return task_completion
Creates a task completion. Returns: TaskCompletion: task completion attribute container.
codesearchnet
def fetch_url(self, url):
    url_path = urlparse.urlsplit(url).path
    dst_path = os.path.basename(url_path)
    dst_path = self.paths.prefixed(dst_path)
    with LogTask('Downloading %s' % url):
        urllib.urlretrieve(url=os.path.expandvars(url), filename=dst_path)
    return dst_path
Retrieves the given url to the prefix Args: url(str): Url to retrieve Returns: str: path to the downloaded file
juraj-google-style
def override_from_dict(self, values_dict):
    for name, value in values_dict.items():
        self.set_hparam(name, value)
    return self
Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed.
codesearchnet
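A hedged usage sketch, assuming a tf.contrib-style HParams object (the constructor call is hypothetical):

hparams = HParams(learning_rate=0.1, batch_size=32)  # hypothetical construction
hparams.override_from_dict({"learning_rate": 0.01, "batch_size": 64})
assert hparams.learning_rate == 0.01 and hparams.batch_size == 64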
def score_and_learn(self, data):
    assert self._underlying
    if self._underlying._features is not None:
        x = beam.Row(**{f: getattr(data, f) for f in self._underlying._features})
    else:
        x = beam.Row(**data._asdict())
    y_pred = self._underlying.score_one(x)
    self._underlying.learn_one(x)
    return y_pred
Scores and learns from a single data point. Args: data: A `beam.Row` representing the input data point. Returns: float: The anomaly score predicted by the model.
github-repos
def _rapply(input_layer, operation, *op_args, **op_kwargs):
    op_args = list(op_args)
    op_args.append(input_layer.tensor)
    return input_layer.with_tensor(operation(*op_args, **op_kwargs))
Applies the given operation to this after expanding op_args. Args: input_layer: The input layer for this op. operation: An operation that takes a tensor and the supplied args. *op_args: Extra arguments for operation. **op_kwargs: Keyword arguments for the operation. Returns: A new layer with operation applied.
juraj-google-style
def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any], spec: BELSpec):
    ast_type = ast.__class__.__name__
    trigger_functions = rule.get("trigger_function", [])
    trigger_types = rule.get("trigger_type", [])
    rule_subject = rule.get("subject")
    rule_relation = rule.get("relation")
    rule_object = rule.get("object")
    log.debug(f"Running {rule_relation} Type: {ast_type}")
    if isinstance(ast, Function):
        function_name = ast.name
        args = ast.args
        parent_function = ast.parent_function
        if function_name in trigger_functions:
            if rule_subject == "trigger_value":
                subject = ast
                if rule_object == "args":
                    for arg in args:
                        log.debug(f"1: {subject} {arg}")
                        edge_ast = BELAst(subject, rule_relation, arg, spec)
                        edges.append(edge_ast)
                elif rule_object == "parent_function" and parent_function:
                    log.debug(f"2: {subject} {parent_function}")
                    edge_ast = BELAst(subject, rule_relation, parent_function, spec)
                    edges.append(edge_ast)
        elif ast_type in trigger_types:
            if rule_subject == "trigger_value":
                subject = ast
                if rule_object == "args":
                    for arg in args:
                        log.debug(f"3: {subject} {arg}")
                        edge_ast = BELAst(subject, rule_relation, arg, spec)
                        edges.append(edge_ast)
                elif rule_object == "parent_function" and parent_function:
                    log.debug(f"4: {subject} {parent_function}")
                    edge_ast = BELAst(subject, rule_relation, parent_function, spec)
                    edges.append(edge_ast)
    if isinstance(ast, NSArg):
        term = "{}:{}".format(ast.namespace, ast.value)
        parent_function = ast.parent_function
        if ast_type in trigger_types:
            if rule_subject == "trigger_value":
                subject = term
                if rule_object == "args":
                    for arg in args:
                        log.debug(f"5: {subject} {arg}")
                        edge_ast = BELAst(subject, rule_relation, arg, spec)
                        edges.append(edge_ast)
                elif rule_object == "parent_function" and parent_function:
                    log.debug(f"6: {subject} {parent_function}")
                    edge_ast = BELAst(subject, rule_relation, parent_function, spec)
                    edges.append(edge_ast)
    if hasattr(ast, "args"):
        for arg in ast.args:
            process_rule(edges, arg, rule, spec)
Process computed edge rule. Recursively processes BELAst versus a single computed edge rule. Args: edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs ast (Function): BEL Function AST rule (Mapping[str, Any]): computed edge rule spec (BELSpec): BEL specification
juraj-google-style
def _update_services_target_state(sdp_target_state: str):
    service_states = get_service_state_list()
    for service in service_states:
        if service.current_state != sdp_target_state:
            LOG.debug('Setting the target state of %s to be %s',
                      service.id, sdp_target_state)
            service.update_target_state(sdp_target_state)
Update the target states of services based on SDP target state. When we get a new target state this function is called to ensure components receive the target state(s) and/or act on them. Args: sdp_target_state (str): Target state of SDP
codesearchnet
# NOTE: `async` is a reserved word in Python 3; this is Python 2-era code.
def save(self, response_choice=None, async=False, callback=None):
    return self._manage_child_object(
        nurest_object=self, method=HTTP_METHOD_PUT, async=async,
        callback=callback, response_choice=response_choice)
Update object and call given callback in case of async call Args: async (bool): Boolean to make an asynchronous call. Default is False callback (function): Callback method that will be triggered in case of asynchronous call Example: >>> entity.name = "My Super Object" >>> entity.save() # will save the new name in the server
juraj-google-style
def split_window(self, fpath, vertical=False, size=None, bufopts=None):
    command = 'split {}'.format(fpath) if fpath else 'new'
    if vertical:
        command = 'v' + command
    if size:
        command = str(size) + command
    self._vim.command(command)
    if bufopts:
        self.set_buffer_options(bufopts)
Open file in a new split window. Args: fpath (str): Path of the file to open. If ``None``, a new empty split is created. vertical (bool): Whether to open a vertical split. size (Optional[int]): The height (or width) to set for the new window. bufopts (Optional[dict]): Buffer-local options to set in the split window. See :func:`.set_buffer_options`.
juraj-google-style
def load_parameters(path, proto=None, needs_proto=False):
    _, ext = os.path.splitext(path)
    if ext == '.h5':
        import warnings
        warnings.simplefilter('ignore', category=FutureWarning)
        import h5py
        with h5py.File(path, 'r') as hd:
            keys = []

            def _get_keys(name):
                ds = hd[name]
                if not isinstance(ds, h5py.Dataset):
                    return
                keys.append((ds.attrs.get('index', None), name))

            hd.visit(_get_keys)
            for _, key in sorted(keys):
                ds = hd[key]
                var = get_parameter_or_create(
                    key, ds.shape, need_grad=ds.attrs['need_grad'])
                var.data.cast(ds.dtype)[...] = ds[...]
                if needs_proto:
                    if proto is None:
                        proto = nnabla_pb2.NNablaProtoBuf()
                    parameter = proto.parameter.add()
                    parameter.variable_name = key
                    parameter.shape.dim.extend(ds.shape)
                    parameter.data.extend(
                        numpy.array(ds[...]).flatten().tolist())
                    parameter.need_grad = False
                    if ds.attrs['need_grad']:
                        parameter.need_grad = True
    else:
        if proto is None:
            proto = nnabla_pb2.NNablaProtoBuf()
        if ext == '.protobuf':
            with open(path, 'rb') as f:
                proto.MergeFromString(f.read())
                set_parameter_from_proto(proto)
        elif ext == '.nntxt' or ext == '.prototxt':
            with open(path, 'r') as f:
                text_format.Merge(f.read(), proto)
                set_parameter_from_proto(proto)
        elif ext == '.nnp':
            try:
                tmpdir = tempfile.mkdtemp()
                with zipfile.ZipFile(path, 'r') as nnp:
                    for name in nnp.namelist():
                        nnp.extract(name, tmpdir)
                        _, ext = os.path.splitext(name)
                        if ext in ['.protobuf', '.h5']:
                            proto = load_parameters(
                                os.path.join(tmpdir, name), proto, needs_proto)
            finally:
                shutil.rmtree(tmpdir)
            logger.info("Parameter load ({}): {}".format(format, path))
        else:
            pass
    return proto
Load parameters from a file with the specified format. Args: path: path or file object. proto: optional protobuf message to merge the loaded parameters into. needs_proto (bool): if True, also populate the protobuf representation when loading HDF5 parameters. Returns: the resulting protobuf message.
juraj-google-style
def InsertMessage(self, message, timeout=None):
    if not isinstance(message, common_pb2.Message):
        raise InvalidArgument('Attempt to send unexpected message type: %s' %
                              message.__class__.__name__)
    if not message.HasField('source'):
        message.source.service_name = self._service_name
    if not message.message_id:
        message.message_id = os.urandom(32)
    return self._RetryLoop(
        lambda t: self._stub.InsertMessage(message, timeout=t))
Inserts a message into the Fleetspeak server. Sets message.source, if unset. Args: message: common_pb2.Message The message to send. timeout: How many seconds to try for. Raises: grpc.RpcError: if the RPC fails. InvalidArgument: if message is not a common_pb2.Message.
codesearchnet
def _client_send(self, msg):
    try:
        self._client.write(msg.encode('utf8') + b'\n')
        self._client.flush()
        self.log.debug('Snippet sent %s.', msg)
    except socket.error as e:
        raise Error(
            self._ad,
            'Encountered socket error "%s" sending RPC message "%s"' % (e, msg))
Sends an Rpc message through the connection. Args: msg: string, the message to send. Raises: Error: a socket error occurred during the send.
github-repos
def create_message(self, channel_id, text):
    baseurl = self.rest_baseurl + '/channels/{}/messages'.format(channel_id)
    requests.post(baseurl, headers=self.headers,
                  data=json.dumps({'content': text}))
Sends a message to a Discord channel or user via REST API Args: channel_id (string): ID of destination Discord channel text (string): Content of message
codesearchnet
def _open_interface(self, client, uuid, iface, key):
    conn_id = self._validate_connection('open_interface', uuid, key)
    if conn_id is None:
        return
    conn_data = self._connections[uuid]
    conn_data['last_touch'] = monotonic()
    slug = self._build_device_slug(uuid)
    try:
        resp = yield self._manager.open_interface(conn_id, iface)
    except Exception as exc:
        self._logger.exception('Error in manager open interface')
        resp = {'success': False, 'reason': 'Internal error: %s' % str(exc)}
    message = {'type': 'response', 'operation': 'open_interface', 'client': client}
    message['success'] = resp['success']
    if not message['success']:
        message['failure_reason'] = resp['reason']
    self._publish_response(slug, message)
Open an interface on a connected device. Args: client (string): The client id who is requesting this operation uuid (int): The id of the device we're opening the interface on iface (string): The name of the interface that we're opening key (string): The key to authenticate the caller
codesearchnet
def get_cloudflare_records(self, *, account):
    zones = []
    for zobj in self.__cloudflare_list_zones(account=account):
        try:
            self.log.debug('Processing DNS zone CloudFlare/{}'.format(zobj['name']))
            zone = {
                'zone_id': get_resource_id('cfz', zobj['name']),
                'name': zobj['name'],
                'source': 'CloudFlare',
                'comment': None,
                'tags': {},
                'records': []
            }
            for record in self.__cloudflare_list_zone_records(
                    account=account, zoneID=zobj['id']):
                zone['records'].append({
                    'id': get_resource_id(
                        'cfr', zobj['id'],
                        ['{}={}'.format(k, v) for k, v in record.items()]),
                    'zone_id': zone['zone_id'],
                    'name': record['name'],
                    'value': record['value'],
                    'type': record['type']
                })
            if len(zone['records']) > 0:
                zones.append(zone)
        except CloudFlareError:
            self.log.exception(
                'Failed getting records for CloudFlare zone {}'.format(zobj['name']))
    return zones
Return a `list` of `dict`s containing the zones and their records, obtained from the CloudFlare API. Args: account (:obj:`CloudFlareAccount`): A CloudFlare Account object Returns: :obj:`list` of `dict`
codesearchnet
def create_repository(cls, repository_data):
    location = 'memory{%s}' % hex(id(repository_data))
    resource_pool = ResourcePool(cache_size=None)
    repo = MemoryPackageRepository(location, resource_pool)
    repo.data = repository_data
    return repo
Create a standalone, in-memory repository. Using this function bypasses the `package_repository_manager` singleton. This is usually desired however, since in-memory repositories are for temporarily storing programmatically created packages, which we do not want to cache and that do not persist. Args: repository_data (dict): Repository data, see class docstring. Returns: `MemoryPackageRepository` object.
codesearchnet
def decode(self, decoder_input_ids, encoder_outputs,
           encoder_attention_mask: Optional[jnp.ndarray] = None,
           decoder_attention_mask: Optional[jnp.ndarray] = None,
           decoder_position_ids: Optional[jnp.ndarray] = None,
           past_key_values: Optional[dict] = None,
           output_attentions: Optional[bool] = None,
           output_hidden_states: Optional[bool] = None,
           return_dict: Optional[bool] = None,
           train: bool = False,
           params: Optional[dict] = None,
           dropout_rng: PRNGKey = None):
    output_attentions = (output_attentions if output_attentions is not None
                         else self.config.output_attentions)
    output_hidden_states = (output_hidden_states if output_hidden_states is not None
                            else self.config.output_hidden_states)
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    encoder_hidden_states = encoder_outputs[0]
    if encoder_attention_mask is None:
        batch_size, sequence_length = encoder_hidden_states.shape[:2]
        encoder_attention_mask = jnp.ones((batch_size, sequence_length))
    batch_size, sequence_length = decoder_input_ids.shape
    if decoder_attention_mask is None:
        decoder_attention_mask = jnp.ones((batch_size, sequence_length))
    if decoder_position_ids is None:
        if past_key_values is not None:
            raise ValueError('Make sure to provide `decoder_position_ids` '
                             'when passing `past_key_values`.')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
    rngs = {}
    if dropout_rng is not None:
        rngs['dropout'] = dropout_rng
    inputs = {'params': params or self.params}
    if past_key_values:
        inputs['cache'] = past_key_values
        mutable = ['cache']
    else:
        mutable = False

    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask,
                         decoder_position_ids, **kwargs):
        decoder_module = module._get_decoder_module()
        return decoder_module(decoder_input_ids, decoder_attention_mask,
                              decoder_position_ids, **kwargs)

    outputs = self.module.apply(
        inputs,
        decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'),
        decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'),
        decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'),
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'),
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        deterministic=not train,
        rngs=rngs,
        mutable=mutable,
        method=_decoder_forward)
    if past_key_values is not None and return_dict:
        outputs, past = outputs
        outputs['past_key_values'] = unfreeze(past['cache'])
        return outputs
    elif past_key_values is not None and not return_dict:
        outputs, past = outputs
        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]
    return outputs
Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large") >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```
github-repos
def all_label_values(self, label_list_ids=None):
    values = set()
    for utterance in self.utterances.values():
        values = values.union(
            utterance.all_label_values(label_list_ids=label_list_ids))
    return values
Return a set of all label-values occurring in this corpus. Args: label_list_ids (list): If not None, only labels from label-lists with an id contained in this list are considered. Returns: :class:`set`: A set of distinct label-values.
juraj-google-style
def ListFileEntries(self, base_path_specs, output_writer):
    for base_path_spec in base_path_specs:
        file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
        file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
        if file_entry is None:
            logging.warning(
                'Unable to open base path specification:\n{0:s}'.format(
                    base_path_spec.comparable))
            return
        self._ListFileEntry(file_system, file_entry, '', output_writer)
Lists file entries in the base path specifications. Args: base_path_specs (list[dfvfs.PathSpec]): source path specifications. output_writer (StdoutWriter): output writer.
juraj-google-style
def random_unitary(dim, seed=None):
    if dim == 0 or not math.log2(dim).is_integer():
        raise QiskitError('Desired unitary dimension not a positive power of 2.')
    matrix = np.zeros([dim, dim], dtype=complex)
    for j in range(dim):
        if j == 0:
            a = random_state(dim, seed)
        else:
            a = random_state(dim)
        matrix[:, j] = np.copy(a)
        # Gram-Schmidt orthogonalization against the previous columns.
        i = j - 1
        while i >= 0:
            dc = np.vdot(matrix[:, i], a)
            matrix[:, j] = matrix[:, j] - dc * matrix[:, i]
            i = i - 1
        matrix[:, j] = matrix[:, j] * (
            1.0 / np.sqrt(np.vdot(matrix[:, j], matrix[:, j])))
    return Operator(matrix)
Return a random dim x dim unitary Operator from the Haar measure. Args: dim (int): the dim of the state space. seed (int): Optional. To set a random seed. Returns: Operator: (dim, dim) unitary operator. Raises: QiskitError: if dim is not a positive power of 2.
codesearchnet
def __init__(self, tcex, domain, data_type, ttl_minutes=None, mapping=None):
    self.tcex = tcex
    self.ttl = None
    if ttl_minutes is not None:
        self.ttl = self._dt_to_epoch(
            datetime.now() - timedelta(minutes=int(ttl_minutes)))
    self.ds = self.tcex.datastore(domain, data_type, mapping)
Initialize class properties. Args: tcex (object): An instance of TcEx. domain (): [description] data_type ([type]): [description] ttl_minutes (int, optional): Defaults to None. Number of minutes the cache is valid. mapping ([type], optional): Defaults to None. [description]
juraj-google-style
def to_api(self):
    vals = {}
    for attribute, attribute_type in self._props.items():
        prop = getattr(self, attribute)
        vals[self._to_camel_case(attribute)] = self._to_api_value(
            attribute_type, prop)
    return vals
Return a dictionary to send to the API. Returns: dict: Mapping representing this object that can be sent to the API.
codesearchnet
def set_category(self, category):
    pcategory = self.find('general/category')
    pcategory.clear()
    name = ElementTree.SubElement(pcategory, 'name')
    if isinstance(category, Category):
        id_ = ElementTree.SubElement(pcategory, 'id')
        id_.text = category.id
        name.text = category.name
    elif isinstance(category, basestring):
        name.text = category
Set the policy's category. Args: category: A category object.
codesearchnet
def get_revisions(page):
    start_string = " <revision>\n"
    end_string = " </revision>\n"
    ret = []
    current_pos = 0
    while True:
        start_pos = page.find(start_string, current_pos)
        if start_pos == -1:
            break
        end_pos = page.find(end_string, start_pos)
        assert end_pos != -1
        ret.append(page[start_pos + len(start_string):end_pos])
        current_pos = end_pos + len(end_string)
    return ret
Extract the revisions of a page. Args: page: a string Returns: a list of strings
juraj-google-style
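A runnable sketch against a toy page string using the markers above:

page = (" <revision>\nfirst edit\n </revision>\n"
        " <revision>\nsecond edit\n </revision>\n")
assert get_revisions(page) == ["first edit\n", "second edit\n"]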
def _CaptureExpression(self, frame, expression):
    rc, value = _EvaluateExpression(frame, expression)
    if not rc:
        return {'name': expression, 'status': value}
    return self.CaptureNamedVariable(expression, value, 0,
                                     self.expression_capture_limits)
Evalutes the expression and captures it into a Variable object. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: Variable object (which will have error status if the expression fails to evaluate).
juraj-google-style
def in_test_phase(x, alt, training=None):
    return in_train_phase(alt, x, training=training)
Selects `x` in test phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Args: x: What to return in test phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on `K.learning_phase`.
github-repos
def iaf_hparams(hidden_size=512, filter_size=4096):
    hparams = common_hparams.basic_params1()
    hparams.hidden_size = hidden_size
    hparams.add_hparam('attention_key_channels', None)
    hparams.add_hparam('attention_value_channels', None)
    hparams.add_hparam('num_heads', 4)
    hparams.add_hparam('attention_dropout', 0.1)
    hparams.add_hparam('shared_rel', False)
    hparams.add_hparam('block_width', 1)
    hparams.add_hparam('block_length', 1)
    hparams.add_hparam('q_filter_width', 1)
    hparams.add_hparam('kv_filter_width', 1)
    hparams.layer_preprocess_sequence = 'n'
    hparams.layer_prepostprocess_dropout = 0.1
    hparams.norm_type = 'layer'
    hparams.norm_epsilon = 1e-06
    hparams.layer_prepostprocess_dropout_broadcast_dims = ''
    hparams.layer_postprocess_sequence = 'da'
    hparams.add_hparam('filter_size', filter_size)
    hparams.add_hparam('ffn_layer', 'conv_hidden_relu')
    hparams.add_hparam('relu_dropout', 0.1)
    return hparams
Create hyperparameters for inverse autoregressive flows. Args: hidden_size: Width of attention layers and neural network output layer. filter_size: Hidden layer width for neural network. Returns: hparams: Hyperparameters with basic presets for inverse autoregressive flows.
codesearchnet
def UploadFile(self, fd, offset=0, amount=None):
    return self._UploadChunkStream(
        self._streamer.StreamFile(fd, offset=offset, amount=amount))
Uploads chunks of a given file descriptor to the transfer store flow. Args: fd: A file descriptor to upload. offset: An integer offset at which the file upload should start on. amount: An upper bound on number of bytes to stream. If it is `None` then the whole file is uploaded. Returns: A `BlobImageDescriptor` object.
codesearchnet
def clone(self, callable=None, **overrides):
    old = {k: v for k, v in self.get_param_values()
           if k not in ['callable', 'name']}
    params = dict(old, **overrides)
    callable = self.callable if callable is None else callable
    return self.__class__(callable, **params)
Clones the Callable optionally with new settings Args: callable: New callable function to wrap **overrides: Parameter overrides to apply Returns: Cloned Callable object
codesearchnet
def _ParseFValue(self, registry_key):
    registry_value = registry_key.GetValueByName('F')
    if not registry_value:
        raise errors.ParseError(
            'missing value: "F" in Windows Registry key: {0:s}.'.format(
                registry_key.name))
    f_value_map = self._GetDataTypeMap('f_value')
    try:
        return self._ReadStructureFromByteStream(
            registry_value.data, 0, f_value_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(exception)
Parses an F value. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. Returns: f_value: F value stored in the Windows Registry key. Raises: ParseError: if the Windows Registry key does not contain an F value or F value cannot be parsed.
codesearchnet
def remove_acl(path):
    if (platform.system() == constants.PLATFORM_DARWIN
            and os.path.isfile('/bin/chmod')):
        subprocess.call(['/bin/chmod', '-R', '-N', path])
    elif (platform.system() == constants.PLATFORM_LINUX
            and os.path.isfile('/bin/setfacl')):
        subprocess.call(['/bin/setfacl', '-R', '-b', path])
Remove the ACL of the file or folder located on the given path. Also remove the ACL of any file and folder below the given one, recursively. Args: path (str): Path to the file or folder to remove the ACL for, recursively.
codesearchnet
def dump_database_as_insert_sql(engine: Engine,
                                fileobj: TextIO = sys.stdout,
                                include_ddl: bool = False,
                                multirow: bool = False) -> None:
    for tablename in get_table_names(engine):
        dump_table_as_insert_sql(
            engine=engine,
            table_name=tablename,
            fileobj=fileobj,
            include_ddl=include_ddl,
            multirow=multirow
        )
Reads an entire database and writes SQL to replicate it to the output file-like object. Args: engine: SQLAlchemy :class:`Engine` fileobj: file-like object to write to include_ddl: if ``True``, include the DDL to create the table as well multirow: write multi-row ``INSERT`` statements
juraj-google-style
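A hedged usage sketch with a SQLAlchemy engine; the dump helpers are assumed importable and the database path is hypothetical:

from sqlalchemy import create_engine

engine = create_engine("sqlite:///example.db")  # hypothetical database
with open("dump.sql", "w") as f:
    dump_database_as_insert_sql(engine, fileobj=f, include_ddl=True)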
def enum_value_descriptor_to_code_string(
        enum_value_descriptor: descriptor.EnumValueDescriptor) -> str:
    original_code = annotation_utils.get_enum_value_original_code(
        enum_value_descriptor)
    return (original_code if original_code is not None
            else enum_value_descriptor.name.lower().replace('_', '-'))
Returns the code string describing the enum value. Args: enum_value_descriptor: The EnumValueDescriptor to convert. Returns: The code string describing the enum value.
github-repos
def rollaxis(a, axis, start=0):
    if isinstance(a, np.ndarray):
        return np.rollaxis(a, axis, start)
    if axis not in range(a.ndim):
        raise ValueError('rollaxis: axis (%d) must be >=0 and < %d'
                         % (axis, a.ndim))
    if start not in range(a.ndim + 1):
        raise ValueError('rollaxis: start (%d) must be >=0 and < %d'
                         % (axis, a.ndim + 1))
    axes = list(range(a.ndim))
    axes.remove(axis)
    axes.insert(start, axis)
    return transpose(a, axes)
Roll the specified axis backwards, until it lies in a given position. Args: a (array_like): Input array. axis (int): The axis to roll backwards. The positions of the other axes do not change relative to one another. start (int, optional): The axis is rolled until it lies before this position. The default, 0, results in a "complete" roll. Returns: res (ndarray)
codesearchnet
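A runnable sketch of the shape transformations (NumPy inputs take the fast path through np.rollaxis):

import numpy as np

a = np.ones((3, 4, 5))
assert np.rollaxis(a, 2).shape == (5, 3, 4)     # roll axis 2 to the front
assert np.rollaxis(a, 1, 3).shape == (3, 5, 4)  # roll axis 1 before position 3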
def issubset(self, other):
    other = self._cast_to_frameset(other)
    if other is NotImplemented:
        return NotImplemented
    return self.items <= other.items
Check if the contents of `self` is a subset of the contents of `other`. Args: other (:class:`FrameSet`): Returns: bool: :class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
codesearchnet
def compute_jaccard_index(x_set, y_set):
    if not x_set or not y_set:
        return 0.0
    intersection_cardinal = len(x_set & y_set)
    union_cardinal = len(x_set | y_set)
    return intersection_cardinal / float(union_cardinal)
Return the Jaccard similarity coefficient of 2 given sets. Args: x_set (set): first set. y_set (set): second set. Returns: float: Jaccard similarity coefficient.
codesearchnet
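A runnable sketch:

assert compute_jaccard_index({1, 2, 3}, {2, 3, 4}) == 0.5  # |{2,3}| / |{1,2,3,4}|
assert compute_jaccard_index(set(), {1}) == 0.0            # empty set short-circuits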
def create_symmetric_key(self, algorithm, length):
    if algorithm not in self._symmetric_key_algorithms.keys():
        raise exceptions.InvalidField(
            'The cryptographic algorithm {0} is not a supported symmetric '
            'key algorithm.'.format(algorithm))
    cryptography_algorithm = self._symmetric_key_algorithms.get(algorithm)
    if length not in cryptography_algorithm.key_sizes:
        raise exceptions.InvalidField(
            'The cryptographic length ({0}) is not valid for the '
            'cryptographic algorithm ({1}).'.format(length, algorithm.name))
    self.logger.info(
        'Generating a {0} symmetric key with length: {1}'.format(
            algorithm.name, length))
    key_bytes = os.urandom(length // 8)  # length is in bits; urandom takes bytes
    try:
        cryptography_algorithm(key_bytes)
    except Exception as e:
        self.logger.exception(e)
        raise exceptions.CryptographicFailure(
            'Invalid bytes for the provided cryptographic algorithm.')
    return {'value': key_bytes, 'format': enums.KeyFormatType.RAW}
Create a symmetric key. Args: algorithm(CryptographicAlgorithm): An enumeration specifying the algorithm for which the created key will be compliant. length(int): The length of the key to be created. This value must be compliant with the constraints of the provided algorithm. Returns: dict: A dictionary containing the key data, with the following key/value fields: * value - the bytes of the key * format - a KeyFormatType enumeration for the bytes format Raises: InvalidField: Raised when the algorithm is unsupported or the length is incompatible with the algorithm. CryptographicFailure: Raised when the key generation process fails. Example: >>> engine = CryptographyEngine() >>> key = engine.create_symmetric_key( ... CryptographicAlgorithm.AES, 256)
codesearchnet
def set_el(cls, el, value):
    if not el:
        return
    tag_name = el.elt.tagName.lower()
    if tag_name == 'textarea':
        cls._set_textarea(el, value)
    elif tag_name == 'input':
        if 'typeahead' in el.class_name.lower():
            cls._set_typeahead(el, value)
        else:
            cls._set_input(el, value)
    elif tag_name == 'select':
        el.value = value
    else:
        raise ValueError('Setter for %s (%s) not implemented!'
                         % (tag_name, el.id))
Set given `el` tag element to `value`. Automatically choose proper method to set the `value` based on the type of the `el`. Args: el (obj): Element reference to the input you want to convert to typeahead. value (list): List of dicts with two keys: ``source`` and ``val``.
codesearchnet
def submit(self):
    if self._future is not None:
        raise JobError('We have already submitted the job!')
    validate_qobj_against_schema(self._qobj)
    self._future = self._executor.submit(self._fn, self._job_id, self._qobj)
Submit the job to the backend for execution. Raises: QobjValidationError: if the JSON serialization of the Qobj passed during construction does not validate against the Qobj schema. JobError: if trying to re-submit the job.
codesearchnet
def _ExtractWithFilter(self, source_path_specs, destination_path,
                       output_writer, artifact_filters, filter_file,
                       artifact_definitions_path, custom_artifacts_path,
                       skip_duplicates=True):
    extraction_engine = engine.BaseEngine()
    if self._source_type in self._SOURCE_TYPES_TO_PREPROCESS:
        self._PreprocessSources(extraction_engine)
    for source_path_spec in source_path_specs:
        file_system, mount_point = self._GetSourceFileSystem(
            source_path_spec, resolver_context=self._resolver_context)
        display_name = path_helper.PathHelper.GetDisplayNameForPathSpec(
            source_path_spec)
        output_writer.Write(
            'Extracting file entries from: {0:s}\n'.format(display_name))
        filter_find_specs = extraction_engine.BuildFilterFindSpecs(
            artifact_definitions_path, custom_artifacts_path,
            extraction_engine.knowledge_base, artifact_filters, filter_file)
        searcher = file_system_searcher.FileSystemSearcher(
            file_system, mount_point)
        for path_spec in searcher.Find(find_specs=filter_find_specs):
            self._ExtractFileEntry(path_spec, destination_path, output_writer,
                                   skip_duplicates=skip_duplicates)
        file_system.Close()
Extracts files using a filter expression. This method runs the file extraction process on the image and potentially on every VSS if that is wanted. Args: source_path_specs (list[dfvfs.PathSpec]): path specifications to extract. destination_path (str): path where the extracted files should be stored. output_writer (CLIOutputWriter): output writer. artifact_definitions_path (str): path to artifact definitions file. custom_artifacts_path (str): path to custom artifact definitions file. artifact_filters (list[str]): names of artifact definitions that are used for filtering file system and Windows Registry key paths. filter_file (str): path of the file that contains the filter file path filters. skip_duplicates (Optional[bool]): True if files with duplicate content should be skipped.
codesearchnet
def _UpdateLatestProcessingTime(self, task):
    self._latest_task_processing_time = max(
        self._latest_task_processing_time, task.last_processing_time)
Updates the latest processing time of the task manager from the task. This method does not lock the manager and should be called by a method holding the manager lock. Args: task (Task): task to update the processing time of.
juraj-google-style
def read_int64(self, little_endian=True):
    if little_endian:
        endian = '<'
    else:
        endian = '>'
    return self.unpack('%sq' % endian, 8)
Read 8 bytes as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the signed 64-bit integer value read from the stream.
codesearchnet
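The stream helper reduces to a struct.unpack call; a runnable sketch of the two endianness cases:

import struct

assert struct.unpack('<q', b'\xff' * 8)[0] == -1           # little-endian
assert struct.unpack('>q', b'\x00' * 7 + b'\x01')[0] == 1  # big-endian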
def _maybe_commit(self, transaction):
    try:
        transaction._commit()
        return True
    except exceptions.GoogleAPICallError as exc:
        if transaction._read_only:
            raise
        if isinstance(exc, exceptions.Aborted):
            return False
        else:
            raise
Try to commit the transaction. If the transaction is read-write and the ``Commit`` fails with the ``ABORTED`` status code, it will be retried. Any other failure will not be caught. Args: transaction (~.firestore_v1beta1.transaction.Transaction): The transaction to be ``Commit``-ed. Returns: bool: Indicating if the commit succeeded.
juraj-google-style
def allconcat_ring(xs, devices, concat_axis):
    n = len(xs)
    if n == 1:
        return xs
    parts = [[xs[target] if target == source else None
              for source in xrange(n)] for target in xrange(n)]
    # Propagate parts around the ring; half the ring distance suffices since
    # data travels in both directions.
    for distance in xrange(1, n // 2 + 1):
        for target in xrange(n):
            source = (target + distance) % n
            if parts[target][source] is None:
                with tf.device(devices[target]):
                    parts[target][source] = tf.identity(
                        parts[(target + 1) % n][source])
            source = (target - distance) % n
            if parts[target][source] is None:
                with tf.device(devices[target]):
                    parts[target][source] = tf.identity(
                        parts[(target - 1) % n][source])
    return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n)
Concatenate all Tensors everywhere. Performance-optimized for a ring of devices. Args: xs: a list of n tf.Tensors devices: a list of n strings concat_axis: an integer Returns: a list of n Tensors
juraj-google-style
def get_slab(self, shift=0, tol=0.1, energy=None):
    h = self._proj_height
    p = h / self.parent.lattice.d_hkl(self.miller_index)
    if self.in_unit_planes:
        nlayers_slab = int(math.ceil(self.min_slab_size / p))
        nlayers_vac = int(math.ceil(self.min_vac_size / p))
    else:
        nlayers_slab = int(math.ceil(self.min_slab_size / h))
        nlayers_vac = int(math.ceil(self.min_vac_size / h))
    nlayers = nlayers_slab + nlayers_vac
    species = self.oriented_unit_cell.species_and_occu
    props = self.oriented_unit_cell.site_properties
    props = {k: v * nlayers_slab for k, v in props.items()}
    frac_coords = self.oriented_unit_cell.frac_coords
    frac_coords = np.array(frac_coords) + np.array([0, 0, -shift])[None, :]
    frac_coords -= np.floor(frac_coords)
    a, b, c = self.oriented_unit_cell.lattice.matrix
    new_lattice = [a, b, nlayers * c]
    frac_coords[:, 2] = frac_coords[:, 2] / nlayers
    all_coords = []
    for i in range(nlayers_slab):
        fcoords = frac_coords.copy()
        fcoords[:, 2] += i / nlayers
        all_coords.extend(fcoords)
    slab = Structure(new_lattice, species * nlayers_slab, all_coords,
                     site_properties=props)
    scale_factor = self.slab_scale_factor
    if self.lll_reduce:
        lll_slab = slab.copy(sanitize=True)
        mapping = lll_slab.lattice.find_mapping(slab.lattice)
        scale_factor = np.dot(mapping[2], scale_factor)
        slab = lll_slab
    if self.center_slab:
        avg_c = np.average([c[2] for c in slab.frac_coords])
        slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
    if self.primitive:
        prim = slab.get_primitive_structure(tolerance=tol)
        if energy is not None:
            energy = (prim.volume / slab.volume) * energy
        slab = prim
    ouc = self.oriented_unit_cell.copy()
    if self.primitive:
        slab_l = slab.lattice
        ouc = ouc.get_primitive_structure(constrain_latt={
            'a': slab_l.a, 'b': slab_l.b, 'alpha': slab_l.alpha,
            'beta': slab_l.beta, 'gamma': slab_l.gamma})
    return Slab(slab.lattice, slab.species_and_occu, slab.frac_coords,
                self.miller_index, ouc, shift, scale_factor, energy=energy,
                site_properties=slab.site_properties,
                reorient_lattice=self.reorient_lattice)
This method takes in shift value for the c lattice direction and generates a slab based on the given shift. You should rarely use this method. Instead, it is used by other generation algorithms to obtain all slabs. Arg: shift (float): A shift value in Angstrom that determines how much a slab should be shifted. tol (float): Tolerance to determine primitive cell. energy (float): An energy to assign to the slab. Returns: (Slab) A Slab object with a particular shifted oriented unit cell.
codesearchnet
def _DecodeURL(self, url):
    if not url:
        return ''
    decoded_url = urlparse.unquote(url)
    if isinstance(decoded_url, py2to3.BYTES_TYPE):
        try:
            decoded_url = decoded_url.decode('utf-8')
        except UnicodeDecodeError as exception:
            decoded_url = decoded_url.decode('utf-8', errors='replace')
            logger.warning(
                'Unable to decode URL: {0:s} with error: {1!s}'.format(
                    url, exception))
    return decoded_url
Decodes the URL, replaces %XX to their corresponding characters. Args: url (str): encoded URL. Returns: str: decoded URL.
juraj-google-style
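The core behavior in Python 3 stdlib terms, as a runnable sketch:

from urllib.parse import unquote

assert unquote("a%20b%2Fc") == "a b/c"  # %XX escapes become their characters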
def recall_at_precision(y_true, y_pred, precision):
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    # Name the curve values distinctly so they do not shadow the `precision`
    # threshold argument.
    precision_curve, recall, _ = skm.precision_recall_curve(y_true, y_pred)
    return recall[np.searchsorted(precision_curve - precision, 0)]
Recall at a certain precision threshold Args: y_true: true labels y_pred: predicted labels precision: desired precision level at which to compute the recall
codesearchnet
def __init__(self, channel):
    self.AnalyzeSentiment = channel.unary_unary(
        "/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment",
        request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSentimentRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSentimentResponse.FromString,
    )
    self.AnalyzeEntities = channel.unary_unary(
        "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities",
        request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitiesRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitiesResponse.FromString,
    )
    self.AnalyzeEntitySentiment = channel.unary_unary(
        "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment",
        request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentResponse.FromString,
    )
    self.AnalyzeSyntax = channel.unary_unary(
        "/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax",
        request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSyntaxRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnalyzeSyntaxResponse.FromString,
    )
    self.ClassifyText = channel.unary_unary(
        "/google.cloud.language.v1beta2.LanguageService/ClassifyText",
        request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.ClassifyTextRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.ClassifyTextResponse.FromString,
    )
    self.AnnotateText = channel.unary_unary(
        "/google.cloud.language.v1beta2.LanguageService/AnnotateText",
        request_serializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnnotateTextRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_language__v1beta2_dot_proto_dot_language__service__pb2.AnnotateTextResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def reqHeadTimeStamp(self, contract: Contract, whatToShow: str,
                     useRTH: bool, formatDate: int = 1) -> datetime.datetime:
    return self._run(self.reqHeadTimeStampAsync(
        contract, whatToShow, useRTH, formatDate))
Get the datetime of earliest available historical data for the contract. Args: contract: Contract of interest. whatToShow: Data type to check availability for, e.g. 'TRADES' or 'BID'. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. formatDate: If set to 2 then the result is returned as a timezone-aware datetime.datetime with UTC timezone.
codesearchnet
def GetMessages(self, formatter_mediator, event):
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))
    event_values = event.CopyToDict()
    file_entry_type = event_values.get('file_entry_type', None)
    if file_entry_type is not None:
        event_values['file_entry_type'] = self._FILE_ENTRY_TYPES.get(
            file_entry_type, 'UNKNOWN')
    if (not event_values.get('allocated', False) and
            not event_values.get('is_allocated', False)):
        event_values['unallocated'] = 'unallocated'
    return self._ConditionalFormatMessages(event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def save(self, recipe):
    if 'id' in recipe and recipe['id'] is not None:
        self.logger.debug("Updating existing recipe: " + json.dumps(recipe))
        url = '%(base_url)s/recipe/json/%(recipe_id)s' % {
            'base_url': self.base_url,
            'recipe_id': recipe['id']
        }
        r = self.gbdx_connection.put(url, json=recipe)
        try:
            r.raise_for_status()
        except:
            print(r.text)
            raise
        return recipe['id']
    else:
        self.logger.debug("Creating new recipe: " + json.dumps(recipe))
        url = '%(base_url)s/recipe/json' % {
            'base_url': self.base_url
        }
        r = self.gbdx_connection.post(url, json=recipe)
        try:
            r.raise_for_status()
        except:
            print(r.text)
            raise
        recipe_json = r.json()
        return recipe_json['id']
Saves an AnswerFactory Recipe Args: recipe (dict): Dictionary specifying a recipe Returns: AnswerFactory Recipe id
juraj-google-style
def _inf_or_operator_handler_factory(c_start, is_delegate=True):
    @coroutine
    def inf_or_operator_handler(c, ctx):
        next_ctx = None
        if not is_delegate:
            ctx.value.append(c_start)
            c, self = yield
        else:
            assert ctx.value[0] == c_start
            assert c not in _DIGITS
            ctx.queue.unread(c)
            next_ctx = ctx
            _, self = yield
            assert c == _
        maybe_inf = True
        ctx.set_ion_type(IonType.FLOAT)
        match_index = 0
        trans = ctx.immediate_transition(self)
        while True:
            if maybe_inf:
                if match_index < len(_INF_SUFFIX):
                    maybe_inf = c == _INF_SUFFIX[match_index]
                else:
                    if _ends_value(c) or (ctx.container.ion_type is IonType.SEXP
                                          and c in _OPERATORS):
                        yield ctx.event_transition(
                            IonEvent, IonEventType.SCALAR, IonType.FLOAT,
                            c_start == _MINUS and _NEG_INF or _POS_INF)
                    else:
                        maybe_inf = False
            if maybe_inf:
                match_index += 1
            else:
                ctx.set_unicode()
                if match_index > 0:
                    next_ctx = ctx.derive_child_context(ctx.whence)
                    for ch in _INF_SUFFIX[0:match_index]:
                        next_ctx.value.append(ch)
                break
            c, self = yield trans
        if ctx.container is not _C_SEXP:
            _illegal_character(c, next_ctx is None and ctx or next_ctx,
                               'Illegal character following %s.' % (_chr(c_start),))
        if match_index == 0:
            if c in _OPERATORS:
                yield ctx.immediate_transition(_operator_symbol_handler(c, ctx))
            yield ctx.event_transition(IonEvent, IonEventType.SCALAR,
                                       IonType.SYMBOL, ctx.value.as_symbol())
        yield _CompositeTransition(
            ctx.event_transition(IonEvent, IonEventType.SCALAR,
                                 IonType.SYMBOL, ctx.value.as_symbol()),
            ctx, partial(_unquoted_symbol_handler, c), next_ctx)
    return inf_or_operator_handler
Generates handler co-routines for values that may be `+inf` or `-inf`. Args: c_start (int): The ordinal of the character that starts this token (either `+` or `-`). is_delegate (bool): True if a different handler began processing this token; otherwise, False. This will only be true for `-inf`, because it is not the only value that can start with `-`; `+inf` is the only value (outside of an s-expression) that can start with `+`.
juraj-google-style
def _create_filters(col_params, extractors): result = [] for col_param, extractor in zip(col_params, extractors): a_filter = _create_filter(col_param, extractor) if a_filter: result.append(a_filter) return result
Creates filters for the given col_params. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. extractors: List of extractor functions of the same length as col_params. Each element should extract the column described by the corresponding element of col_params. Returns: A list of filter functions, each corresponding to the `filter` oneof field of a single col_param in the request.
juraj-google-style
def add(x1, x2): if any_symbolic_tensors((x1, x2)): return Add().symbolic_call(x1, x2) return backend.numpy.add(x1, x2)
Add arguments element-wise. Args: x1: First input tensor. x2: Second input tensor. Returns: The tensor containing the element-wise sum of `x1` and `x2`. Examples: >>> x1 = keras.ops.convert_to_tensor([1, 4]) >>> x2 = keras.ops.convert_to_tensor([5, 6]) >>> keras.ops.add(x1, x2) array([6, 10], dtype=int32) `keras.ops.add` also broadcasts shapes: >>> x1 = keras.ops.convert_to_tensor( ... [[5, 4], ... [5, 6]] ... ) >>> x2 = keras.ops.convert_to_tensor([5, 6]) >>> keras.ops.add(x1, x2) array([[10 10] [10 12]], shape=(2, 2), dtype=int32)
github-repos
def _buckets_nearly_equal(a_dist, b_dist): (a_type, a_buckets) = _detect_bucket_option(a_dist) (b_type, b_buckets) = _detect_bucket_option(b_dist) if (a_type != b_type): return False elif (a_type == u'linearBuckets'): return _linear_buckets_nearly_equal(a_buckets, b_buckets) elif (a_type == u'exponentialBuckets'): return _exponential_buckets_nearly_equal(a_buckets, b_buckets) elif (a_type == u'explicitBuckets'): return _explicit_buckets_nearly_equal(a_buckets, b_buckets) else: return False
Determines whether two `Distributions` are nearly equal. Args: a_dist (:class:`Distribution`): an instance b_dist (:class:`Distribution`): another instance Returns: boolean: `True` if the two instances are approximately equal, otherwise False
codesearchnet
def handle_triple(self, lhs, relation, rhs): relation = relation.replace(':', '', 1) if self.is_relation_inverted(relation): (source, target, inverted) = (rhs, lhs, True) relation = self.invert_relation(relation) else: (source, target, inverted) = (lhs, rhs, False) source = _default_cast(source) target = _default_cast(target) if (relation == ''): relation = None return Triple(source, relation, target, inverted)
Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object.
codesearchnet
def get_http_raw(self, url=None, retry_count=3, headers=None, request_type='GET', form_data=None): if (headers is None): headers = {'Accept': 'text/html'} enc_form_data = None if form_data: enc_form_data = urlencode(form_data) try: enc_form_data = bytes(enc_form_data, encoding='ascii') except TypeError: pass try: log.debug('HTTP query for {0} at {1}'.format(self.address_str, url)) try: conn = Request(url=url, data=enc_form_data, headers=headers, **{'method': request_type}) except TypeError: conn = Request(url=url, data=enc_form_data, headers=headers) data = self.opener.open(conn, timeout=self.timeout) try: d = data.readall().decode('ascii', 'ignore') except AttributeError: d = data.read().decode('ascii', 'ignore') return str(d) except (URLError, socket.timeout, socket.error) as e: log.debug('HTTP query socket error: {0}'.format(e)) if (retry_count > 0): log.debug('HTTP query retrying (count: {0})'.format(str(retry_count))) return self.get_http_raw(url=url, retry_count=(retry_count - 1), headers=headers, request_type=request_type, form_data=form_data) else: raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) except HTTPLookupError as e: raise e except Exception: raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))
The function for retrieving a raw HTML result via HTTP. Args: url (:obj:`str`): The URL to retrieve (required). retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. headers (:obj:`dict`): The HTTP headers. The Accept header defaults to 'text/html'. request_type (:obj:`str`): Request type 'GET' or 'POST'. Defaults to 'GET'. form_data (:obj:`dict`): Optional form POST data. Returns: str: The raw data. Raises: HTTPLookupError: The HTTP lookup failed.
codesearchnet
def get_json_files(files, recursive=False): json_files = [] if not files: return json_files for fn in files: if os.path.isdir(fn): children = list_json_files(fn, recursive) json_files.extend(children) elif is_json(fn): json_files.append(fn) else: continue if not json_files: raise NoJSONFileFoundError("No JSON files found!") return json_files
Return a list of files to validate from `files`. If a member of `files` is a directory, its children with a ``.json`` extension will be added to the return value. Args: files: A list of file paths and/or directory paths. recursive: If ``true``, this will descend into any subdirectories of input directories. Returns: A list of file paths to validate.
juraj-google-style
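A minimal usage sketch for the helper above; the paths are hypothetical, and `NoJSONFileFoundError` comes from the same validator module:

try:
    json_files = get_json_files(['bundle.json', 'samples/'], recursive=True)
    for path in json_files:
        print('will validate:', path)
except NoJSONFileFoundError as error:
    print(error)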
def _GetLinkedPath(self, event): if hasattr(event, 'local_path'): return event.local_path if hasattr(event, 'network_path'): return event.network_path if hasattr(event, 'relative_path'): paths = [] if hasattr(event, 'working_directory'): paths.append(event.working_directory) paths.append(event.relative_path) return '\\'.join(paths) return 'Unknown'
Determines the linked path. Args: event (EventObject): event that contains a linked path. Returns: str: linked path.
codesearchnet
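A small illustration of the fallback logic, using a stand-in event object (the real method lives on a formatter instance, so `formatter` is assumed here):

from types import SimpleNamespace

# No local_path/network_path attributes, so the method joins the working
# directory and relative path with a backslash.
event = SimpleNamespace(relative_path='Users\\foo\\app.exe',
                        working_directory='C:')
# formatter._GetLinkedPath(event) -> 'C:\\Users\\foo\\app.exe'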
def query_google(point, max_distance, key): if (not key): return [] if from_cache(GG_CACHE, point, max_distance): return from_cache(GG_CACHE, point, max_distance) req = requests.get((GOOGLE_PLACES_URL % (point.lat, point.lon, max_distance, key))) if (req.status_code != 200): return [] response = req.json() results = response['results'] final_results = [] for local in results: final_results.append({'label': local['name'], 'distance': Point(local['geometry']['location']['lat'], local['geometry']['location']['lng'], None).distance(point), 'types': local['types'], 'suggestion_type': 'GOOGLE'}) google_insert_cache(point, final_results) return final_results
Queries google maps API for a location Args: point (:obj:`Point`): Point location to query max_distance (float): Search radius, in meters key (str): Valid google maps api key Returns: :obj:`list` of :obj:`dict`: List of locations with the following format: { 'label': 'Coffee house', 'distance': 12.4, 'types': ['cafe', 'food'], 'suggestion_type': 'GOOGLE' }
codesearchnet
def find_proxy_plugin(component, plugin_name): reg = ComponentRegistry() plugins = reg.load_extensions('iotile.proxy_plugin', comp_filter=component, class_filter=TileBusProxyPlugin, product_name='proxy_plugin') for (_name, plugin) in plugins: if (plugin.__name__ == plugin_name): return plugin raise DataError('Could not find proxy plugin module in registered components or installed distributions', component=component, name=plugin_name)
Attempt to find a proxy plugin provided by a specific component Args: component (string): The name of the component that provides the plugin plugin_name (string): The name of the plugin to load Returns: TileBusProxyPlugin: The plugin, if found, otherwise raises DataError
codesearchnet
def create_alias(target_path, alias_path): if ((platform.system() == 'Windows') and (not alias_path.endswith('.lnk'))): alias_path += '.lnk' if os.path.lexists(alias_path): os.remove(alias_path) if (platform.system() == 'Windows'): from win32com import client shell = client.Dispatch('WScript.Shell') shortcut = shell.CreateShortCut(alias_path) shortcut.Targetpath = target_path shortcut.save() else: os.symlink(target_path, alias_path)
Creates an alias at 'alias_path' pointing to the file 'target_path'. On Unix, this is implemented via symlink. On Windows, this is done by creating a Windows shortcut file. Args: target_path: Destination path that the alias should point to. alias_path: Path at which to create the new alias.
codesearchnet
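A usage sketch with assumed temp-file paths; per the code above, Windows gets a '.lnk' shortcut (the suffix is appended automatically if missing), other platforms a symlink:

import os
import tempfile

target = os.path.join(tempfile.gettempdir(), 'report.txt')
open(target, 'w').close()  # make sure the target exists

# Replaces any existing alias at the destination path.
create_alias(target, os.path.join(tempfile.gettempdir(), 'report-latest'))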
def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None): if (not isinstance(scf_task, ScfTask)): raise TypeError(('task `%s` does not inherit from ScfTask' % scf_task)) new = cls(manager=manager) multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance) ddk_tasks = [] for ddk_inp in multi_ddk: ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: 'WFK'}) ddk_tasks.append(ddk_task) multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False) dde_tasks = [] dde_deps = {ddk_task: 'DDK' for ddk_task in ddk_tasks} dde_deps.update({scf_task: 'WFK'}) for dde_inp in multi_dde: dde_task = new.register_dde_task(dde_inp, deps=dde_deps) dde_tasks.append(dde_task) dte_deps = {scf_task: 'WFK DEN'} dte_deps.update({dde_task: '1WF 1DEN' for dde_task in dde_tasks}) multi_dte = scf_task.input.make_dte_inputs() dte_tasks = [] for dte_inp in multi_dte: dte_task = new.register_dte_task(dte_inp, deps=dte_deps) dte_tasks.append(dte_task) return new
Build a DteWork from a ground-state task. Args: scf_task: ScfTask object. ddk_tolerance: tolerance used in the DDK run. None to use AbiPy default. manager: :class:`TaskManager` object.
codesearchnet
def __init__(self, action, chunk_size=None): chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE self._action = action self._streamer = streaming.Streamer(chunk_size=chunk_size)
Initializes the uploader. Args: action: A parent action that creates the uploader. Used to communicate with the parent flow. chunk_size: A number of (uncompressed) bytes per a chunk.
juraj-google-style
def shift(self, time: int) -> 'Interval': return Interval(self._begin + time, self._end + time)
Return a new interval shifted by `time` from self Args: time: time to be shifted Returns: Interval: interval shifted by `time`
juraj-google-style
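Given the implementation above, a sketch of the behavior (assuming an `Interval(begin, end)` constructor):

window = Interval(10, 50)
later = window.shift(25)   # Interval(35, 75); `window` itself is unchanged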
def explicit(fixed_qubits: Iterable[raw_types.Qid], fallback: Optional['QubitOrder']=None) -> 'QubitOrder': result = tuple(fixed_qubits) if (len(set(result)) < len(result)): raise ValueError('Qubits appear in fixed_order twice: {}.'.format(result)) def func(qubits): remaining = (set(qubits) - set(fixed_qubits)) if (not remaining): return result if (not fallback): raise ValueError('Unexpected extra qubits: {}.'.format(remaining)) return (result + fallback.order_for(remaining)) return QubitOrder(func)
A basis that contains exactly the given qubits in the given order. Args: fixed_qubits: The qubits in basis order. fallback: A fallback order to use for extra qubits not in the fixed_qubits list. Extra qubits will always come after the fixed_qubits, but will be ordered based on the fallback. If no fallback is specified, a ValueError is raised when extra qubits are specified. Returns: A Basis instance that forces the given qubits in the given order.
codesearchnet
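A usage sketch with Cirq's public API; the fallback handles qubits outside the fixed list:

import cirq

a, b, c = cirq.LineQubit.range(3)
order = cirq.QubitOrder.explicit([c, a], fallback=cirq.QubitOrder.DEFAULT)
# order.order_for({a, b, c}) -> (c, a, b): fixed qubits first, then extras
# ordered by the fallback. Without a fallback, extras raise ValueError.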
def compare_modules(file_, imports): modules = parse_requirements(file_) imports = [imports[i]['name'] for i in range(len(imports))] modules = [modules[i]['name'] for i in range(len(modules))] modules_not_imported = (set(modules) - set(imports)) return modules_not_imported
Compare modules in a file to imported modules in a project. Args: file_ (str): File to parse for modules to be compared. imports (tuple): Modules being imported in the project. Returns: tuple: The modules that exist in the specified file but are not imported in the project.
codesearchnet
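A sketch of how this might be called; `imports` mirrors the `{'name': ...}` dict shape the code indexes into, with hypothetical values:

imports = [{'name': 'requests'}, {'name': 'yaml'}]
unused = compare_modules('requirements.txt', imports)
# -> set of names pinned in requirements.txt but never imported,
#    e.g. {'flask'}
print(unused)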
def __init__(self, funcs, trackable_obj=None): super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)
Constructor for TFLiteConverter. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. trackable_obj: tf.AutoTrackable object associated with `funcs`. A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. `from_saved_model`).
github-repos
def __parse_tostr(self, text, **kwargs): n = self.options.get('nbest', 1) if self._KW_BOUNDARY in kwargs: patt = kwargs.get(self._KW_BOUNDARY, '.') tokens = list(self.__split_pattern(text, patt)) text = ''.join([t[0] for t in tokens]) btext = self.__str2bytes(text) self.__mecab.mecab_lattice_set_sentence(self.lattice, btext) bpos = 0 self.__mecab.mecab_lattice_set_boundary_constraint( self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY) for (token, match) in tokens: bpos += 1 if match: mark = self.MECAB_INSIDE_TOKEN else: mark = self.MECAB_ANY_BOUNDARY for _ in range(1, len(self.__str2bytes(token))): self.__mecab.mecab_lattice_set_boundary_constraint( self.lattice, bpos, mark) bpos += 1 self.__mecab.mecab_lattice_set_boundary_constraint( self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY) elif self._KW_FEATURE in kwargs: features = kwargs.get(self._KW_FEATURE, ()) fd = {morph: self.__str2bytes(feat) for morph, feat in features} tokens = self.__split_features(text, [e[0] for e in features]) text = ''.join([t[0] for t in tokens]) btext = self.__str2bytes(text) self.__mecab.mecab_lattice_set_sentence(self.lattice, btext) bpos = 0 for chunk, match in tokens: c = len(self.__str2bytes(chunk)) if match == True: self.__mecab.mecab_lattice_set_feature_constraint( self.lattice, bpos, bpos+c, fd[chunk]) bpos += c else: btext = self.__str2bytes(text) self.__mecab.mecab_lattice_set_sentence(self.lattice, btext) self.__mecab.mecab_parse_lattice(self.tagger, self.lattice) if n > 1: res = self.__mecab.mecab_lattice_nbest_tostr(self.lattice, n) else: res = self.__mecab.mecab_lattice_tostr(self.lattice) if res != self.__ffi.NULL: raw = self.__ffi.string(res) return self.__bytes2str(raw).strip() else: err = self.__mecab.mecab_lattice_strerror(self.lattice) logger.error(self.__bytes2str(self.__ffi.string(err))) raise MeCabError(self.__bytes2str(self.__ffi.string(err)))
Parses the given Unicode text via the MeCab lattice and returns the result as a string. A boundary-constraint pattern or per-morpheme feature constraints may be passed as keyword arguments, and the `nbest` option controls how many parse candidates are returned. Args: text: The Unicode text to parse. **kwargs: Optional boundary-constraint pattern (keyed by _KW_BOUNDARY) or feature-constraint pairs (keyed by _KW_FEATURE). Returns: The parse result as a string suitable for display on stdout, using either the default or N-best behavior. Raises: MeCabError: If the underlying MeCab lattice reports an error.
juraj-google-style
def pytest_terminal_summary_main(tr, id): from _pytest.config import create_terminal_writer if not len(id): id = 'tests' config = tr.config orig_writer = config.get_terminal_writer() orig_tbstyle = config.option.tbstyle orig_reportchars = tr.reportchars dir = f'reports/{id}' Path(dir).mkdir(parents=True, exist_ok=True) report_files = {k: f'{dir}/{k}.txt' for k in ['durations', 'errors', 'failures_long', 'failures_short', 'failures_line', 'passes', 'stats', 'summary_short', 'warnings']} dlist = [] for replist in tr.stats.values(): for rep in replist: if hasattr(rep, 'duration'): dlist.append(rep) if dlist: dlist.sort(key=lambda x: x.duration, reverse=True) with open(report_files['durations'], 'w') as f: durations_min = 0.05 f.write('slowest durations\n') for i, rep in enumerate(dlist): if rep.duration < durations_min: f.write(f'{len(dlist) - i} durations < {durations_min} secs were omitted') break f.write(f'{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n') def summary_failures_short(tr): reports = tr.getreports('failed') if not reports: return tr.write_sep('=', 'FAILURES SHORT STACK') for rep in reports: msg = tr._getfailureheadline(rep) tr.write_sep('_', msg, red=True, bold=True) longrepr = re.sub('.*_ _ _ (_ ){10,}_ _ ', '', rep.longreprtext, 0, re.M | re.S) tr._tw.line(longrepr) config.option.tbstyle = 'auto' with open(report_files['failures_long'], 'w') as f: tr._tw = create_terminal_writer(config, f) tr.summary_failures() with open(report_files['failures_short'], 'w') as f: tr._tw = create_terminal_writer(config, f) summary_failures_short(tr) config.option.tbstyle = 'line' with open(report_files['failures_line'], 'w') as f: tr._tw = create_terminal_writer(config, f) tr.summary_failures() with open(report_files['errors'], 'w') as f: tr._tw = create_terminal_writer(config, f) tr.summary_errors() with open(report_files['warnings'], 'w') as f: tr._tw = create_terminal_writer(config, f) tr.summary_warnings() tr.summary_warnings() tr.reportchars = 'wPpsxXEf' with open(report_files['summary_short'], 'w') as f: tr._tw = create_terminal_writer(config, f) tr.short_test_summary() with open(report_files['stats'], 'w') as f: tr._tw = create_terminal_writer(config, f) tr.summary_stats() tr._tw = orig_writer tr.reportchars = orig_reportchars config.option.tbstyle = orig_tbstyle
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current directory. The report files are prefixed with the test suite name. This function emulates --duration and -rA pytest arguments. This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined there. Args: - tr: `terminalreporter` passed from `conftest.py` - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-` plugins and interfere.
github-repos
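The docstring above calls for a `conftest.py` wrapper; a minimal sketch, with the import path assumed to be wherever this function lives in your project:

# conftest.py
def pytest_terminal_summary(terminalreporter):
    from my_project.testing_utils import pytest_terminal_summary_main
    pytest_terminal_summary_main(terminalreporter, id='tests')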
def with_claims(self, additional_claims): new_additional_claims = copy.deepcopy(self._additional_claims) new_additional_claims.update(additional_claims or {}) return self.__class__( self._signer, service_account_email=self._service_account_email, scopes=self._scopes, token_uri=self._token_uri, subject=self._subject, project_id=self._project_id, additional_claims=new_additional_claims)
Returns a copy of these credentials with modified claims. Args: additional_claims (Mapping[str, str]): Any additional claims for the JWT payload. This will be merged with the current additional claims. Returns: google.auth.service_account.Credentials: A new credentials instance.
juraj-google-style
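A usage sketch; `creds` is assumed to be an existing service-account `Credentials` instance:

scoped = creds.with_claims({'aud': 'https://example.com/api'})
# `creds` is untouched; `scoped` carries the merged additional claims.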
def add_variable(self, feature_column, var): del feature_column, var raise NotImplementedError('StateManager.add_variable')
Adds an existing variable to the state. Args: feature_column: A `FeatureColumn` object to associate this variable with. var: The variable.
github-repos
def create_exception_by_name(name, detailCode='0', description='', traceInformation=None, identifier=None, nodeId=None): try: dataone_exception = globals()[name] except LookupError: dataone_exception = ServiceFailure return dataone_exception(detailCode, description, traceInformation, identifier, nodeId)
Create a DataONEException based object by name. Args: name: str The type name of a DataONE Exception. E.g. NotFound. If an unknown type name is used, it is automatically set to ServiceFailure. As the XML Schema for DataONE Exceptions does not restrict the type names, this may occur when deserializing an exception not defined by DataONE. detailCode: int Optional index into a table of predefined error conditions. See Also: For remaining args, see: ``DataONEException()``
codesearchnet
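A usage sketch (this helper sits in the DataONE common client libraries; the argument values are hypothetical):

exc = create_exception_by_name(
    'NotFound', detailCode='1800', description='No object with that PID')

# An unrecognized type name silently falls back to ServiceFailure:
fallback = create_exception_by_name('SomeVendorError')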
def evaluate(conditions, leaf_evaluator): if isinstance(conditions, list): if conditions[0] in list(EVALUATORS_BY_OPERATOR_TYPE.keys()): return EVALUATORS_BY_OPERATOR_TYPE[conditions[0]](conditions[1:], leaf_evaluator) else: return EVALUATORS_BY_OPERATOR_TYPE[ConditionOperatorTypes.OR](conditions, leaf_evaluator) leaf_condition = conditions return leaf_evaluator(leaf_condition)
Top level method to evaluate conditions. Args: conditions: Nested array of and/or conditions, or a single leaf condition value of any type. Example: ['and', '0', ['or', '1', '2']] leaf_evaluator: Function which will be called to evaluate leaf condition values. Returns: Boolean: Result of evaluating the conditions using the operator rules and the leaf evaluator. None: if conditions couldn't be evaluated.
juraj-google-style
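A sketch of the matching semantics with a toy leaf evaluator (assumes `evaluate` is imported from its module, since it relies on the module-level operator table):

conditions = ['and', '0', ['or', '1', '2']]
matched = {'0', '2'}
result = evaluate(conditions, lambda leaf: leaf in matched)
# -> True: '0' holds, and the inner 'or' is satisfied via '2'.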
def optional(name, default) -> 'Wildcard': return Wildcard(min_count=1, fixed_size=True, variable_name=name, optional=default)
Create a `Wildcard` that matches a single argument with a default value. If the wildcard does not match, the substitution will contain the default value instead. Args: name: The name for the wildcard. default: The default value of the wildcard. Returns: An optional wildcard.
codesearchnet
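A short sketch of how this reads in a pattern (the name and default are hypothetical):

# If this wildcard's slot is absent in the subject, the substitution
# maps 'e' to the default value 1 instead of failing the match.
exponent = optional('e', 1)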
def select(self, selector): if self._is_single_string_selector(selector, 'name'): return self._all_models_by_name.get_all(selector['name']) else: return find(self._all_models.values(), selector)
Query this document for objects that match the given selector. Args: selector (JSON-like query dictionary) : you can query by type or by name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}`` Returns: seq[Model]
juraj-google-style
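A usage sketch with Bokeh's documented selector shapes:

from bokeh.io import curdoc
from bokeh.models import HoverTool

doc = curdoc()
hover_tools = doc.select({'type': HoverTool})  # every HoverTool in the doc
circles = doc.select({'name': 'mycircle'})     # all models named "mycircle"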
def _extract_type_spec_recursively(value): if isinstance(value, composite_tensor.CompositeTensor): return value._type_spec if isinstance(value, variables.Variable): return resource_variable_ops.VariableSpec(value.shape, dtype=value.dtype, trainable=value.trainable) if tensor_util.is_tensor(value): return tensor_spec.TensorSpec(value.shape, value.dtype) if isinstance(value, list): return list((_extract_type_spec_recursively(v) for v in value)) if isinstance(value, data_structures.TrackableDataStructure): return _extract_type_spec_recursively(value.__wrapped__) if isinstance(value, tuple): return type(value)((_extract_type_spec_recursively(x) for x in value)) if isinstance(value, dict): return type(value)(((k, _extract_type_spec_recursively(v)) for k, v in value.items())) return value
Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s. If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If `value` is a collection containing `Tensor` values, recursively supplant them with their respective `TypeSpec`s in a collection of parallel structure. If `value` is none of the above, return it unchanged. Args: value: a Python `object` to (possibly) turn into a (collection of) `tf.TypeSpec`(s). Returns: spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`, or `value` itself if no `Tensor`s are found.
github-repos
def track_event(self, name, properties=None, measurements=None): data = channel.contracts.EventData() data.name = (name or NULL_CONSTANT_STRING) if properties: data.properties = properties if measurements: data.measurements = measurements self.track(data, self._context)
Send information about a single event that has occurred in the context of the application. Args: name (str). the name of the event. properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None) measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
codesearchnet
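A usage sketch with the Application Insights client; the instrumentation key is a placeholder:

from applicationinsights import TelemetryClient

tc = TelemetryClient('<instrumentation key>')
tc.track_event('video_played',
               properties={'title': 'intro'},
               measurements={'seconds_watched': 42.0})
tc.flush()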
def head(self, n=10): r = self.__repr__().split('\n') print('\n'.join(r[:n]), end=' ')
Display the top of the file. Args: n (int): Number of lines to display
juraj-google-style
def __getitem__(self, key): if key in self._layout_map: return self._layout_map[key] matching_keys = [] for k in self._layout_map: if re.search(k, key): matching_keys.append(k) if len(matching_keys) > 1: raise ValueError(f"Path '{key}' matches multiple layout specification keys: {matching_keys}. Please make sure each tensor/variable path only matches at most one layout specification key in the LayoutMap.") elif len(matching_keys) == 1: return self._layout_map[matching_keys[0]] return None
Retrieves the corresponding layout by the string key. When there isn't an exact match, all the existing keys in the layout map will be treated as a regex and mapped against the input key again. When there are multiple matches for the regex, a `ValueError` will be raised. Returns `None` if there isn't any match found. Args: key: String key to query a layout. Returns: Corresponding layout based on the query.
github-repos
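A sketch of the regex-fallback lookup described above, assuming the map supports item assignment as in the Keras distribution API; `kernel_layout` stands in for a real layout object:

layout_map = LayoutMap()
layout_map['dense.*kernel'] = kernel_layout   # hypothetical layout value

layout_map['model/dense_1/kernel']  # -> kernel_layout (regex match)
layout_map['model/dense_1/bias']    # -> None (no key matches)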
def run(self, test_config, ref_dir, tmp_dir, mode, heartbeat=None, num_attempts=0):
    assert 'name' in test_config
    name = test_config['name']
    if 'ref' in test_config:
        assert 'run' in test_config
        arm_config = { 'name': name }
        if mode == 'test':
            arm_config.update(test_config['run'])
        elif mode == 'update':
            arm_config.update(test_config['ref'])
        test_config = arm_config
    assert 'url' in test_config
    test_dir = tempfile.mkdtemp(dir=tmp_dir)
    log_file = os.path.join(test_dir, 'log.txt')
    output_path = os.path.join(test_dir, 'screenshot.png')
    logging.info('Test config:\n%s', json.dumps(test_config, indent=2))
    capture_config = copy.deepcopy(test_config.get('config', {}))
    capture_config['targetUrl'] = test_config['url']
    config_file = os.path.join(test_dir, 'config.json')
    json.dump(capture_config, open(config_file, 'w'), indent=2)
    ref_path = os.path.join(ref_dir, '%s.png' % name)
    if mode == 'test':
        assert os.path.exists(ref_path), (
            'Reference image %s does not exist. '
            'Try running in update mode.' % ref_path)
    elif mode == 'update':
        output_path = ref_path
        ref_path = None
    else:
        raise ValueError('Invalid mode %s' % mode)

    class NamedHeartbeat(workers.WorkflowItem):
        def run(self, message):
            yield heartbeat('%s: %s' % (name, message))

    try:
        yield CaptureAndDiffWorkflowItem(
            name, log_file, config_file, output_path, ref_path,
            heartbeat=NamedHeartbeat)
    except capture_worker.CaptureFailedError as e:
        if num_attempts >= e.max_attempts:
            yield heartbeat('Unable to capture screenshot after %d tries.' % num_attempts)
            raise e
        else:
            num_attempts += 1
            yield heartbeat('Capture failed, retrying (%d)' % num_attempts)
            yield OneTestWorkflowItem(test_config, ref_dir, tmp_dir, mode,
                                      heartbeat=heartbeat,
                                      num_attempts=num_attempts)
Build a CaptureAndDiffWorkflowItem for a test. Args: test_config: See test.yaml for structure of test_config. Returns: A CaptureAndDiffWorkflowItem
juraj-google-style