Dataset columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string, 3 classes: juraj-google-style, codesearchnet, github-repos).
def __init__(self, dfk, memoize=True, checkpoint={}):
    self.dfk = dfk
    self.memoize = memoize
    if self.memoize:
        logger.info("App caching initialized")
        self.memo_lookup_table = checkpoint
    else:
        logger.info("App caching disabled for all apps")
        self.memo_lookup_table = {}
Initialize the memoizer. Args: - dfk (DFK obj): The DFK object KWargs: - memoize (Bool): enable memoization or not. - checkpoint (Dict): A checkpoint loaded as a dict.
juraj-google-style
def fields_equal(self, instance, fields_to_ignore=('id', 'change_date', 'changed_by')):
    for field in self._meta.get_fields():
        if (not field.many_to_many) and (field.name not in fields_to_ignore):
            if getattr(instance, field.name) != getattr(self, field.name):
                return False
    return True
Compares this instance's fields to the supplied instance to test for equality. This will ignore any fields in `fields_to_ignore`. Note that this method ignores many-to-many fields. Args: instance: the model instance to compare fields_to_ignore: List of fields that should not be compared for equality. By default includes `id`, `change_date`, and `changed_by`. Returns: True if the checked fields are all equivalent, else False
codesearchnet
def compare_forks(self, cur_fork_head, new_fork_head):
    if new_fork_head.consensus != b"Devmode":
        raise TypeError(
            'New fork head {} is not a DevMode block'.format(
                new_fork_head.identifier[:8]))

    if cur_fork_head.consensus != b"Devmode":
        if new_fork_head.previous_block_id == cur_fork_head.identifier:
            LOGGER.info(
                'Choose new fork %s: New fork head switches consensus to '
                'DevMode', new_fork_head.identifier[:8])
            return True
        raise TypeError(
            'Trying to compare a DevMode block {} to a non-DevMode '
            'block {} that is not the direct predecessor'.format(
                new_fork_head.identifier[:8], cur_fork_head.identifier[:8]))

    if new_fork_head.block_num == cur_fork_head.block_num:
        cur_fork_hash = self.hash_signer_public_key(
            cur_fork_head.header.signer_public_key,
            cur_fork_head.header.previous_block_id)
        new_fork_hash = self.hash_signer_public_key(
            new_fork_head.header.signer_public_key,
            new_fork_head.header.previous_block_id)
        result = new_fork_hash < cur_fork_hash
    else:
        result = new_fork_head.block_num > cur_fork_head.block_num

    return result
The longest chain is selected. If they are equal, then the hash value of the previous block id and publisher signature is computed. The lowest result value is the winning block. Args: cur_fork_head: The current head of the block chain. new_fork_head: The head of the fork that is being evaluated. Returns: bool: True if choosing the new chain head, False if choosing the current chain head.
juraj-google-style
def from_node(cls, node): if not isinstance(node, aioxmpp.stanza.Message): raise AttributeError("node must be a aioxmpp.stanza.Message instance") msg = cls() msg._to = node.to msg._sender = node.from_ if None in node.body: msg.body = node.body[None] else: for key in node.body.keys(): msg.body = node.body[key] break for data in node.xep0004_data: if data.title == SPADE_X_METADATA: for field in data.fields: if field.var != "_thread_node": msg.set_metadata(field.var, field.values[0]) else: msg.thread = field.values[0] return msg
Creates a new spade.message.Message from an aioxmpp.stanza.Message Args: node (aioxmpp.stanza.Message): an aioxmpp Message Returns: spade.message.Message: a new spade Message
juraj-google-style
def backward(ctx, grad_output): args = ctx.saved_tensors grad_fn = ctx.grad_fn if grad_fn is None: raise ValueError('grad_fn must be provided for custom gradient') grads = grad_fn(*args, upstream=grad_output) if not isinstance(grads, tuple): grads = (grads,) return (None,) + grads
Backward pass computation specification. Args: ctx: Context object. grad_output: Gradient with respect to the output.
github-repos
def _tensor_product(self, other, reverse=False): if not isinstance(other, Stinespring): other = Stinespring(other) sa_l, sa_r = self._data sb_l, sb_r = other._data din_a, dout_a = self.dim din_b, dout_b = other.dim dtr_a = sa_l.shape[0] dtr_b = sb_l.shape[0] if reverse: shape_in = (dout_b, dtr_b, dout_a, dtr_a, din_b * din_a) shape_out = (dout_b * dtr_b * dout_a * dtr_a, din_b * din_a) else: shape_in = (dout_a, dtr_a, dout_b, dtr_b, din_a * din_b) shape_out = (dout_a * dtr_a * dout_b * dtr_b, din_a * din_b) if reverse: input_dims = self.input_dims() + other.input_dims() output_dims = self.output_dims() + other.output_dims() sab_l = np.kron(sb_l, sa_l) else: input_dims = other.input_dims() + self.input_dims() output_dims = other.output_dims() + self.output_dims() sab_l = np.kron(sa_l, sb_l) sab_l = np.reshape( np.transpose(np.reshape(sab_l, shape_in), (0, 2, 1, 3, 4)), shape_out) if sa_r is None and sb_r is None: sab_r = None else: if sa_r is None: sa_r = sa_l elif sb_r is None: sb_r = sb_l if reverse: sab_r = np.kron(sb_r, sa_r) else: sab_r = np.kron(sa_r, sb_r) sab_r = np.reshape( np.transpose(np.reshape(sab_r, shape_in), (0, 2, 1, 3, 4)), shape_out) return Stinespring((sab_l, sab_r), input_dims, output_dims)
Return the tensor product channel. Args: other (QuantumChannel): a quantum channel subclass. reverse (bool): If False return self ⊗ other, if True return other ⊗ self [Default: False] Returns: Stinespring: the tensor product channel as a Stinespring object. Raises: QiskitError: if other cannot be converted to a channel.
juraj-google-style
def _controller_name(self, objtype):
    if objtype.endswith('y'):
        return objtype[:-1] + 'ies'
    if (objtype[-1] in 'sx') or (objtype[-2:] in ['sh', 'ch']):
        return objtype + 'es'
    if objtype.endswith('an'):
        return objtype[:-2] + 'en'
    return objtype + 's'
Determines the controller name for the object's type Args: objtype (str): The object type Returns: A string with the controller name
codesearchnet
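A minimal standalone sketch of the pluralization rule documented in the row above; the helper name `pluralize` is hypothetical, and the rules simply mirror the `_controller_name` logic (y → ies, s/x/sh/ch → es, an → en, otherwise + s).

```python
def pluralize(objtype: str) -> str:
    # Mirror the controller-name rules: y -> ies, s/x/sh/ch -> es, an -> en, else -> s.
    if objtype.endswith('y'):
        return objtype[:-1] + 'ies'
    if objtype[-1] in 'sx' or objtype[-2:] in ('sh', 'ch'):
        return objtype + 'es'
    if objtype.endswith('an'):
        return objtype[:-2] + 'en'
    return objtype + 's'

for word in ('policy', 'box', 'branch', 'server'):
    print(word, '->', pluralize(word))
# policy -> policies, box -> boxes, branch -> branches, server -> servers
```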
def select_cross_device_ops(devices, session_config=None): requested_devices = set((device_util.canonicalize(d) for d in devices)) if ops.executing_eagerly_outside_functions(): logical_gpus = context.context().list_logical_devices(device_type='GPU') physical_gpus = context.context().list_physical_devices(device_type='GPU') if len(logical_gpus) != len(physical_gpus): logging.warning('NCCL is not supported when using virtual GPUs, fallingback to reduction to one device') return ReductionToOneDevice() machine_devices = context.context().list_logical_devices() else: machine_devices = device_lib.list_local_devices(session_config=session_config) using_devices = set() for d in machine_devices: if device_util.canonicalize(d.name) in requested_devices: using_devices.add(d.name) if len(using_devices) != len(requested_devices): logging.warning('Some requested devices in `tf.distribute.Strategy` are not visible to TensorFlow: %s', ','.join(list(requested_devices - using_devices))) if any(('gpu' not in d.lower() for d in requested_devices)): logging.warning('There are non-GPU devices in `tf.distribute.Strategy`, not using nccl allreduce.') return ReductionToOneDevice() if kernels.get_registered_kernels_for_op('NcclAllReduce'): return NcclAllReduce(num_packs=1) else: logging.warning('Nccl kernel is not found, not using nccl allreduce.') return ReductionToOneDevice()
Find the best `CrossDeviceOps` locally given a `tf.compat.v1.ConfigProto`. Args: devices: a list of devices passed to `tf.distribute.Strategy`. session_config: a `tf.compat.v1.ConfigProto` or `None`. If `None`, it will make decision based on all logical devices. Returns: A subclass of `CrossDeviceOps`.
github-repos
def trace_save_and_restore(obj): legacy_name = saveable_compat.get_saveable_name(obj) obj_save_fn = obj._serialize_to_tensors obj_restore_fn = obj._restore_from_tensors if isinstance(obj_save_fn, defun.ConcreteFunction): concrete_save = obj_save_fn else: @def_function.function def save_fn(): tensor_dict = obj_save_fn() if any((isinstance(v, tensor_callable.Callable) for v in tensor_dict.values())): raise NotImplementedError(f'Unable to export SavedModel with object of type {type(obj)} because it returns a Callable in `_serialize_to_tensors`. If you need this functionality please file a feature request.') if legacy_name: return {f'{legacy_name}{key}': value for key, value in tensor_dict.items()} return tensor_dict concrete_save = save_fn.get_concrete_function() if isinstance(obj_restore_fn, defun.ConcreteFunction): concrete_restore = obj_restore_fn else: @def_function.function def restore_fn(restored_tensors): if legacy_name: restored_tensors = {key[len(legacy_name):]: value for key, value in restored_tensors.items()} obj_restore_fn(restored_tensors) concrete_restore = restore_fn.get_concrete_function(concrete_save.structured_outputs) return (concrete_save, concrete_restore)
Traces `Trackable` serialize- and restore-from-tensors functions. Args: obj: A `Trackable` object. Returns: A tuple of the concrete save and restore Functions.
github-repos
def get_exception_handlers(node: astroid.node_classes.NodeNG, exception=Exception) -> List[astroid.ExceptHandler]: context = find_try_except_wrapper_node(node) if isinstance(context, astroid.TryExcept): return [handler for handler in context.handlers if error_of_type(handler, exception)] return None
Return the collections of handlers handling the exception in arguments. Args: node (astroid.NodeNG): A node that is potentially wrapped in a try except. exception (builtin.Exception or str): exception or name of the exception. Returns: list: the collection of handlers that are handling the exception or None.
codesearchnet
def _validate_alias_file_path(alias_file_path):
    if not os.path.exists(alias_file_path):
        raise CLIError(ALIAS_FILE_NOT_FOUND_ERROR)
    if os.path.isdir(alias_file_path):
        raise CLIError(ALIAS_FILE_DIR_ERROR.format(alias_file_path))
Make sure the alias file path exists and is not a directory. Args: alias_file_path: The alias file path to import aliases from.
codesearchnet
def unpack(self, buff, offset=0): super().unpack(buff, offset) self.wildcards = UBInt32(value=FlowWildCards.OFPFW_ALL, enum_ref=FlowWildCards) self.wildcards.unpack(buff, offset)
Unpack *buff* into this object. Do nothing, since the _length is already defined and it is just a Pad. Keep buff and offset just for compatibility with other unpack methods. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails.
juraj-google-style
def print_solution(model, solver): model_proto = model.Proto() response_proto = solver.ResponseProto() variables_in_objective_map = {} maximization = False if model_proto.HasField('objective'): objective = model_proto.objective for i in range(len(objective.vars)): variables_in_objective_map[objective.vars[i]] = objective.coeffs[i] if objective.scaling_factor < 0.0: maximization = True variable_assignments = [] variables_in_objective = [] num_vars = len(model_proto.variables) for var_index in range(num_vars): if not model_proto.variables[var_index].name: continue variable_name = model_proto.variables[var_index].name if var_index in variables_in_objective_map: coefficient = variables_in_objective_map[var_index] if coefficient: if maximization: coefficient *= -1 if coefficient < 0: variables_in_objective.append(' - {} * {}'.format( -coefficient, variable_name)) elif coefficient > 0: variables_in_objective.append(' + {} * {}'.format( coefficient, variable_name)) variable_assignments.append(' {} = {}\n'.format( variable_name, response_proto.solution[var_index])) print(''.join(variable_assignments), end='') if variables_in_objective and variables_in_objective[0][1] == '+': variables_in_objective[0] = variables_in_objective[0][2:] print('{}:{}'.format('Maximize' if maximization else 'Minimize', ''.join(variables_in_objective))) print('Objective value: {}\n'.format(solver.ObjectiveValue()))
Prints the solution associated with solver. If solver has already had Solve() called on it, prints the solution. This includes each variable and its assignment, along with the objective function and its optimal value. If solver has not had Solve() called on it, or there is no feasible solution, this will probably crash. Args: model: A pywrapcp.CpModel object. solver: A pywrapcp.CpSolver object. Returns: Nothing, but prints the solution associated with solver.
juraj-google-style
def get_field_to_observations_map(generator, query_for_tag=''): def increment(stat, event, tag=''): assert stat in TRACKED_FIELDS field_to_obs[stat].append(Observation(step=event.step, wall_time=event.wall_time, tag=tag)._asdict()) field_to_obs = dict([(t, []) for t in TRACKED_FIELDS]) for event in generator: if event.HasField('graph_def') and (not query_for_tag): increment('graph', event) if event.HasField('session_log') and (not query_for_tag): status = event.session_log.status if status == event_pb2.SessionLog.START: increment('sessionlog:start', event) elif status == event_pb2.SessionLog.STOP: increment('sessionlog:stop', event) elif status == event_pb2.SessionLog.CHECKPOINT: increment('sessionlog:checkpoint', event) elif event.HasField('summary'): for value in event.summary.value: if query_for_tag and value.tag != query_for_tag: continue for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items(): if value.HasField(proto_name): increment(display_name, event, value.tag) return field_to_obs
Return a field to `Observations` dict for the event generator. Args: generator: A generator over event protos. query_for_tag: A string that if specified, only create observations for events with this tag name. Returns: A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
juraj-google-style
def _on_report(self, report, connection_id): self._logger.info('Received report: %s', str(report)) self._trigger_callback('on_report', connection_id, report) return False
Callback function called when a report has been processed. Args: report (IOTileReport): The report object connection_id (int): The connection id related to this report Returns: - True to indicate that IOTileReportParser should also keep a copy of the report or False to indicate it should delete it.
juraj-google-style
def _MakeTimestamp(self, start=None, end=None):
    mysql_unsigned_bigint_max = 18446744073709551615
    ts_start = int(start or 0)
    if end is None:
        ts_end = mysql_unsigned_bigint_max
    else:
        ts_end = int(end)
    if ts_start == 0 and ts_end == mysql_unsigned_bigint_max:
        return None
    else:
        return (ts_start, ts_end)
Create a timestamp using a start and end time. Args: start: Start timestamp. end: End timestamp. Returns: A tuple (start, end) of converted timestamps or None for all time.
codesearchnet
def filter_publication(publication): if settings.USE_DUP_FILTER: publication = dup_filter.filter_publication(publication) if publication and settings.USE_ALEPH_FILTER: publication = aleph_filter.filter_publication( publication, cmp_authors=settings.ALEPH_FILTER_BY_AUTHOR ) return publication
Filter :class:`.Publication` objects using settings declared in :mod:`~harvester.settings` submodule. Args: publication (obj): :class:`.Publication` instance. Returns: obj/None: None if the publication was found in Aleph or `publication` if not.
juraj-google-style
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name): adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809') expanded_text_ad = { 'xsi_type': 'ExpandedTextAd', 'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name, 'headlinePart2': 'Only {=%s.Price}' % feed_name, 'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name, 'finalUrls': ['http: } operations = [{ 'operator': 'ADD', 'operand': { 'adGroupId': adgroup, 'ad': expanded_text_ad } } for adgroup in adgroup_ids] response = adgroup_ad_service.mutate(operations) if response and 'value' in response: for ad in response['value']: print ('Created an ad with ID "%s", type "%s", and status "%s".' % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status'])) else: raise errors.GoogleAdsError('No ads were added.')
Creates ExpandedTextAds that use ad customizations for specified AdGroups. Args: client: an AdWordsClient instance. adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to. feed_name: the name of the feed used to apply customizations. Raises: GoogleAdsError: if no ExpandedTextAds were added.
juraj-google-style
def ClientCertFromCSR(cls, csr): builder = x509.CertificateBuilder() common_name = csr.GetCN() serial = int(common_name.split(".")[1], 16) builder = builder.serial_number(serial) builder = builder.subject_name( x509.Name( [x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))])) now = rdfvalue.RDFDatetime.Now() now_plus_year = now + rdfvalue.Duration("52w") builder = builder.not_valid_after(now_plus_year.AsDatetime()) now_minus_ten = now - rdfvalue.Duration("10s") builder = builder.not_valid_before(now_minus_ten.AsDatetime()) ca_cert = config_lib._CONFIG["CA.certificate"] builder = builder.issuer_name(ca_cert.GetIssuer()) builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey()) ca_key = config_lib._CONFIG["PrivateKeys.ca_key"] return RDFX509Cert( builder.sign( private_key=ca_key.GetRawPrivateKey(), algorithm=hashes.SHA256(), backend=openssl.backend))
Creates a new cert for the given common name. Args: csr: A CertificateSigningRequest. Returns: The signed cert.
juraj-google-style
def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance): path = instance_to_path_map[config_instance] group = prop_tree for elem in path[:(- 1)]: group = getattr(group, elem) assert (group._key == config_instance.parent.key) setattr(group, config_instance.key, config_instance.value) term = getattr(group, config_instance.key) try: if hasattr(term, '_term'): term._term._config = config_instance return except KeyError: pass try: if hasattr(term, '_config'): term._config = config_instance return except KeyError: pass else: pass
Finds appropriate term in the prop_tree and sets its value from config_instance. Args: configs_map (dict): key is id of the config, value is Config instance (AKA cache of the configs) prop_tree (PropertyDictTree): property tree to populate. config_instance (Config):
codesearchnet
def get_day_of_month(datestring):
    get_day = re.compile('\\d{1,2}(st|nd|rd|th)?', re.IGNORECASE)
    day = get_day.search(datestring)
    the_day = None
    if day:
        if bool(re.search('[st|nd|rd|th]', day.group().lower())):
            the_day = day.group()[:-2]
        else:
            the_day = day.group()
        if int(the_day) < 10:
            the_day = add_zero(the_day)
    return str(the_day)
Transforms an ordinal number into plain number with padding zero. E.g. 3rd -> 03, or 12th -> 12 Keyword arguments: datestring -- a string Returns: String, or None if the transformation fails
codesearchnet
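A self-contained sketch of the ordinal-to-padded-day transformation described above, with a local `add_zero` stand-in since that helper is not shown in the row.

```python
import re

def add_zero(day: str) -> str:
    # Hypothetical stand-in for the add_zero helper referenced above.
    return day.zfill(2)

def day_of_month(datestring: str) -> str:
    match = re.search(r'\d{1,2}(st|nd|rd|th)?', datestring, re.IGNORECASE)
    if not match:
        return 'None'
    day = match.group()
    if re.search(r'(st|nd|rd|th)$', day, re.IGNORECASE):
        day = day[:-2]  # strip the ordinal suffix
    return add_zero(day) if int(day) < 10 else day

print(day_of_month('March 3rd'))   # 03
print(day_of_month('the 12th'))    # 12
```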
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): if ((self._last_header > header_path) and Match('^\\s* return False return True
Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order.
codesearchnet
def restore_server_connection(self, port=None): try: self.host_port = port self._make_connection() except Exception as e: self.log.error('Failed to re-connect to the server.') raise errors.ServerRestoreConnectionError(self._device, f'Failed to restore server connection for {self.package} at host port {self.host_port}, device port {self.device_port}.') from e self._proc = None self._restore_event_client()
Restores the server after the device got reconnected. Instead of creating a new instance of the client: - Uses the given port (or find a new available host port if none is given). - Tries to connect to the remote server with the selected port. Args: port: int, if given, this is the host port from which to connect to the remote device port. If not provided, find a new available port as host port. Raises: errors.ServerRestoreConnectionError: when failed to restore the connection to the snippet server.
github-repos
def get_attachment_data(cls, session, attachment_id): return cls( '/attachments/%d/data.json' % attachment_id, singleton=True, session=session, out_type=AttachmentData, )
Return a specific attachment's data. Args: session (requests.sessions.Session): Authenticated session. attachment_id (int): The ID of the attachment from which to get data. Returns: helpscout.models.AttachmentData: An attachment data singleton, if existing. Otherwise ``None``.
juraj-google-style
def recent(self, username, project, limit=1, offset=0, branch=None, status_filter=''): method = 'GET' if (branch is not None): url = '/project/{username}/{project}/tree/{branch}?circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'.format(username=username, project=project, branch=branch, token=self.client.api_token, limit=limit, offset=offset, status_filter=status_filter) else: url = '/project/{username}/{project}?circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'.format(username=username, project=project, token=self.client.api_token, limit=limit, offset=offset, status_filter=status_filter) json_data = self.client.request(method, url) return json_data
Return status of recent builds for given project. Retrieves build statuses for given project and branch. If branch is None it retrieves most recent build. Args: username (str): Name of the user. project (str): Name of the project. limit (int): Number of builds to return, default=1, max=100. offset (int): Returns builds starting from given offset. branch (str): Optional branch name as string. If specified only builds from given branch are returned. status_filter (str): Restricts which builds are returned. Set to "completed", "successful", "failed", "running", or defaults to no filter. Returns: A list of dictionaries with information about each build.
codesearchnet
def get_module(dir_path, relative_to_dir):
    dir_path = dir_path[len(relative_to_dir):]
    dir_path = dir_path.replace(os.sep, '/')
    return dir_path.replace('/', '.').strip('.')
Get module that corresponds to path relative to relative_to_dir. Args: dir_path: Path to directory. relative_to_dir: Get module relative to this directory. Returns: Name of module that corresponds to the given directory.
github-repos
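A quick illustrative run of the path-to-module conversion above; the directory values are made up for the example.

```python
import os

def get_module(dir_path, relative_to_dir):
    # Strip the base directory, then turn path separators into dots.
    dir_path = dir_path[len(relative_to_dir):]
    dir_path = dir_path.replace(os.sep, '/')
    return dir_path.replace('/', '.').strip('.')

print(get_module('/repo/pkg/sub/util', '/repo/'))  # pkg.sub.util
```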
def extract_tree_with(self, labels, suppress_unifurcations=True): return self.extract_tree(labels, False, suppress_unifurcations)
Extract a copy of this ``Tree`` with only the leaves labeled by the strings in ``labels`` Args: ``labels`` (``set``): Set of leaf labels to include. ``suppress_unifurcations`` (``bool``): ``True`` to suppress unifurcations, otherwise ``False`` Returns: Tree: Copy of this Tree, including only the leaves labeled by the strings in ``labels``
juraj-google-style
def _fuse_awq_layernorm(fuse_module_names, module, target_cls): for module_name in fuse_module_names: if hasattr(module, module_name): old_module = getattr(module, module_name) module._modules[module_name] = target_cls(old_module.weight, old_module.variance_epsilon).to(old_module.weight.device) del old_module
Fuse the LayerNorm layers into a target class using autoawq Args: fuse_module_names (`List[str]`): The list of module names to fuse module (`nn.Module`): The pytorch parent module that has layernorm modules to fuse target_cls (`~autoawq.FasterTransformerRMSNorm`): The `FasterTransformerRMSNorm` class as it only supports that class for now.
github-repos
def process_dimensions(kdims, vdims): dimensions = {} for (group, dims) in [('kdims', kdims), ('vdims', vdims)]: if (dims is None): continue elif isinstance(dims, (tuple, basestring, Dimension, dict)): dims = [dims] elif (not isinstance(dims, list)): raise ValueError(('%s argument expects a Dimension or list of dimensions, specified as tuples, strings, dictionaries or Dimension instances, not a %s type. Ensure you passed the data as the first argument.' % (group, type(dims).__name__))) for dim in dims: if (not isinstance(dim, (tuple, basestring, Dimension, dict))): raise ValueError(('Dimensions must be defined as a tuple, string, dictionary or Dimension instance, found a %s type.' % type(dim).__name__)) dimensions[group] = [asdim(d) for d in dims] return dimensions
Converts kdims and vdims to Dimension objects. Args: kdims: List or single key dimension(s) specified as strings, tuples, dicts or Dimension objects. vdims: List or single value dimension(s) specified as strings, tuples, dicts or Dimension objects. Returns: Dictionary containing kdims and vdims converted to Dimension objects: {'kdims': [Dimension('x')], 'vdims': [Dimension('y')]}
codesearchnet
def l2_regularizer(weight=1.0, scope=None):
    def regularizer(tensor):
        with tf.name_scope(scope, 'L2Regularizer', [tensor]):
            l2_weight = tf.convert_to_tensor(weight,
                                             dtype=tensor.dtype.base_dtype,
                                             name='weight')
            return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
    return regularizer
Define an L2 regularizer. Args: weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
codesearchnet
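A hedged usage sketch of the closure pattern above: the factory captures `weight` and returns a function of the tensor. NumPy stands in for TensorFlow here so the example stays self-contained; `tf.nn.l2_loss` computes 0.5 * sum(t ** 2).

```python
import numpy as np

def l2_regularizer(weight=1.0):
    # Same closure pattern as the row above: capture `weight`, return a function of the tensor.
    def regularizer(tensor):
        return weight * 0.5 * np.sum(np.square(tensor))  # 0.5 * sum(t ** 2), like tf.nn.l2_loss
    return regularizer

reg = l2_regularizer(weight=0.01)
print(reg(np.array([3.0, 4.0])))  # 0.01 * 0.5 * 25 = 0.125
```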
def set_commissions(self, fn):
    self.commission_fn = fn
    for c in self._childrenv:
        if isinstance(c, StrategyBase):
            c.set_commissions(fn)
Set commission (transaction fee) function. Args: fn (fn(quantity, price)): Function used to determine commission amount.
juraj-google-style
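A sketch of the kind of commission function the `set_commissions` docstring expects, i.e. `fn(quantity, price) -> fee`; the fee schedule itself is illustrative only.

```python
def flat_plus_bps(quantity, price):
    # Illustrative fee: $1 flat plus 0.05% of traded notional.
    notional = abs(quantity) * price
    return 1.0 + 0.0005 * notional

# strategy.set_commissions(flat_plus_bps)  # hypothetical strategy instance
print(flat_plus_bps(100, 50.0))  # 1.0 + 0.0005 * 5000 = 3.5
```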
def grow(self, times=1): self.nodes.append([]) for n, node in enumerate(self.nodes[self.age]): if self.age == 0: p_node = Node(self.pos[:2]) else: p_node = self._get_node_parent(self.age-1, n) angle = node.get_node_angle(p_node) for i in range(self.comp): tot_angle = self.__get_total_angle(angle, i) length = self.__get_total_length(self.age+1, i) self.nodes[self.age+1].append(node.make_new_node(length, tot_angle)) self.age += 1 if times > 1: self.grow(times-1)
Let the tree grow. Args: times (integer): Indicate how many times the tree will grow.
juraj-google-style
def validate_txn_obj(obj_name, obj, key, validation_fun): backend = bigchaindb.config['database']['backend'] if backend == 'localmongodb': data = obj.get(key, {}) if isinstance(data, dict): validate_all_keys_in_obj(obj_name, data, validation_fun) elif isinstance(data, list): validate_all_items_in_list(obj_name, data, validation_fun)
Validate value of `key` in `obj` using `validation_fun`. Args: obj_name (str): name for `obj` being validated. obj (dict): dictionary object. key (str): key to be validated in `obj`. validation_fun (function): function used to validate the value of `key`. Returns: None: indicates validation successful Raises: ValidationError: `validation_fun` will raise exception on failure
juraj-google-style
def __init__(self, password, testnet=False): netcode = 'XTN' if testnet else 'BTC' if isinstance(password, str): password = password.encode() self.wallet = BIP32Node.from_master_secret(password, netcode=netcode) self.root_address = ('', self.wallet.address())
Initializes a BIP32 wallet. Addresses returned by the wallet are of the form ``(path, address)``. Args: password (bytes): Master secret for the wallet. The password can also be passed as a string (``str``). testnet (bool): Whether to use the bitcoin testnet or mainnet. Defaults to ``False``.
juraj-google-style
def get_default_session(): return _default_session_stack.get_default()
Returns the default session for the current thread. The returned `Session` will be the innermost session on which a `Session` or `Session.as_default()` context has been entered. NOTE: The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. Returns: The default `Session` being used in the current thread.
github-repos
def from_json(raw): ncls = None _type = raw.get('type') try: ncls = _type_map[NodeType(_type)] except (KeyError, ValueError) as e: logger.warning('Unknown node type: %s', _type) if DEBUG: raise_from(exception.ParseException('Parse error for %s' % (_type), raw), e) return None node = ncls() node.load(raw) return node
Helper to construct a node from a dict. Args: raw (dict): Raw node representation. Returns: Node: A Node object or None.
juraj-google-style
def __init__(self, app_name, ad): self.host_port = None self.device_port = None self.app_name = app_name self._ad = ad self.log = self._ad.log self.uid = None self._client = None self._conn = None self._counter = None self._lock = threading.Lock() self._event_client = None self.verbose_logging = True
Args: app_name: (str) The user-visible name of the app being communicated with. ad: (AndroidDevice) The device object associated with a client.
github-repos
async def get_movie(self, id_): url = self.url_builder('movie/{movie_id}', dict(movie_id=id_), url_params=OrderedDict(append_to_response='credits')) data = (await self.get_data(url)) if (data is None): return return Movie.from_json(data, self.config['data'].get('images'))
Retrieve movie data by ID. Arguments: id_ (:py:class:`int`): The movie's TMDb ID. Returns: :py:class:`~.Movie`: The requested movie.
codesearchnet
def queue_log_message(self, message: LogMessage) -> bool | Any: return self._messages.push(message)
Add a log message to the log queue and attempt a flush. Args: * message: LogMessage dictionary Returns: * True, if flushed with no errors * False, if not flushed * Error value from logger, if flushed with errors
github-repos
def get_random_email(ltd="com"): email = [ RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), "@", RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), ".", ltd ] return "".join(email)
Get a random email address with the given ltd. Args: ltd (str): The ltd to use (e.g. com). Returns: str: The random email.
juraj-google-style
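A self-contained equivalent of the random-email helper above, using only the standard library instead of the `RandomInputHelper` class referenced in the row.

```python
import random
import string

def random_email(tld="com"):
    part = lambda: "".join(random.choices(string.ascii_lowercase, k=6))
    return f"{part()}@{part()}.{tld}"

print(random_email())       # e.g. qwerty@abcdef.com
print(random_email("org"))  # e.g. zxcvbn@lkjhgf.org
```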
def plot_spectra_stacked(ss, title=None, num_rows=None, setup=_default_setup): draw_spectra_stacked(ss, title, num_rows, setup) plt.show()
Plots one or more stacked in subplots sharing same x-axis. Args: ss: list of Spectrum objects title=None: window title num_rows=None: (optional) number of rows for subplot grid. If not passed, num_rows will be the number of plots, and the number of columns will be 1. If passed, number of columns is calculated automatically. setup: PlotSpectrumSetup object
juraj-google-style
def __init__(self, vertical, cont, end): super(AbstractStyle, self).__init__() self.vertical = vertical self.cont = cont self.end = end assert (len(cont) == len(vertical) and len(cont) == len(end)), ( "'%s', '%s' and '%s' need to have equal length" % (vertical, cont, end))
Tree Render Style. Args: vertical: Sign for vertical line. cont: Chars for a continued branch. end: Chars for the last branch.
juraj-google-style
def eere_station(station_code): with open((env.SRC_PATH + '/eere_meta.csv')) as eere_meta: stations = csv.DictReader(eere_meta) for station in stations: if (station['station_code'] == station_code): return station raise KeyError('station not found')
Station information. Args: station_code (str): station code. Returns (dict): station information
codesearchnet
def extract_cluster(self, target_sites, **kwargs): cluster = list(target_sites) others = [site for site in self if site not in cluster] size = 0 while len(cluster) > size: size = len(cluster) new_others = [] for site in others: for site2 in cluster: if CovalentBond.is_bonded(site, site2, **kwargs): cluster.append(site) break else: new_others.append(site) others = new_others return cluster
Extracts a cluster of atoms based on bond lengths Args: target_sites ([Site]): List of initial sites to nucleate cluster. \\*\\*kwargs: kwargs passed through to CovalentBond.is_bonded. Returns: [Site/PeriodicSite] Cluster of atoms.
juraj-google-style
def minutes(value: Union[int, float]) -> Duration: return float(value * 60)
Converts input value from minutes to a `Duration` in seconds. Example: ```python >>> timestamps = [tp.duration.minutes(i) for i in [5, 10, 30]] >>> timestamps [300.0, 600.0, 1800.0] >>> # Usage in a window operation >>> a = tp.event_set(timestamps=timestamps, features={"f1": [1, 5, -5]}) >>> a.moving_sum(window_length=tp.duration.minutes(6)) indexes: ... timestamps: [ 300. 600. 1800.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of minutes. Returns: Equivalent number of seconds.
github-repos
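The conversion itself is plain arithmetic; a minimal standalone check of the minutes-to-seconds helper above.

```python
def minutes(value):
    # Duration is represented as a float number of seconds.
    return float(value * 60)

print([minutes(i) for i in (5, 10, 30)])  # [300.0, 600.0, 1800.0]
```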
def _string_from_ip_int(self, ip_int=None):
    if not ip_int and ip_int != 0:
        ip_int = int(self._ip)
    if ip_int > self._ALL_ONES:
        raise ValueError('IPv6 address is too large')
    hex_str = '%032x' % ip_int
    hextets = []
    for x in range(0, 32, 4):
        hextets.append('%x' % int(hex_str[x:x + 4], 16))
    hextets = self._compress_hextets(hextets)
    return ':'.join(hextets)
Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones.
codesearchnet
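For comparison, the standard-library `ipaddress` module produces the same compressed hexadecimal form from a 128-bit integer, which is an easy way to sanity-check the method above.

```python
import ipaddress

ip_int = 0x20010db8000000000000000000000001
print(str(ipaddress.IPv6Address(ip_int)))  # 2001:db8::1
```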
def get_beam_typehints_from_tableschema(schema): if not isinstance(schema, (bigquery.TableSchema, bigquery.TableFieldSchema)): schema = get_bq_tableschema(schema) typehints = [] for field in schema.fields: name, field_type, mode = (field.name, field.type.upper(), field.mode.upper()) if field_type in ['STRUCT', 'RECORD']: typehint = RowTypeConstraint.from_fields(get_beam_typehints_from_tableschema(field)) elif field_type in BIGQUERY_TYPE_TO_PYTHON_TYPE: typehint = BIGQUERY_TYPE_TO_PYTHON_TYPE[field_type] else: raise ValueError(f'Converting BigQuery type [{field_type}] to Python Beam type is not supported.') if mode == 'REPEATED': typehint = Sequence[typehint] elif mode != 'REQUIRED': typehint = Optional[typehint] typehints.append((name, typehint)) return typehints
Extracts Beam Python type hints from the schema. Args: schema (~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema): The TableSchema to extract type hints from. Returns: List[Tuple[str, Any]]: A list of type hints that describe the input schema. Nested and repeated fields are supported.
github-repos
def exit_handler(signum, frame):
    LOGGER.debug('signal {} was caught'.format(signum))
    sys.exit(128 + signum)
Catch SIGTERM and SIGHUP and call "sys.exit" which raises "SystemExit" exception. This will trigger all the cleanup code defined in ContextManagers and "finally" statements. For more details about the arguments see "signal" documentation. Args: signum(int): The signal's number frame(frame): The current stack frame, can be None
codesearchnet
def max_consecutive_days(self) -> Optional[Tuple[(int, Interval)]]: if (len(self.intervals) == 0): return None startdate = self.start_date() enddate = self.end_date() seq = '' ndays = ((enddate - startdate).days + 1) for i in range(ndays): date = (startdate + datetime.timedelta(days=i)) wholeday = Interval.wholeday(date) if any([x.overlaps(wholeday) for x in self.intervals]): seq += '+' else: seq += ' ' longest = max(seq.split(), key=len) longest_len = len(longest) longest_idx = seq.index(longest) longest_interval = Interval.dayspan((startdate + datetime.timedelta(days=longest_idx)), (startdate + datetime.timedelta(days=(longest_idx + longest_len)))) return (longest_len, longest_interval)
The length of the longest sequence of days in which all days include an interval. Returns: tuple: ``(longest_length, longest_interval)`` where ``longest_interval`` is a :class:`Interval` containing the start and end date of the longest span -- or ``None`` if we contain no intervals.
codesearchnet
def load_model(model_cls_path, model_cls_name, model_load_args): spec = importlib.util.spec_from_file_location('active_model', model_cls_path) model_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(model_module) model_cls = getattr(model_module, model_cls_name) model = model_cls() if (not isinstance(model, BaseModel)): warnings.warn(("Loaded model '%s' at '%s' is not an instance of %r" % (model_cls_name, model_cls_path, BaseModel))) model.load(**model_load_args) return model
Get an instance of the described model. Args: model_cls_path: Path to the module in which the model class is defined. model_cls_name: Name of the model class. model_load_args: Dictionary of args to pass to the `load` method of the model instance. Returns: An instance of :class:`.models.model.BaseModel` or subclass
codesearchnet
def find(self, package, **kwargs): for finder in self.finders: package_spec = finder.find(package, **kwargs) if package_spec: return package_spec return None
Find a package using package finders. Return the first package found. Args: package (str): package to find. **kwargs (): additional keyword arguments used by finders. Returns: PackageSpec: if package found, else None
codesearchnet
def token_request(self, authorization_code): if not self._client.token_endpoint: return None request = { 'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': self._redirect_uri } logger.debug('making token request: %s', request) client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic') auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method, request) resp = self._provider_configuration.requests_session \ .post(self._client.token_endpoint, data=request, headers=auth_header) \ .json() logger.debug('received token response: %s', json.dumps(resp)) if 'error' in resp: token_resp = TokenErrorResponse(**resp) else: token_resp = AccessTokenResponse(**resp) token_resp.verify(keyjar=self._client.keyjar) if 'id_token' in resp: token_resp['id_token_jwt'] = resp['id_token'] return token_resp
Makes a token request. If the 'token_endpoint' is not configured in the provider metadata, no request will be made. Args: authorization_code (str): authorization code issued to client after user authorization Returns: Union[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token request was performed.
juraj-google-style
def port_remove(br, port, if_exists=True):
    param_if_exists = _param_if_exists(if_exists)
    if port and not br:
        cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
    else:
        cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    return _retcode_to_bool(retcode)
Deletes port. Args: br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it) port: A string - port name. if_exists: Bool, if False - attempting to delete a port that does not exist returns False. (Default True) Returns: True on success, else False. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' openvswitch.port_remove br0 8080
codesearchnet
def from_file(xmu_dat_file='xmu.dat', feff_inp_file='feff.inp'): data = np.loadtxt(xmu_dat_file) header = Header.from_file(feff_inp_file) parameters = Tags.from_file(feff_inp_file) pots = Potential.pot_string_from_file(feff_inp_file) if ('RECIPROCAL' in parameters): absorbing_atom = parameters['TARGET'] else: absorbing_atom = pots.splitlines()[3].split()[2] return Xmu(header, parameters, absorbing_atom, data)
Get Xmu from file. Args: xmu_dat_file (str): filename and path for xmu.dat feff_inp_file (str): filename and path of feff.inp input file Returns: Xmu object
codesearchnet
def get_structure_from_canonical_name(self, structure_name): return next((st for st in self.structures if st.canonical_name == structure_name), None)
Return a structure from a canonical name Args: structure_name (str): canonical name of the structure Returns: Structure
juraj-google-style
def update_missing_keys_after_loading(self, model, missing_keys: List[str], prefix: str) -> List[str]: return missing_keys
Override this method if you want to adjust the `missing_keys` after loading the model params, but before the model is post-processed. Args: missing_keys (`List[str]`, *optional*): The list of missing keys in the checkpoint compared to the state dict of the model
github-repos
def list_vms_sub(access_token, subscription_id): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/virtualMachines', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
List VMs in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM model views.
codesearchnet
def clean(self, force: bool=False): assert not self._closed with (yield from self._host_pools_lock): for key, pool in tuple(self._host_pools.items()): yield from pool.clean(force=force) if not self._host_pool_waiters[key] and pool.empty(): del self._host_pools[key] del self._host_pool_waiters[key]
Clean all closed connections. Args: force: Clean connected and idle connections too. Coroutine.
juraj-google-style
def frame(self, frame):
    try:
        zframe = str(int(frame)).zfill(self._zfill)
    except ValueError:
        zframe = frame
        if self._zfill == 0:
            zframe = ""
    return "".join((self._dir, self._base, zframe, self._ext))
Return a path to the given frame in the sequence. Integer or string digits are treated as a frame number and padding is applied, all other values are passed through. Examples: >>> seq.frame(1) /foo/bar.0001.exr >>> seq.frame("#") /foo/bar.#.exr Args: frame (int or str): the desired frame number or a char to pass through (ie. #) Returns: str:
juraj-google-style
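A small standalone sketch of the padding behavior documented above: integers are zero-filled to the sequence's pad width, anything else (like '#') passes through. The `format_frame` helper is hypothetical.

```python
def format_frame(prefix, frame, ext, zfill=4):
    # Integers get zero-padded; non-numeric tokens (e.g. '#') pass through unchanged.
    try:
        token = str(int(frame)).zfill(zfill)
    except ValueError:
        token = frame
    return f"{prefix}{token}{ext}"

print(format_frame("/foo/bar.", 1, ".exr"))    # /foo/bar.0001.exr
print(format_frame("/foo/bar.", "#", ".exr"))  # /foo/bar.#.exr
```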
def disable_cudnn_autotune(func: _F) -> _F: def decorated(*args, **kwargs): original_tf_cudnn_use_autotune = os.environ.get('TF_CUDNN_USE_AUTOTUNE') os.environ['TF_CUDNN_USE_AUTOTUNE'] = 'false' original_xla_flags = os.environ.get('XLA_FLAGS') new_xla_flags = '--xla_gpu_autotune_level=0' if original_xla_flags: new_xla_flags = original_xla_flags + ' ' + new_xla_flags os.environ['XLA_FLAGS'] = new_xla_flags result = func(*args, **kwargs) if original_tf_cudnn_use_autotune is None: del os.environ['TF_CUDNN_USE_AUTOTUNE'] else: os.environ['TF_CUDNN_USE_AUTOTUNE'] = original_tf_cudnn_use_autotune if original_xla_flags is None: del os.environ['XLA_FLAGS'] else: os.environ['XLA_FLAGS'] = original_xla_flags return result return tf_decorator.make_decorator(func, decorated)
Disable autotuning during the call to this function. Some tests want to base assertions on a graph being isomorphic with a copy. To ensure this, this decorator disables autotuning. Args: func: Function to run with CuDNN autotuning turned off. Returns: Decorated function.
github-repos
def set_compare_custom_predict_fn(self, predict_fn): self.delete('compare_estimator_and_spec') self.store('compare_custom_predict_fn', predict_fn) self.set_compare_inference_address('custom_predict_fn') if (not self.has_compare_model_name()): self.set_compare_model_name('2') return self
Sets a second custom function for inference. If you wish to compare the results of two models in WIT, use this method to setup the details of the second model. Instead of using TF Serving to host a model for WIT to query, WIT can directly use a custom function as the model to query. In this case, the provided function should accept example protos and return: - For classification: A 2D list of numbers. The first dimension is for each example being predicted. The second dimension are the probabilities for each class ID in the prediction. - For regression: A 1D list of numbers, with a regression score for each example being predicted. Args: predict_fn: The custom python function which will be used for model inference. Returns: self, in order to enabled method chaining.
codesearchnet
def update_config(self, config, timeout=-1): return self._client.update(config, uri=self.URI + "/config", timeout=timeout)
Updates the remote server configuration and the automatic backup schedule for backup. Args: config (dict): Object to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Backup details.
juraj-google-style
def take_snapshot(self, snapshot_name, return_dict=True, power_off=False): if ((power_off is True) and (self.status != 'off')): action = self.power_off(return_dict=False) action.wait() self.load() return self._perform_action({'type': 'snapshot', 'name': snapshot_name}, return_dict)
Take a snapshot! Args: snapshot_name (str): name of snapshot Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. power_off (bool): Before taking the snapshot the droplet will be turned off with another API call. It will wait until the droplet will be powered off. Returns dict or Action
codesearchnet
def export(preprocessor: Union['PreTrainedTokenizer', 'FeatureExtractionMixin', 'ProcessorMixin'], model: Union['PreTrainedModel', 'TFPreTrainedModel'], config: OnnxConfig, opset: int, output: Path, tokenizer: Optional['PreTrainedTokenizer']=None, device: str='cpu') -> Tuple[List[str], List[str]]: if not (is_torch_available() or is_tf_available()): raise ImportError('Cannot convert because neither PyTorch nor TensorFlow are not installed. Please install torch or tensorflow first.') if is_tf_available() and isinstance(model, TFPreTrainedModel) and (device == 'cuda'): raise RuntimeError('`tf2onnx` does not support export on CUDA device.') if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError('You cannot provide both a tokenizer and a preprocessor to export the model.') if tokenizer is not None: warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning) logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.') preprocessor = tokenizer if is_torch_available(): from ..utils import get_torch_version if not config.is_torch_support_available: logger.warning(f'Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version}, got: {get_torch_version()}') if is_torch_available() and issubclass(type(model), PreTrainedModel): return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device) elif is_tf_available() and issubclass(type(model), TFPreTrainedModel): return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)
Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for export on CUDA devices. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration.
github-repos
def GetMessageStrings(cls, formatter_mediator, event): formatter_object = cls.GetFormatterObject(event.data_type) return formatter_object.GetMessages(formatter_mediator, event)
Retrieves the formatted message strings for a specific event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: list[str, str]: long and short version of the message string.
codesearchnet
def get_certificate(self, id): return Certificate.get_object(api_token=self.token, cert_id=id)
Returns a Certificate object by its ID. Args: id (str): Certificate ID
juraj-google-style
def get_object(cls, api_token, ip): floating_ip = cls(token=api_token, ip=ip) floating_ip.load() return floating_ip
Class method that will return a FloatingIP object by its IP. Args: api_token: str - token ip: str - floating ip address
codesearchnet
def ValidateToken(token, targets): def GetSubjectForError(): if len(targets) == 1: return list(targets)[0] else: return None if not token: raise access_control.UnauthorizedAccess( "Must give an authorization token for %s" % targets, subject=GetSubjectForError()) token.CheckExpiry() if not token.username: raise access_control.UnauthorizedAccess( "Must specify a username for access to %s." % targets, subject=GetSubjectForError()) return True
Does basic token validation. Args: token: User's credentials as access_control.ACLToken. targets: List of targets that were meant to be accessed by the token. This is used for logging purposes only. Returns: True if token is valid. Raises: access_control.UnauthorizedAccess: if token is not valid. ValueError: if targets list is empty.
juraj-google-style
def append_block(self, node, reverse=False): if (not isinstance(node, grammar.STATEMENTS)): raise ValueError if reverse: self.to_append_block[(- 1)].appendleft(node) else: self.to_append_block[(- 1)].append(node)
Append a statement to the current block. Args: node: The statement to append. reverse: When called multiple times, this flag determines whether the statement should be prepended or appended to the already inserted statements. Raises: ValueError: If the given node is not a statement.
codesearchnet
def BuildServiceStub(self, cls): def _ServiceStubInit(stub, rpc_channel): stub.rpc_channel = rpc_channel self.cls = cls cls.__init__ = _ServiceStubInit for method in self.descriptor.methods: setattr(cls, method.name, self._GenerateStubMethod(method))
Constructs the stub class. Args: cls: The class that will be constructed.
codesearchnet
def _EvaluateExpression(frame, expression): try: code = compile(expression, '<watched_expression>', 'eval') except (TypeError, ValueError) as e: return (False, { 'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': { 'format': 'Invalid expression', 'parameters': [str(e)]}}) except SyntaxError as e: return (False, { 'isError': True, 'refersTo': 'VARIABLE_NAME', 'description': { 'format': 'Expression could not be compiled: $0', 'parameters': [e.msg]}}) try: return (True, native.CallImmutable(frame, code)) except BaseException as e: return (False, { 'isError': True, 'refersTo': 'VARIABLE_VALUE', 'description': { 'format': 'Exception occurred: $0', 'parameters': [str(e)]}})
Compiles and evaluates watched expression. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: (False, status) on error or (True, value) on success.
juraj-google-style
def union(cls, *mhs):
    if len(mhs) < 2:
        raise ValueError("Cannot union less than 2 MinHash")
    num_perm = len(mhs[0])
    seed = mhs[0].seed
    if any((seed != m.seed or num_perm != len(m)) for m in mhs):
        raise ValueError("The unioning MinHash must have the "
                         "same seed and number of permutation functions")
    hashvalues = np.minimum.reduce([m.hashvalues for m in mhs])
    permutations = mhs[0].permutations
    return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues,
               permutations=permutations)
Create a MinHash which is the union of the MinHash objects passed as arguments. Args: *mhs: The MinHash objects to be united. The argument list length is variable, but must be at least 2. Returns: datasketch.MinHash: A new union MinHash.
juraj-google-style
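The core of the union is an element-wise minimum over the signatures; a NumPy-only sketch (without the datasketch class machinery) showing that step.

```python
import numpy as np

# Three toy MinHash signatures with the same number of permutations.
sig_a = np.array([3, 7, 2, 9])
sig_b = np.array([5, 1, 8, 9])
sig_c = np.array([4, 6, 2, 1])

union_sig = np.minimum.reduce([sig_a, sig_b, sig_c])
print(union_sig)  # [3 1 2 1]
```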
def __init__(self, tcex, name): self._name = name self._tcex = tcex self._type = 'tags' self._api_sub_type = None self._api_type = None self._api_entity = 'tag' self._utils = TcExUtils() self._tc_requests = TiTcRequest(self._tcex)
Initialize Class Properties. Args: tcex: An instance of the TcEx framework object. name (str): The name for this Tag.
juraj-google-style
def set(msg_or_dict, key, value):
    if not isinstance(msg_or_dict, (collections_abc.MutableMapping, message.Message)):
        raise TypeError(
            "set() expected a dict or protobuf message, got {!r}.".format(
                type(msg_or_dict)
            )
        )
    basekey, subkey = _resolve_subkeys(key)
    if subkey is not None:
        if isinstance(msg_or_dict, collections_abc.MutableMapping):
            msg_or_dict.setdefault(basekey, {})
        set(get(msg_or_dict, basekey), subkey, value)
        return
    if isinstance(msg_or_dict, collections_abc.MutableMapping):
        msg_or_dict[key] = value
    else:
        _set_field_on_message(msg_or_dict, key, value)
Set a key's value on a protobuf Message or dictionary. Args: msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the object. key (str): The key to set. value (Any): The value to set. Raises: TypeError: If ``msg_or_dict`` is not a Message or dictionary.
juraj-google-style
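A dict-only sketch of the dotted-key behavior that `set` implements above (the protobuf branch and the `_resolve_subkeys` helper are left out); splitting on the first '.' and recursing creates nested dicts as needed.

```python
def set_path(mapping, key, value):
    # Split on the first '.', recurse into (and create) nested dicts as needed.
    base, _, rest = key.partition('.')
    if rest:
        set_path(mapping.setdefault(base, {}), rest, value)
    else:
        mapping[key] = value

cfg = {}
set_path(cfg, 'retry.max_attempts', 5)
set_path(cfg, 'timeout', 30)
print(cfg)  # {'retry': {'max_attempts': 5}, 'timeout': 30}
```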
def _ParseEventData(self, variable_length_section): event_data = WinJobEventData() event_data.application = variable_length_section.application_name.rstrip('\x00') event_data.comment = variable_length_section.comment.rstrip('\x00') event_data.parameters = variable_length_section.parameters.rstrip('\x00') event_data.username = variable_length_section.author.rstrip('\x00') event_data.working_directory = variable_length_section.working_directory.rstrip('\x00') return event_data
Parses the event data from a variable-length data section. Args: variable_length_section (job_variable_length_data_section): a Windows Scheduled Task job variable-length data section. Returns: WinJobEventData: event data of the job file.
codesearchnet
def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool): if not is_torch_available(): raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.') import torch from torch.onnx import export print(f'Using framework PyTorch: {torch.__version__}') with torch.no_grad(): input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, 'pt') ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names) export(nlp.model, model_args, f=output.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR). Args: nlp: The pipeline to be exported opset: The actual version of the ONNX operator set to use output: Path where will be stored the generated ONNX model use_external_format: Split the model definition from its parameters to allow models bigger than 2GB
github-repos
def __init__(self, rots: Optional[Rotation], trans: Optional[torch.Tensor]): batch_dims, dtype, device, requires_grad = (None, None, None, None) if trans is not None: batch_dims = trans.shape[:-1] dtype = trans.dtype device = trans.device requires_grad = trans.requires_grad elif rots is not None: batch_dims = rots.shape dtype = rots.dtype device = rots.device requires_grad = rots.requires_grad else: raise ValueError('At least one input argument must be specified') if rots is None: rots = Rotation.identity(batch_dims, dtype, device, requires_grad) elif trans is None: trans = identity_trans(batch_dims, dtype, device, requires_grad) assert rots is not None assert trans is not None if rots.shape != trans.shape[:-1] or rots.device != trans.device: raise ValueError('Rots and trans incompatible') trans = trans.to(dtype=torch.float32) self._rots = rots self._trans = trans
Args: rots: A [*, 3, 3] rotation tensor trans: A corresponding [*, 3] translation tensor
github-repos
def __init__(self, current_frame): self.ignore_unknown_dtypes = False self.meta_params = dict() self.method_calling = inspect.getframeinfo(current_frame)[2] _, _, __, self.vals_current = inspect.getargvalues(current_frame) if 'self' in self.vals_current: self.recorded_class_type = self.vals_current['self'] self.meta_params['AgentName'] = str(self.vals_current['self']) frame_list = inspect.getouterframes(current_frame) for frame in frame_list: args, varargs, keywords, vals = inspect.getargvalues(frame[0]) if 'self' in vals: if self.recorded_class_type == vals['self']: for i in args: self.meta_params[i] = vals[i] del self.meta_params['self']
Init the MetaParameterRecord with "Agent" parameters by passing inspect.currentframe() from the Agent class. The init will search back to find the parent class to capture all passed parameters and store them in "self.meta_params". NOTE: Currently only optimized for TensorBoard output. TODO: Add JSON export, text export. Args: current_frame: Frame value from the calling class, obtained via inspect.currentframe().
juraj-google-style
def DeserializeFromDB(buffer): m = StreamManager.GetStream(buffer) reader = BinaryReader(m) c = ContractState() c.Deserialize(reader) StreamManager.ReleaseStream(m) return c
Deserialize full object. Args: buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from. Returns: ContractState:
juraj-google-style
def release_readme_verify():
    version = '{version}'
    expected = populate_readme(
        version,
        version,
        pypi='',
        pypi_img='',
        versions='\n\n',
        versions_img='',
        circleci_badge=CIRCLECI_BADGE_RELEASE,
        circleci_path='/{circleci_build}',
        travis_badge=TRAVIS_BADGE_RELEASE,
        travis_path='/builds/{travis_build}',
        appveyor_badge=APPVEYOR_BADGE_RELEASE,
        appveyor_path='/build/{appveyor_build}',
        coveralls_badge=COVERALLS_BADGE_RELEASE,
        coveralls_path='builds/{coveralls_build}',
    )
    with open(RELEASE_README_FILE, 'r') as file_obj:
        contents = file_obj.read()
    if contents != expected:
        err_msg = '\n' + get_diff(
            contents, expected, 'README.rst.release.actual', 'README.rst.release.expected')
        raise ValueError(err_msg)
    else:
        print('README.rst.release.template contents are as expected.')
Specialize the template to a PyPI release template.

Once populated, compare to ``README.rst.release.template``.

Raises:
    ValueError: If the current template doesn't agree with the expected
        value specialized from the template.
codesearchnet
def fasta_files_equal(seq_file1, seq_file2):
    seq1 = SeqIO.read(open(seq_file1), 'fasta')
    seq2 = SeqIO.read(open(seq_file2), 'fasta')
    if str(seq1.seq) == str(seq2.seq):
        return True
    else:
        return False
Check whether the sequence in one FASTA file is identical to the sequence in another FASTA file.

Args:
    seq_file1: Path to a FASTA file
    seq_file2: Path to another FASTA file

Returns:
    bool: True if the sequences are the same, False otherwise
codesearchnet
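A minimal usage sketch for the entry above (the file names are illustrative; assumes Biopython is installed and fasta_files_equal is defined in the current module with `from Bio import SeqIO` at the top):

if fasta_files_equal("protein_a.fasta", "protein_b.fasta"):
    print("Sequences are identical")
else:
    print("Sequences differ")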
def Close(self):
    if not self._is_open:
        raise IOError('Storage file already closed.')
    if not self._read_only:
        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT_SOURCE)
        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT_DATA)
        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT)
        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EVENT_TAG)
        self._WriteSerializedAttributeContainerList(self._CONTAINER_TYPE_EXTRACTION_WARNING)
    if self._connection:
        self._connection.commit()
        self._connection.close()
        self._connection = None
        self._cursor = None
    self._is_open = False
Closes the storage.

Raises:
    IOError: if the storage file is already closed.
    OSError: if the storage file is already closed.
codesearchnet
def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
    input_height, input_width = get_image_size(image, channel_dim=input_data_format)
    mask = np.zeros(output_size, dtype=np.int64)
    mask[:input_height, :input_width] = 1
    return mask
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.

Args:
    image (`np.ndarray`):
        Image to make the pixel mask for.
    output_size (`Tuple[int, int]`):
        Output size of the mask.
github-repos
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    super(MACSignatureKeyInformation, self).read(input_stream, kmip_version=kmip_version)
    local_stream = BytearrayStream(input_stream.read(self.length))
    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
        self._unique_identifier.read(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError("Invalid struct missing the unique identifier attribute.")
    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream):
        self._cryptographic_parameters = CryptographicParameters()
        self._cryptographic_parameters.read(local_stream, kmip_version=kmip_version)
    self.is_oversized(local_stream)
Read the data encoding the MACSignatureKeyInformation struct and decode it into its constituent parts.

Args:
    input_stream (stream): A data stream containing encoded object data,
        supporting a read method; usually a BytearrayStream object.
    kmip_version (KMIPVersion): An enumeration defining the KMIP version
        with which the object will be decoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def user(self, user: str) -> "ChildHTTPAPI":
    if self.is_real_user:
        raise ValueError("Can't get child of real user")
    try:
        return self.children[user]
    except KeyError:
        child = ChildHTTPAPI(user, self)
        self.children[user] = child
        return child
Get a child HTTPAPI instance.

Args:
    user: The Matrix ID of the user whose API to get.

Returns:
    An HTTPAPI instance that always uses the given Matrix ID.
juraj-google-style
def check_cache(resource_type):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                adapter = args[0]
                key, val = list(kwargs.items())[0]
            except IndexError:
                logger.warning("Couldn't generate full index key, skipping cache")
            else:
                index_key = (resource_type, key, val)
                try:
                    cached_record = adapter._swimlane.resources_cache[index_key]
                except KeyError:
                    logger.debug('Cache miss: `{!r}`'.format(index_key))
                else:
                    logger.debug('Cache hit: `{!r}`'.format(cached_record))
                    return cached_record
            return func(*args, **kwargs)
        return wrapper
    return decorator
Decorator for adapter methods that checks the cache for a resource before sending requests to retrieve data.

Only works with a single kwarg; almost always used with the @one_of_keyword_only decorator.

Args:
    resource_type (type(APIResource)): Subclass of APIResource whose cache should be checked when the
        decorated method is called
codesearchnet
def impad(img, shape, pad_val=0):
    if not isinstance(pad_val, (int, float)):
        assert len(pad_val) == img.shape[-1]
    if len(shape) < len(img.shape):
        shape = shape + (img.shape[-1],)
    assert len(shape) == len(img.shape)
    for i in range(len(shape) - 1):
        assert shape[i] >= img.shape[i]
    pad = np.empty(shape, dtype=img.dtype)
    pad[...] = pad_val
    pad[:img.shape[0], :img.shape[1], ...] = img
    return pad
Pad an image to a certain shape.

Args:
    img (ndarray): Image to be padded.
    shape (tuple): Expected padding shape.
    pad_val (number or sequence): Values to be filled in padding areas.

Returns:
    ndarray: The padded image.
codesearchnet
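A minimal sketch of the entry above in use; it only depends on numpy, and assumes impad is available in the current module:

import numpy as np

img = np.ones((4, 5, 3), dtype=np.uint8) * 255   # a small white RGB image
padded = impad(img, (8, 8), pad_val=0)           # pad to 8x8; channel dim is appended from img
print(padded.shape)                              # (8, 8, 3); original pixels sit in the top-left corner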
def crop(img, i, j, h, w):
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return img.crop((j, i, j + w, i + h))
Crop the given PIL Image.

Args:
    img (PIL Image): Image to be cropped.
    i (int): Row coordinate (i in (i, j)) of the upper left corner.
    j (int): Column coordinate (j in (i, j)) of the upper left corner.
    h (int): Height of the cropped image.
    w (int): Width of the cropped image.

Returns:
    PIL Image: Cropped image.
juraj-google-style
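A usage sketch under the assumption that Pillow and torchvision are installed; torchvision.transforms.functional.crop takes the same (img, i, j, h, w) argument order as the helper above:

from PIL import Image
from torchvision.transforms import functional as F

img = Image.new("RGB", (100, 80))        # width=100, height=80
patch = F.crop(img, 10, 20, 30, 40)      # top=10, left=20, height=30, width=40
print(patch.size)                        # (40, 30), reported as (width, height)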
def sys_update_char(asciiCode: int, fontx: int, fonty: int, img: tcod.image.Image, x: int, y: int) -> None:
    lib.TCOD_sys_update_char(_int(asciiCode), fontx, fonty, img, x, y)
Dynamically update the current font with img.

All cells using this asciiCode will be updated at the next call to
:any:`tcod.console_flush`.

Args:
    asciiCode (int): Ascii code corresponding to the character to update.
    fontx (int): Left coordinate of the character in the bitmap font (in tiles)
    fonty (int): Top coordinate of the character in the bitmap font (in tiles)
    img (Image): An image containing the new character bitmap.
    x (int): Left pixel of the character in the image.
    y (int): Top pixel of the character in the image.
codesearchnet
def Equals(self, other):
    if other is None:
        return False
    if other.PrevHash.ToBytes() == self.PrevHash.ToBytes() and other.PrevIndex == self.PrevIndex:
        return True
    return False
Test for equality.

Args:
    other (obj): the object to compare against.

Returns:
    bool: True if `other` equals self, False otherwise.
juraj-google-style
def get_comments(self, sharekey=None):
    if not sharekey:
        raise Exception(
            "You must specify a sharekey of the file whose "
            "comments you want to retrieve.")
    endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)
    data = self._make_request("GET", endpoint=endpoint)
    return [Comment.NewFromJSON(c) for c in data['comments']]
Retrieve comments on a SharedFile.

Args:
    sharekey (str): Sharekey for the file from which you want to return the set of comments.

Returns:
    List of Comment objects.
juraj-google-style
def tokenize(self, text, never_split=None):
    never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
    text = self._clean_text(text)
    if self.tokenize_chinese_chars:
        text = self._tokenize_chinese_chars(text)
    unicode_normalized_text = unicodedata.normalize('NFC', text)
    orig_tokens = whitespace_tokenize(unicode_normalized_text)
    split_tokens = []
    for token in orig_tokens:
        if token not in never_split:
            if self.do_lower_case:
                token = token.lower()
                if self.strip_accents is not False:
                    token = self._run_strip_accents(token)
            elif self.strip_accents:
                token = self._run_strip_accents(token)
        split_tokens.extend(self._run_split_on_punc(token, never_split))
    output_tokens = whitespace_tokenize(' '.join(split_tokens))
    return output_tokens
Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

Args:
    never_split (`List[str]`, *optional*):
        Kept for backward compatibility purposes. Now implemented directly at the base class level (see
        [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
github-repos
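A minimal usage sketch, assuming the transformers library is installed and exports BasicTokenizer at the top level:

from transformers import BasicTokenizer

tokenizer = BasicTokenizer(do_lower_case=True)
print(tokenizer.tokenize("Hello, World! 你好"))
# e.g. ['hello', ',', 'world', '!', '你', '好'] -- punctuation is split off and
# Chinese characters are tokenized individually.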
def upload_predictions(self, file_path, tournament=1):
    self.logger.info('uploading predictions...')
    auth_query = '\n query($filename: String!\n $tournament: Int!) {\n submission_upload_auth(filename: $filename\n tournament: $tournament) {\n filename\n url\n }\n }\n '
    arguments = {'filename': os.path.basename(file_path), 'tournament': tournament}
    submission_resp = self.raw_query(auth_query, arguments, authorization=True)
    submission_auth = submission_resp['data']['submission_upload_auth']
    with open(file_path, 'rb') as fh:
        requests.put(submission_auth['url'], data=fh.read())
    create_query = '\n mutation($filename: String!\n $tournament: Int!) {\n create_submission(filename: $filename\n tournament: $tournament) {\n id\n }\n }\n '
    arguments = {'filename': submission_auth['filename'], 'tournament': tournament}
    create = self.raw_query(create_query, arguments, authorization=True)
    self.submission_id = create['data']['create_submission']['id']
    return self.submission_id
Upload predictions from file.

Args:
    file_path (str): CSV file with predictions that will get uploaded
    tournament (int): ID of the tournament (optional, defaults to 1)

Returns:
    str: submission_id

Example:
    >>> api = NumerAPI(secret_key="..", public_id="..")
    >>> api.upload_predictions()
    '93c46857-fed9-4594-981e-82db2b358daf'
codesearchnet
def forward(self, outputs, targets):
    batch_size, num_queries = outputs['logits'].shape[:2]
    out_bbox = outputs['pred_boxes'].flatten(0, 1)
    target_ids = torch.cat([v['class_labels'] for v in targets])
    target_bbox = torch.cat([v['boxes'] for v in targets])
    if self.use_focal_loss:
        out_prob = F.sigmoid(outputs['logits'].flatten(0, 1))
        out_prob = out_prob[:, target_ids]
        neg_cost_class = (1 - self.alpha) * out_prob ** self.gamma * -(1 - out_prob + 1e-08).log()
        pos_cost_class = self.alpha * (1 - out_prob) ** self.gamma * -(out_prob + 1e-08).log()
        class_cost = pos_cost_class - neg_cost_class
    else:
        out_prob = outputs['logits'].flatten(0, 1).softmax(-1)
        class_cost = -out_prob[:, target_ids]
    bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
    giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
    cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
    cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
    sizes = [len(v['boxes']) for v in targets]
    indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
    return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
Performs the matching.

Params:
    outputs: This is a dict that contains at least these entries:
        "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
        "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
    targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
        "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
            ground-truth objects in the target) containing the class labels
        "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates

Returns:
    A list of size batch_size, containing tuples of (index_i, index_j) where:
        - index_i is the indices of the selected predictions (in order)
        - index_j is the indices of the corresponding selected targets (in order)
    For each batch element, it holds:
        len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
github-repos
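A tiny illustration of the final assignment step above: scipy's Hungarian solver picks, for one image, the prediction/target pairing that minimizes the summed matching cost. The numbers are made up.

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([
    [0.9, 0.1],   # query 0 vs targets 0, 1
    [0.2, 0.7],   # query 1
    [0.5, 0.6],   # query 2
])
row_ind, col_ind = linear_sum_assignment(cost)
print(row_ind, col_ind)   # [0 1] [1 0]: query 0 -> target 1, query 1 -> target 0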
def __init__(self, lst):
    list.__init__(self, lst)
    self.server = None
    self.port = find_free_port()
    self.html_path = get_cur_path() + '/data/math_list/index.html'
MathList constructor.

todo:: Share a port among lists, or maybe close the server after serving from it?

Args:
    lst (list): A list of LaTeX math strings to be rendered by KaTeX

Returns:
    A math list object

Usage example:
    >>> lst = ["\int x = y", "x + 6"]
    >>> MathList(lst)
    ... see nicely formatted math.
juraj-google-style
def FlatMap(fn=identity, *args, **kwargs):
    label = 'FlatMap(%s)' % ptransform.label_from_callable(fn)
    if not callable(fn):
        raise TypeError('FlatMap can be used only with callable objects. Received %r instead.' % fn)
    pardo = ParDo(CallableWrapperDoFn(fn), *args, **kwargs)
    pardo.label = label
    return pardo
:func:`FlatMap` is like :class:`ParDo` except it takes a callable to
specify the transformation.

The callable must return an iterable for each element of the input
:class:`~apache_beam.pvalue.PCollection`. The elements of these iterables will
be flattened into the output :class:`~apache_beam.pvalue.PCollection`. If no
callable is given, then all elements of the input PCollection must already be
iterables themselves and will be flattened into the output PCollection.

Args:
    fn (callable): a callable object.
    *args: positional arguments passed to the transform callable.
    **kwargs: keyword arguments passed to the transform callable.

Returns:
    ~apache_beam.pvalue.PCollection:
    A :class:`~apache_beam.pvalue.PCollection` containing the
    :func:`FlatMap` outputs.

Raises:
    TypeError: If the **fn** passed as argument is not a callable.
        Typical error is to pass a :class:`DoFn` instance which is supported
        only for :class:`ParDo`.
github-repos
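A minimal runnable sketch of FlatMap in a pipeline, assuming apache-beam is installed:

import apache_beam as beam

with beam.Pipeline() as p:
    _ = (
        p
        | beam.Create(["the quick fox", "jumps over"])
        | beam.FlatMap(lambda line: line.split())   # each line yields several words
        | beam.Map(print)
    )
# prints one word per element: the, quick, fox, jumps, over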
def find_user(cls, session, mailbox, user):
    return cls(
        '/mailboxes/%d/users/%s/conversations.json' % (mailbox.id, user.id),
        session=session,
    )
Return conversations for a specific user in a mailbox.

Args:
    session (requests.sessions.Session): Authenticated session.
    mailbox (helpscout.models.Mailbox): Mailbox to search.
    user (helpscout.models.User): User to search for.

Returns:
    RequestPaginator(output_type=helpscout.models.Conversation):
        Conversations iterator.
codesearchnet
def create_single_fc_model(fingerprint_input, model_settings, is_training):
    if is_training:
        dropout_rate = tf.compat.v1.placeholder(tf.float32, name='dropout_rate')
    fingerprint_size = model_settings['fingerprint_size']
    label_count = model_settings['label_count']
    weights = tf.compat.v1.get_variable(
        name='weights',
        initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.001),
        shape=[fingerprint_size, label_count])
    bias = tf.compat.v1.get_variable(
        name='bias', initializer=tf.compat.v1.zeros_initializer, shape=[label_count])
    logits = tf.matmul(fingerprint_input, weights) + bias
    if is_training:
        return (logits, dropout_rate)
    else:
        return logits
Builds a model with a single hidden fully-connected layer.

This is a very simple model with just one matmul and bias layer. As you'd
expect, it doesn't produce very accurate results, but it is very fast and
simple, so it's useful for sanity testing.

Here's the layout of the graph:

(fingerprint_input)
        v
    [MatMul]<-(weights)
        v
    [BiasAdd]<-(bias)
        v

Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.

Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
github-repos
def list_files(root, suffix, prefix=False):
    root = os.path.expanduser(root)
    files = list(
        filter(
            lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
            os.listdir(root)
        )
    )
    if prefix is True:
        files = [os.path.join(root, d) for d in files]
    return files
List all files ending with a suffix at a given root.

Args:
    root (str): Path to directory whose folders need to be listed
    suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
        It uses the Python "str.endswith" method and is passed directly
    prefix (bool, optional): If true, prepends the path to each result, otherwise
        only returns the name of the files found
codesearchnet
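A minimal usage sketch of the helper above; the directory path is illustrative and list_files is assumed to be importable from the surrounding module:

png_names = list_files("~/datasets/images", ".png")                       # e.g. ['a.png', 'b.png']
full_paths = list_files("~/datasets/images", (".jpg", ".png"), prefix=True)
# with prefix=True, each result is joined onto the expanded root directory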
def _kl_dirichlet_dirichlet(d1, d2, name=None):
    with ops.name_scope(name, 'kl_dirichlet_dirichlet', values=[d1.concentration, d2.concentration]):
        digamma_sum_d1 = math_ops.digamma(
            math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))
        digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1
        concentration_diff = d1.concentration - d2.concentration
        return (math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1)
                - special_math_ops.lbeta(d1.concentration)
                + special_math_ops.lbeta(d2.concentration))
Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.

Args:
    d1: instance of a Dirichlet distribution object.
    d2: instance of a Dirichlet distribution object.
    name: (optional) Name to use for created operations.
        Default is "kl_dirichlet_dirichlet".

Returns:
    Batchwise KL(d1 || d2)
github-repos
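For reference, the standard closed form that the code above evaluates, with concentrations alpha (d1) and beta (d2), psi the digamma function, B the multivariate beta function, and alpha_0 the sum of the alpha_i:

\mathrm{KL}\bigl(\mathrm{Dir}(\alpha)\,\|\,\mathrm{Dir}(\beta)\bigr)
  = \log B(\beta) - \log B(\alpha)
  + \sum_i (\alpha_i - \beta_i)\bigl(\psi(\alpha_i) - \psi(\alpha_0)\bigr)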