Columns: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), source (string class, 3 values)
def sort_values(
    self,
    by,
    axis=0,
    ascending=True,
    inplace=False,
    kind="quicksort",
    na_position="last",
):
    axis = self._get_axis_number(axis)
    if not is_list_like(by):
        by = [by]
    if axis == 0:
        broadcast_value_dict = {col: self[col] for col in by}
        broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index)
        new_index = broadcast_values.sort_values(
            by=by,
            axis=axis,
            ascending=ascending,
            kind=kind,
            na_position=na_position,
        ).index
        return self.reindex(index=new_index, copy=not inplace)
    else:
        broadcast_value_list = [
            self[row::len(self.index)]._to_pandas() for row in by
        ]
        index_builder = list(zip(broadcast_value_list, by))
        broadcast_values = pandas.concat(
            [row for row, idx in index_builder], copy=False
        )
        broadcast_values.columns = self.columns
        new_columns = broadcast_values.sort_values(
            by=by,
            axis=axis,
            ascending=ascending,
            kind=kind,
            na_position=na_position,
        ).columns
        return self.reindex(columns=new_columns, copy=not inplace)
Sorts by a column/row or list of columns/rows. Args: by: A list of labels for the axis to sort over. axis: The axis to sort. ascending: Sort in ascending or descending order. inplace: If true, do the operation inplace. kind: How to sort. na_position: Where to put np.nan values. Returns: A sorted DataFrame.
juraj-google-style
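A minimal usage sketch for the `sort_values` entry above. It uses plain pandas as a stand-in, since the entry mirrors the pandas `sort_values` signature; the frame contents are invented for illustration.

```python
import pandas as pd

df = pd.DataFrame({"a": [3, 1, 2], "b": [9.0, 7.0, 8.0]})

# Sort rows by column "a" (axis=0), NaNs last, returning a new frame.
by_rows = df.sort_values(by="a", axis=0, ascending=True, na_position="last")

# Sort columns by the values in row 0 (axis=1).
by_cols = df.sort_values(by=0, axis=1)

print(by_rows)
print(by_cols)
```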
def parse_split(cls, header: bytes, body: bytes) -> 'MessageContent':
    header_lines = cls._find_lines(header)
    body_lines = cls._find_lines(body)
    header_view = memoryview(header)
    body_view = memoryview(body)
    return cls._parse_split([header_view, body_view], header, body,
                            header_view, body_view, header_lines, body_lines)
Parse the header and body bytestrings into message content. Args: header: The header bytestring to parse. body: The body bytestring to parse.
codesearchnet
async def attach_file(self, file_path: str, description: str = None) -> Attachment:
    with open(file_path, 'rb') as f:
        return await self._attach(f.read(), description)
add a file as an attachment |methcoro| Warning: |unstable| Args: file_path: path to the file you want to add description: *optional* description for your attachment Returns: Attachment: Raises: ValueError: file_path must not be None APIException
codesearchnet
def attribute_labels(
    self, main_type, sub_type, unique_id, attribute_id, owner=None, params=None
):
    params = params or {}
    if owner:
        params['owner'] = owner
    if not sub_type:
        url = '/v2/{}/{}/attributes/{}/securityLabels'.format(
            main_type, unique_id, attribute_id
        )
    else:
        url = '/v2/{}/{}/{}/attributes/{}/securityLabels'.format(
            main_type, sub_type, unique_id, attribute_id
        )
    for l in self._iterate(url, params, 'securityLabel'):
        yield l
Args: owner: main_type: sub_type: unique_id: attribute_id: params: Return:
juraj-google-style
def delete(filething):
    t = MP4(filething)
    filething.fileobj.seek(0)
    t.delete(filething)
delete(filething) Arguments: filething (filething) Raises: mutagen.MutagenError Remove tags from a file.
juraj-google-style
def dependencies(self, user=None, napp=None):
    napps = self._get_napp_key('napp_dependencies', user, napp)
    return [tuple(napp.split('/')) for napp in napps]
Get napp_dependencies from an installed NApp. Args: user(string): A username. napp(string): A NApp name. Returns: napps(list): List of tuples with username and NApp name, e.g. [('kytos', 'of_core'), ('kytos', 'of_l2ls')]
juraj-google-style
def update_locate_candidates(candidate, next_candidates, x_val, y_val, degree): (centroid_x, centroid_y, width, candidate_nodes) = candidate point = np.asfortranarray([x_val, y_val]) if (not _helpers.contains_nd(candidate_nodes, point)): return (nodes_a, nodes_b, nodes_c, nodes_d) = _surface_helpers.subdivide_nodes(candidate_nodes, degree) half_width = (0.5 * width) next_candidates.extend((((centroid_x - half_width), (centroid_y - half_width), half_width, nodes_a), (centroid_x, centroid_y, (- half_width), nodes_b), ((centroid_x + width), (centroid_y - half_width), half_width, nodes_c), ((centroid_x - half_width), (centroid_y + width), half_width, nodes_d)))
Update list of candidate surfaces during geometric search for a point. .. note:: This is used **only** as a helper for :func:`locate_point`. Checks if the point ``(x_val, y_val)`` is contained in the ``candidate`` surface. If not, this function does nothing. If the point is contained, the four subdivided surfaces from ``candidate`` are added to ``next_candidates``. Args: candidate (Tuple[float, float, float, numpy.ndarray]): A 4-tuple describing a surface and its centroid / width. Contains * Three times centroid ``x``-value * Three times centroid ``y``-value * "Width" of parameter space for the surface * Control points for the surface next_candidates (list): List of "candidate" sub-surfaces that may contain the point being located. x_val (float): The ``x``-coordinate being located. y_val (float): The ``y``-coordinate being located. degree (int): The degree of the surface.
codesearchnet
def list_nics(access_token, subscription_id):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Network/',
                        '/networkInterfaces?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
List the network interfaces in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of NICs list with properties.
codesearchnet
def run(self, *args, **kwargs): accounts = list(AWSAccount.get_all(include_disabled=False).values()) s3_acl = get_template('cloudtrail_s3_bucket_policy.json') s3_bucket_name = self.dbconfig.get('bucket_name', self.ns) s3_bucket_region = self.dbconfig.get('bucket_region', self.ns, 'us-west-2') s3_bucket_account = AWSAccount.get(self.dbconfig.get('bucket_account', self.ns)) CloudTrail.create_s3_bucket(s3_bucket_name, s3_bucket_region, s3_bucket_account, s3_acl) self.validate_sqs_policy(accounts) for account in accounts: ct = CloudTrail(account, s3_bucket_name, s3_bucket_region, self.log) ct.run()
Entry point for the scheduler Args: *args: Optional arguments **kwargs: Optional keyword arguments Returns: None
codesearchnet
def get_config_parameter(config: ConfigParser,
                         section: str,
                         param: str,
                         fn: Callable[[Any], Any],
                         default: Any) -> Any:
    try:
        value = fn(config.get(section, param))
    except (TypeError, ValueError, NoOptionError):
        log.warning(
            'Configuration variable {} not found or improper in section [{}]; '
            'using default of {!r}', param, section, default)
        if default is None:
            value = default
        else:
            value = fn(default)
    return value
Fetch parameter from ``configparser`` ``.INI`` file. Args: config: :class:`ConfigParser` object section: section name within config file param: name of parameter within section fn: function to apply to string parameter (e.g. ``int``) default: default value Returns: parameter value, or ``None`` if ``default is None``, or ``fn(default)``
codesearchnet
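A short usage sketch for `get_config_parameter` above, assuming the function is importable from its module; the `.INI` content is invented for illustration.

```python
from configparser import ConfigParser

config = ConfigParser()
config.read_string("[server]\nport = 8080\n")

# Present and well-formed: returns int("8080") == 8080.
port = get_config_parameter(config, "server", "port", fn=int, default=80)

# Missing option: falls back to fn(default) == 30.0.
timeout = get_config_parameter(config, "server", "timeout", fn=float, default=30)
```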
def _StopMonitoringProcess(self, process): if process is None: raise ValueError('Missing process.') pid = process.pid self._RaiseIfNotMonitored(pid) del self._process_information_per_pid[pid] rpc_client = self._rpc_clients_per_pid.get(pid, None) if rpc_client: rpc_client.Close() del self._rpc_clients_per_pid[pid] if pid in self._rpc_errors_per_pid: del self._rpc_errors_per_pid[pid] logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format( process.name, pid))
Stops monitoring a process. Args: process (MultiProcessBaseProcess): process. Raises: KeyError: if the process is not monitored. ValueError: if the process is missing.
juraj-google-style
def _random_segmentation(num_items, num_segments):
    mask_indices = np.arange(num_items - 1) < num_segments - 1
    np.random.shuffle(mask_indices)
    first_in_segment = np.pad(mask_indices, [[1, 0]])
    segment_id = np.cumsum(first_in_segment)
    _, segment_length = np.unique(segment_id, return_counts=True)
    return segment_length
Partition a sequence of items randomly into non-empty segments. Args: num_items: an integer scalar > 0 num_segments: an integer scalar in [1, num_items] Returns: a Tensor with shape [num_segments] containing positive integers that add up to num_items
github-repos
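A quick check of `_random_segmentation` above, assuming the function and `numpy` are in scope; it exercises the documented invariants (non-empty segments that sum to `num_items`).

```python
import numpy as np

np.random.seed(0)
lengths = _random_segmentation(num_items=10, num_segments=3)

assert len(lengths) == 3      # one length per segment
assert lengths.sum() == 10    # segments partition all items
assert (lengths > 0).all()    # every segment is non-empty
```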
def AddBackpropLoopCounter(self, count, outer_grad_state): in_separate_functions = count.graph is not ops.get_default_graph() if in_separate_functions: count = array_ops.identity(count) else: one = constant_op.constant(1, name='b_count') self.Enter() self.AddName(count.name) enter_count = _Enter(count, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name='b_count') self.loop_enters.append(enter_count) merge_count = merge([enter_count, enter_count])[0] self._pivot_for_pred = merge_count if in_separate_functions: one = constant_op.constant(1, name='b_count') pred = math_ops.greater_equal(merge_count, one) self._pivot = loop_cond(pred, name='b_count') switch_count = switch(merge_count, self._pivot) index = math_ops.subtract(switch_count[1], one) self._pivot_for_body = index next_count = _NextIteration(index) merge_count.op._update_input(1, next_count) final_zero = exit(switch_count[0], name='b_count') self.loop_exits.append(final_zero) if outer_grad_state is not None: outer_grad_state.grad_sync._add_control_input(final_zero.op) self.ExitResult([final_zero]) self.Exit() return next_count
Add the backprop loop that controls the iterations. This is added to the backprop loop. It is used to control the loop termination of the backprop loop. Called in the outer context of this grad context. The pseudocode is: `n = count; while (n >= 1) { n--; }` Note that a control dependency is added to `final_zero` to ensure the correct execution order of stack pop ops. Args: count: The number of iterations for backprop. outer_grad_state: The outer grad state. None if not nested. Returns: The loop index.
github-repos
def __init__(self, sv, sess, step_counter=None):
    super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
    self._sv = sv
    self._sess = sess
    self._last_time = 0.0
    self._last_step = 0
    step_counter = sv.global_step if step_counter is None else step_counter
    self._step_counter = step_counter
    self._summary_tag = '%s/sec' % self._step_counter.op.name
Create a `SVStepCounterThread`. Args: sv: A `Supervisor`. sess: A `Session`. step_counter: A `Tensor` holding the step counter. By defaults, it uses sv.global_step.
github-repos
def embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]: if self._using_tpu: if save_context.in_save_context(): return {table: self._variables[table.name]['parameters'].variables[0] for table in self._table_config} raise RuntimeError('Unable to retrieve embedding tables when using a TPU strategy. If you need access, save your model, create this object under a CPU strategy and restore.') self._maybe_build(None) return {table: self._variables[table.name]['parameters'] for table in self._table_config}
Returns a dict of embedding tables, keyed by `TableConfig`. This property only works when the `TPUEmbedding` object is created under a non-TPU strategy. This is intended to be used to for CPU based lookup when creating a serving checkpoint. Returns: A dict of embedding tables, keyed by `TableConfig`. Raises: RuntimeError: If object was created under a `TPUStrategy`.
github-repos
def from_sub_model_configs(cls,
                           semantic_config: BarkSemanticGenerationConfig,
                           coarse_acoustics_config: BarkCoarseGenerationConfig,
                           fine_acoustics_config: BarkFineGenerationConfig,
                           **kwargs):
    return cls(semantic_config=semantic_config.to_dict(),
               coarse_acoustics_config=coarse_acoustics_config.to_dict(),
               fine_acoustics_config=fine_acoustics_config.to_dict(),
               **kwargs)
Instantiate a [`BarkGenerationConfig`] (or a derived class) from bark sub-models generation configuration. Returns: [`BarkGenerationConfig`]: An instance of a configuration object
github-repos
def check_directory(path, human_readable_name):
    if not os.path.exists(path):
        LOGGER.error('%s directory does not exist: %s', human_readable_name, path)
        return False
    if not os.path.isdir(path):
        LOGGER.error('%s directory is not a directory: %s', human_readable_name, path)
        return False
    errors = True
    if not os.access(path, os.R_OK):
        LOGGER.error('%s directory is not readable: %s', human_readable_name, path)
        errors = False
    if not os.access(path, os.W_OK):
        LOGGER.error('%s directory is not writable: %s', human_readable_name, path)
        errors = False
    return errors
Verify that the directory exists and is readable and writable. Args: path (str): a directory which should exist and be writable human_readable_name (str): a human readable string for the directory which is used in logging statements Returns: bool: False if an error exists, True otherwise.
codesearchnet
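A usage sketch for `check_directory` above, assuming the function and its module-level `LOGGER` are importable; the temporary directory stands in for a real data directory.

```python
import os
import tempfile

scratch = tempfile.mkdtemp()

assert check_directory(scratch, "scratch") is True   # exists, readable, writable
assert check_directory(os.path.join(scratch, "missing"), "scratch") is False
```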
def _validate_paths(self, settings, name, value): return [self._validate_path(settings, name, item) for item in value]
Apply ``SettingsPostProcessor._validate_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Raises: boussole.exceptions.SettingsInvalidError: Once a path does not exists. Returns: list: Validated paths.
codesearchnet
def tool(name):
    global g_tools

    def decorator(fn):
        g_tools[name] = fn
        return fn

    return decorator
Decorator for defining lint tools. Args: name (str): The name of the tool. This name will be used to identify the tool in `pelconf.yaml`.
juraj-google-style
def parse_genotypes(variant, individuals, individual_positions):
    genotypes = []
    for ind in individuals:
        pos = individual_positions[ind['individual_id']]
        genotypes.append(parse_genotype(variant, ind, pos))
    return genotypes
Parse the genotype calls for a variant Args: variant(cyvcf2.Variant) individuals: List[dict] individual_positions(dict) Returns: genotypes(list(dict)): A list of genotypes
juraj-google-style
def validate_request_success(
    response_text, request_url, status_code, expected_status_code
):
    try:
        assert status_code == expected_status_code
    except AssertionError:
        msg = (
            "Request to {url} failed with status {status_code}:\n"
            "The response from the request was as follows:\n\n"
            "{content}"
        ).format(
            url=request_url, status_code=status_code, content=response_text
        )
        raise BadHttpRequestError(msg)
Validates that a request was successful. Args: response_text (str): The response body of the request. request_url (str): The URL the request was made at. status_code (int): The status code of the response. expected_status_code (int): The expected status code of the response. Raises: :class:`saltant.exceptions.BadHttpRequestError`: The HTTP request failed.
juraj-google-style
def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, index): full_name = '.'.join((service_name, method_proto.name)) input_type = self._GetTypeFromScope( package, method_proto.input_type, scope) output_type = self._GetTypeFromScope( package, method_proto.output_type, scope) return descriptor.MethodDescriptor(name=method_proto.name, full_name=full_name, index=index, containing_service=None, input_type=input_type, output_type=output_type, options=_OptionsOrNone(method_proto))
Creates a method descriptor from a MethodDescriptorProto. Args: method_proto: The proto describing the method. service_name: The name of the containing service. package: Optional package name to look up for types. scope: Scope containing available types. index: Index of the method in the service. Returns: An initialized MethodDescriptor object.
juraj-google-style
def infer(msg, mrar=False): df = common.df(msg) if common.allzeros(msg): return 'EMPTY' if df == 17: tc = common.typecode(msg) if 1 <= tc <= 4: return 'BDS08' if 5 <= tc <= 8: return 'BDS06' if 9 <= tc <= 18: return 'BDS05' if tc == 19: return 'BDS09' if 20 <= tc <= 22: return 'BDS05' if tc == 28: return 'BDS61' if tc == 29: return 'BDS62' if tc == 31: return 'BDS65' IS10 = bds10.is10(msg) IS17 = bds17.is17(msg) IS20 = bds20.is20(msg) IS30 = bds30.is30(msg) IS40 = bds40.is40(msg) IS50 = bds50.is50(msg) IS60 = bds60.is60(msg) IS44 = bds44.is44(msg) IS45 = bds45.is45(msg) if mrar: allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40", "BDS44", "BDS45", "BDS50", "BDS60"]) mask = [IS10, IS17, IS20, IS30, IS40, IS44, IS45, IS50, IS60] else: allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40", "BDS50", "BDS60"]) mask = [IS10, IS17, IS20, IS30, IS40, IS50, IS60] bds = ','.join(sorted(allbds[mask])) if len(bds) == 0: return None else: return bds
Estimate the most likely BDS code of a message. Args: msg (String): 28 bytes hexadecimal message string mrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45). Defaults to False. Returns: String or None: BDS version, or possible versions, or None if nothing matches.
juraj-google-style
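A usage sketch for `infer` above, assuming the pyModeS-style helpers it relies on (`common`, `bds10`, `bds17`, ...) are available in its module; the hex string is an illustrative DF17 identification frame.

```python
msg = "8D4840D6202CC371C32CE0576098"   # illustrative 28-character ADS-B message

print(infer(msg))              # expected: "BDS08" (aircraft identification)
print(infer(msg, mrar=True))   # also considers BDS 4,4 and BDS 4,5 for Comm-B replies
```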
def transition(self, state, message=''):
    with self.changes_squashed:
        initial_state = self.state.value
        if self.state_set.transition_allowed(initial_state=initial_state,
                                             target_state=state):
            self.log.debug('%s: Transitioning from %s to %s',
                           self.mri, initial_state, state)
            if state == ss.DISABLED:
                alarm = Alarm.invalid('Disabled')
            elif state == ss.FAULT:
                alarm = Alarm.major(message)
            else:
                alarm = Alarm()
            self.update_health(self, HealthInfo(alarm))
            self.state.set_value(state)
            self.state.set_alarm(alarm)
            for child, writeable in self._children_writeable[state].items():
                if isinstance(child, AttributeModel):
                    child.meta.set_writeable(writeable)
                elif isinstance(child, MethodModel):
                    child.set_writeable(writeable)
        else:
            raise TypeError('Cannot transition from %s to %s'
                            % (initial_state, state))
Change to a new state if the transition is allowed Args: state (str): State to transition to message (str): Message if the transition is to a fault state
codesearchnet
def proc_val(key, val): list_type_keys = list(VALID_FEFF_TAGS) del list_type_keys[list_type_keys.index("ELNES")] del list_type_keys[list_type_keys.index("EXELFS")] boolean_type_keys = () float_type_keys = ("S02", "EXAFS", "RPATH") def smart_int_or_float(numstr): if numstr.find(".") != -1 or numstr.lower().find("e") != -1: return float(numstr) else: return int(numstr) try: if key.lower() == 'cif': m = re.search(r"\w+.cif", val) return m.group(0) if key in list_type_keys: output = list() toks = re.split(r"\s+", val) for tok in toks: m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok) if m: output.extend([smart_int_or_float(m.group(2))] * int(m.group(1))) else: output.append(smart_int_or_float(tok)) return output if key in boolean_type_keys: m = re.search(r"^\W+([TtFf])", val) if m: if m.group(1) == "T" or m.group(1) == "t": return True else: return False raise ValueError(key + " should be a boolean type!") if key in float_type_keys: return float(val) except ValueError: return val.capitalize() return val.capitalize()
Static helper method to convert Feff parameters to proper types, e.g. integers, floats, lists, etc. Args: key: Feff parameter key val: Actual value of Feff parameter.
juraj-google-style
def plot(self, fmt=None, fig=None, ax=None): u = 4 v = 0.25 r = None if ((fig is None) and (ax is None)): fig = plt.figure(figsize=(u, 1)) else: r = fig if (ax is None): ax = fig.add_axes([(0.1 * v), 0.1, (0.8 * v), 0.8]) else: r = ax rect1 = patches.Rectangle((0, 0), (u * v), (u * v), color=self.colour, lw=1, hatch=self.hatch, ec='k') ax.add_patch(rect1) ax.text((1.0 + ((0.1 * v) * u)), ((u * v) * 0.5), self.component.summary(fmt=fmt), fontsize=max(u, 15), verticalalignment='center', horizontalalignment='left') ax.set_xlim([0, (u * v)]) ax.set_ylim([0, (u * v)]) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.invert_yaxis() return r
Make a simple plot of the Decor. Args: fmt (str): A Python format string for the component summaries. fig (Pyplot figure): A figure, optional. Use either fig or ax, not both. ax (Pyplot axis): An axis, optional. Use either fig or ax, not both. Returns: fig or ax or None. If you pass in an ax, you get it back. If you pass in a fig, you get it. If you pass nothing, the function creates a plot object as a side-effect.
codesearchnet
def frosted_glass_blur(x, severity=1): c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][(severity - 1)] x = np.uint8((tfds.core.lazy_imports.skimage.filters.gaussian((np.array(x) / 255.0), sigma=c[0], multichannel=True) * 255)) for _ in range(c[2]): for h in range((x.shape[0] - c[1]), c[1], (- 1)): for w in range((x.shape[1] - c[1]), c[1], (- 1)): (dx, dy) = np.random.randint((- c[1]), c[1], size=(2,)) (h_prime, w_prime) = ((h + dy), (w + dx)) (x[(h, w)], x[(h_prime, w_prime)]) = (x[(h_prime, w_prime)], x[(h, w)]) x_clip = np.clip(tfds.core.lazy_imports.skimage.filters.gaussian((x / 255.0), sigma=c[0], multichannel=True), 0, 1) x_clip *= 255 return around_and_astype(x_clip)
Frosted glass blurring to images. Apply frosted glass blurring to images by shuffling pixels locally. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
codesearchnet
def __init__(self, hash_queue, hash_analysis_queue, **kwargs): super(HTTPHashAnalyzer, self).__init__( hash_queue, hash_analysis_queue, **kwargs) self._checked_for_old_python_version = False
Initializes a HTTP hash analyzer. Args: hash_queue (Queue.queue): a queue that contains hashes to be analyzed. hash_analysis_queue (Queue.queue): queue that the analyzer will append HashAnalysis objects to.
juraj-google-style
def resolve(self, context, provider):
    try:
        self._value.resolve(context, provider)
    except FailedLookup as e:
        raise FailedVariableLookup(self.name, e.lookup, e.error)
Recursively resolve any lookups with the Variable. Args: context (:class:`stacker.context.Context`): Current context for building the stack provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider
juraj-google-style
def pprnt(input, return_data=False):
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[32m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    import json, re
    result = json.dumps(input, sort_keys=True, indent=4)
    result = re.sub(r'(")(\w*?_id)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
    result = re.sub(r'(")(\w*?_set)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
    result = re.sub(r'(\n *?")(\w*?)(":)', r'\1%s%s\2%s\3' % (BOLD, OKGREEN, ENDC), result)
    if not return_data:
        print(result)
    else:
        return result
Prettier print for nested data Args: input: Input data return_data (bool): Default False. Print outs if False, returns if True. Returns: None | Pretty formatted text representation of input data.
juraj-google-style
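A small usage sketch for `pprnt` above, assuming it is in scope; the nested dict is invented to show the `_id`/`_set` key highlighting.

```python
data = {"user_id": 42, "name": "ada", "role_set": ["admin", "dev"]}

pprnt(data)                           # prints a colourised, indented dump to stdout
text = pprnt(data, return_data=True)  # returns the formatted string instead
```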
def _srvmgr(cmd, return_json=False):
    if isinstance(cmd, list):
        cmd = ' '.join(cmd)
    if return_json:
        cmd = 'ConvertTo-Json -Compress -Depth 4 -InputObject @({0})'.format(cmd)
    cmd = 'Import-Module WebAdministration; {0}'.format(cmd)
    ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
    if ret['retcode'] != 0:
        msg = 'Unable to execute command: {0}\nError: {1}'.format(cmd, ret['stderr'])
        log.error(msg)
    return ret
Execute a powershell command from the WebAdministration PS module. Args: cmd (list): The command to execute in a list return_json (bool): True formats the return in JSON, False just returns the output of the command. Returns: str: The output from the command
codesearchnet
def read_config(
        config_filepath,
        logger=logging.getLogger('ProsperCommon'),
):
    config_parser = configparser.ConfigParser(
        interpolation=ExtendedInterpolation(),
        allow_no_value=True,
        delimiters=('='),
        inline_comment_prefixes=('#'),
    )
    logger.debug('config_filepath=%s', config_filepath)
    with open(config_filepath, 'r') as filehandle:
        config_parser.read_file(filehandle)
    return config_parser
fetch and parse config file Args: config_filepath (str): path to config file. abspath > relpath logger (:obj:`logging.Logger`): logger to catch error msgs
juraj-google-style
def parse_args(argv=None): parent_parser = get_parent_parser() desc = 'Data Version Control' parser = DvcParser(prog='dvc', description=desc, parents=[parent_parser], formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-V', '--version', action=VersionAction, nargs=0, help="Show program's version.") subparsers = parser.add_subparsers(title='Available Commands', metavar='COMMAND', dest='cmd', help='Use dvc COMMAND --help for command-specific help.') fix_subparsers(subparsers) for cmd in COMMANDS: cmd.add_parser(subparsers, parent_parser) args = parser.parse_args(argv) return args
Parses CLI arguments. Args: argv: optional list of arguments to parse. sys.argv is used by default. Raises: dvc.exceptions.DvcParserError: raised for argument parsing errors.
codesearchnet
def report(vulnerabilities, fileobj, print_sanitised): n_vulnerabilities = len(vulnerabilities) unsanitised_vulnerabilities = [v for v in vulnerabilities if (not isinstance(v, SanitisedVulnerability))] n_unsanitised = len(unsanitised_vulnerabilities) n_sanitised = (n_vulnerabilities - n_unsanitised) heading = '{} vulnerabilit{} found{}.\n'.format(('No' if (n_unsanitised == 0) else n_unsanitised), ('y' if (n_unsanitised == 1) else 'ies'), (' (plus {} sanitised)'.format(n_sanitised) if n_sanitised else '')) vulnerabilities_to_print = (vulnerabilities if print_sanitised else unsanitised_vulnerabilities) with fileobj: for (i, vulnerability) in enumerate(vulnerabilities_to_print, start=1): fileobj.write(vulnerability_to_str(i, vulnerability)) if (n_unsanitised == 0): fileobj.write(color(heading, GOOD)) else: fileobj.write(color(heading, DANGER))
Prints issues in color-coded text format. Args: vulnerabilities: list of vulnerabilities to report fileobj: The output file object, which may be sys.stdout print_sanitised: whether sanitised vulnerabilities are included in the output
codesearchnet
def __lt__(self, other): if not isinstance(other, DateTimeValues): raise ValueError('Other not an instance of DateTimeValues') normalized_timestamp = self._GetNormalizedTimestamp() other_normalized_timestamp = other._GetNormalizedTimestamp() if normalized_timestamp is None: return other_normalized_timestamp is not None if other_normalized_timestamp is None: return False return normalized_timestamp < other_normalized_timestamp
Determines if the date time values are less than other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are less than other. Raises: ValueError: if other is not an instance of DateTimeValues.
juraj-google-style
def Next(self):
    try:
        (self.key, self.value) = next(self.current)
    except StopIteration:
        if self.current != self.second:
            self.current = self.second
            return self.Next()
        return False
    return True
Advances the iterator forward 1 step. Returns: bool: True if another item exists in the iterator, False otherwise.
codesearchnet
class CaptureLogger: def __init__(self, logger): self.logger = logger self.io = StringIO() self.sh = logging.StreamHandler(self.io) self.out = '' def __enter__(self): self.logger.addHandler(self.sh) return self def __exit__(self, *exc): self.logger.removeHandler(self.sh) self.out = self.io.getvalue() def __repr__(self): return f'captured: {self.out}\n'
Context manager to capture `logging` streams Args: logger: 'logging` logger object Returns: The captured output is available via `self.out` Example: ```python >>> from transformers import logging >>> from transformers.testing_utils import CaptureLogger >>> msg = "Testing 1, 2, 3" >>> logging.set_verbosity_info() >>> logger = logging.get_logger("transformers.models.bart.tokenization_bart") >>> with CaptureLogger(logger) as cl: ... logger.info(msg) >>> assert cl.out, msg + " " ```
github-repos
def _construct_concrete_function(func, output_graph_def, converted_input_indices): input_tensors = func.graph.internal_captures converted_inputs = object_identity.ObjectIdentitySet([input_tensors[index] for index in converted_input_indices]) not_converted_inputs = [tensor for tensor in func.inputs if tensor not in converted_inputs] not_converted_inputs_map = {tensor.name: tensor for tensor in not_converted_inputs} new_input_names = [tensor.name for tensor in not_converted_inputs] new_output_names = [tensor.name for tensor in func.outputs] for f in output_graph_def.library.function: if context.context().has_function(f.signature.name): context.context().remove_function(f.signature.name) new_func = wrap_function.function_from_graph_def(output_graph_def, new_input_names, new_output_names) for input_tensor in new_func.inputs: input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape) return new_func
Constructs a concrete function from the `output_graph_def`. Args: func: ConcreteFunction output_graph_def: GraphDef proto. converted_input_indices: Set of integers of input indices that were converted to constants. Returns: ConcreteFunction.
github-repos
def _split_generators(self, dl_manager):
    splits = super(Imagenet2012Corrupted, self)._split_generators(dl_manager)
    validation = splits[1]
    return [validation]
Return the validation split of ImageNet2012. Args: dl_manager: download manager object. Returns: validation split.
juraj-google-style
def _get_input_target_path(self, local_file_path):
    path, filename = os.path.split(local_file_path)
    if '*' in filename:
        return path + '/'
    else:
        return local_file_path
Returns a directory or file path to be the target for "gsutil cp". If the filename contains a wildcard, then the target path must be a directory in order to ensure consistency whether the source pattern contains one or multiple files. Args: local_file_path: A full path terminating in a file or a file wildcard. Returns: The path to use as the "gsutil cp" target.
codesearchnet
def update_data(func): default = dict([(param.name, param.default) for param in inspect.signature(func).parameters.values() if (param.default != getattr(inspect, '_empty'))]) @wraps(func) def wrapper(*args, **kwargs): default.update(kwargs) kwargs.update(default) cur_mod = sys.modules[func.__module__] logger = logs.get_logger(name_or_func=f'{cur_mod.__name__}.{func.__name__}', types='stream') root_path = cur_mod.DATA_PATH date_type = kwargs.pop('date_type', 'date') save_static = kwargs.pop('save_static', True) save_dynamic = kwargs.pop('save_dynamic', True) symbol = kwargs.get('symbol') file_kw = dict(func=func, symbol=symbol, root=root_path, date_type=date_type) d_file = cache_file(has_date=True, **file_kw) s_file = cache_file(has_date=False, **file_kw) cached = kwargs.pop('cached', False) if (cached and save_static and files.exists(s_file)): logger.info(f'Reading data from {s_file} ...') return pd.read_parquet(s_file) data = func(*args, **kwargs) if save_static: files.create_folder(s_file, is_file=True) save_data(data=data, file_fmt=s_file, append=False) logger.info(f'Saved data file to {s_file} ...') if save_dynamic: drop_dups = kwargs.pop('drop_dups', None) files.create_folder(d_file, is_file=True) save_data(data=data, file_fmt=d_file, append=True, drop_dups=drop_dups) logger.info(f'Saved data file to {d_file} ...') return data return wrapper
Decorator to save data more easily. Use parquet as data format Args: func: function to load data from data source Returns: wrapped function
codesearchnet
def set_element_type(entity, dtype, shape=UNSPECIFIED):
    del entity
    del dtype
    del shape
Indicates that the entity is expected to hold items of specified type/shape. The staged TensorFlow ops will reflect and assert this data type. Ignored otherwise. Args: entity: The entity to annotate. dtype: TensorFlow dtype value to assert for entity. shape: Optional shape to assert for entity.
github-repos
def encrypt_encoded(self, encoding, r_value):
    obfuscator = r_value or 1
    ciphertext = self.raw_encrypt(encoding.encoding, r_value=obfuscator)
    encrypted_number = EncryptedNumber(self, ciphertext, encoding.exponent)
    if r_value is None:
        encrypted_number.obfuscate()
    return encrypted_number
Paillier encrypt an encoded value. Args: encoding: The EncodedNumber instance. r_value (int): obfuscator for the ciphertext; by default (i.e. if *r_value* is None), a random value is used. Returns: EncryptedNumber: An encryption of *value*.
codesearchnet
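A sketch of how `encrypt_encoded` above is typically reached, assuming the python-paillier (`phe`) package, where it is a method of `PaillierPublicKey`; the top-level `EncodedNumber` import and the plaintext value are assumptions for illustration.

```python
from phe import paillier, EncodedNumber

public_key, private_key = paillier.generate_paillier_keypair()
encoded = EncodedNumber.encode(public_key, 3.141592)

# r_value=None lets the method pick a random obfuscator.
ciphertext = public_key.encrypt_encoded(encoded, r_value=None)
assert abs(private_key.decrypt(ciphertext) - 3.141592) < 1e-9
```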
def write_pdb(self, custom_name='', out_suffix='', out_dir=None, custom_selection=None, force_rerun=False): if (not custom_selection): custom_selection = ModelSelection([0]) if ((not out_dir) or (not custom_name)): if (not out_suffix): out_suffix = '_new' outfile = ssbio.utils.outfile_maker(inname=self.structure_file, outname=custom_name, append_to_name=out_suffix, outdir=out_dir, outext='.pdb') try: if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): self.save(outfile, custom_selection) except TypeError as e: log.error('{}: unable to save structure in PDB file format'.format(self.structure_file)) raise TypeError(e) return outfile
Write a new PDB file for the Structure's FIRST MODEL. Set custom_selection to a PDB.Select class for custom SMCRA selections. Args: custom_name: Filename of the new file (without extension) out_suffix: Optional string to append to new PDB file out_dir: Optional directory to output the file custom_selection: Optional custom selection class force_rerun: If existing file should be overwritten Returns: out_file: filepath of new PDB file
codesearchnet
def multihead_self_attention_reduced(x,
                                     memory_antecedent=None,
                                     bias=None,
                                     factor=None,
                                     multihead_params=None,
                                     nonlinearity='none',
                                     reduction_type='conv',
                                     add_mask=True):
    if not factor or not multihead_params:
        raise ValueError('factor and multihead_params should be set')
    if memory_antecedent is not None:
        raise NotImplementedError(
            'multihead_self_attention_reduced only works with self-attention')
    depth = x.get_shape().as_list()[-1]
    if reduction_type == 'attention':
        memory_x = local_reduction_attention(x, factor, multihead_params)
    elif reduction_type == 'conv':
        memory_x = conv_elems_1d(x, factor)
    else:
        raise ValueError('Unknown reduction type {}'.format(reduction_type))
    if nonlinearity == 'silu':
        memory_x *= tf.nn.sigmoid(memory_x)
    elif nonlinearity != 'none':
        raise ValueError('Unknown non linearity {}'.format(nonlinearity))
    memory_x = tf.concat([x[:, :1, :], memory_x], axis=1)

    @expert_utils.add_name_scope()
    def construct_bias_vectors(t, axis):
        length = tf.to_float(common_layers.shape_list(t)[1])
        length_coordinates = tf.range(length, dtype=tf.float32)
        length_coordinates = tf.expand_dims(length_coordinates, axis=axis)
        return length_coordinates

    if add_mask:
        bias = tf.to_float(
            tf.greater(construct_bias_vectors(memory_x, 0) * factor,
                       construct_bias_vectors(x, 1) + 0.001)) * -1e9
        bias = tf.expand_dims(bias, axis=0)
        bias = tf.expand_dims(bias, axis=0)
    else:
        bias = None
    return multihead_attention(query_antecedent=x,
                               memory_antecedent=memory_x,
                               bias=bias,
                               output_depth=depth,
                               **multihead_params)
Reduce the length dimension by compressing with conv. Args: x (tf.Tensor): float32 of shape [batch, length, depth] memory_antecedent (tf.Tensor): Unsupported for now bias (tf.Tensor): Ignored factor (int): compression factor for the memory sequence multihead_params (dict): parameters for multihead attention nonlinearity (str): Add some non-linearity after the memory block reduction_type (str): type of compression add_mask (bool): If True, add the bias to prevent attention to the future Returns: (tf.Tensor): float32 of shape [batch, length, depth] Raises: ValueError: If reduction_type or nonlinearity is invalid
codesearchnet
def directional_emd(direction, d1, d2):
    if direction == Direction.CAUSE:
        func = hamming_emd
    elif direction == Direction.EFFECT:
        func = effect_emd
    else:
        validate.direction(direction)
    return round(func(d1, d2), config.PRECISION)
Compute the EMD between two repertoires for a given direction. The full EMD computation is used for cause repertoires. A fast analytic solution is used for effect repertoires. Args: direction (Direction): |CAUSE| or |EFFECT|. d1 (np.ndarray): The first repertoire. d2 (np.ndarray): The second repertoire. Returns: float: The EMD between ``d1`` and ``d2``, rounded to |PRECISION|. Raises: ValueError: If ``direction`` is invalid.
juraj-google-style
def to_dict(self): return {'hostname': self.hostname, 'port': self.port, 'transport': self.transport, 'virtual_host': self.virtual_host}
Return a dictionary of the broker stats. Returns: dict: Dictionary of the stats.
codesearchnet
def set_value(value_proto, value, exclude_from_indexes=None): value_proto.Clear() if isinstance(value, (list, tuple)): for sub_value in value: set_value(value_proto.array_value.values.add(), sub_value, exclude_from_indexes) return if isinstance(value, entity_pb2.Value): value_proto.MergeFrom(value) elif isinstance(value, unicode): value_proto.string_value = value elif isinstance(value, str): value_proto.blob_value = value elif isinstance(value, bool): value_proto.boolean_value = value elif isinstance(value, (int, long)): value_proto.integer_value = value elif isinstance(value, float): value_proto.double_value = value elif isinstance(value, datetime.datetime): to_timestamp(value, value_proto.timestamp_value) elif isinstance(value, entity_pb2.Key): value_proto.key_value.CopyFrom(value) elif isinstance(value, entity_pb2.Entity): value_proto.entity_value.CopyFrom(value) else: raise TypeError(('value type: %r not supported' % (value,))) if (exclude_from_indexes is not None): value_proto.exclude_from_indexes = exclude_from_indexes
Set the corresponding datastore.Value _value field for the given arg. Args: value_proto: datastore.Value proto message. value: python object or datastore.Value. (unicode value will set a datastore string value, str value will set a blob string value). Undefined behavior if value is/contains value_proto. exclude_from_indexes: if the value should be exclude from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Raises: TypeError: if the given value type is not supported.
codesearchnet
def __init__(self, definition, data_visibility_policy): self.data_visibility_policy = data_visibility_policy self.breakpoint = copy.deepcopy(definition) self.breakpoint['stackFrames'] = [] self.breakpoint['evaluatedExpressions'] = [] self.breakpoint['variableTable'] = [{ 'status': { 'isError': True, 'refersTo': 'VARIABLE_VALUE', 'description': { 'format': 'Buffer full. Use an expression to see more data' } } }] self._var_table = self.breakpoint['variableTable'] self._var_table_index = {} self._total_size = 0 self.max_frames = 20 self.max_expand_frames = 5 self.max_size = 32768 self.default_capture_limits = _CaptureLimits() self.expression_capture_limits = _CaptureLimits(max_value_len=32768, max_list_items=32768)
Class constructor. Args: definition: breakpoint definition that this class will augment with captured data. data_visibility_policy: An object used to determine the visibility of a captured variable. May be None if no policy is available.
juraj-google-style
def _EvaluateExpressions(self, frame): return [self._FormatExpression(frame, expression) for expression in (self._definition.get('expressions') or [])]
Evaluates watched expressions into a string form. If expression evaluation fails, the error message is used as evaluated expression string. Args: frame: Python stack frame of breakpoint hit. Returns: Array of strings where each string corresponds to the breakpoint expression with the same index.
codesearchnet
def load_vocabulary(lang="en", type="wiki"):
    src_dir = "{}_vocab".format(type)
    p = locate_resource(src_dir, lang)
    return CountedVocabulary.from_vocabfile(p)
Return a CountedVocabulary object. Args: lang (string): language code. type (string): wiki,...
juraj-google-style
def _BuildStations(self, stoplist):
    stations = []
    dists = self._EuclidianDistances(stoplist)
    stations = self._CalculateYLines(dists)
    return stations
Dispatches the best algorithm for calculating station line position. Args: # Class Stop is defined in transitfeed.py stoplist: [Stop, Stop, ...] # Class Trip is defined in transitfeed.py triplist: [Trip, Trip, ...] Returns: # One integer y-coordinate for each station normalized between # 0 and X, where X is the height of the graph in pixels [0, 33, 140, ... , X]
juraj-google-style
def _finish_operation_action(self, action): success = action.data['success'] conn_key = action.data['id'] if self._get_connection_state(conn_key) != self.InProgress: self._logger.error("Invalid finish_operation action on a connection whose state is not InProgress, conn_key=%s", str(conn_key)) return data = self._get_connection(conn_key) callback = data['callback'] conn_id = data['conn_id'] args = action.data['callback_args'] data['state'] = self.Idle data['microstate'] = None callback(conn_id, self.id, success, *args)
Finish an attempted operation. Args: action (ConnectionAction): the action object describing the result of the operation that we are finishing
juraj-google-style
def _maybe_read_file(filename):
    try:
        with open(filename) as infile:
            return infile.read()
    except IOError as e:
        if e.errno == errno.ENOENT:
            return None
Read the given file, if it exists. Args: filename: A path to a file. Returns: A string containing the file contents, or `None` if the file does not exist.
codesearchnet
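A minimal check of `_maybe_read_file` above, assuming the function is in scope.

```python
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("hello")
    path = f.name

print(_maybe_read_file(path))               # "hello"
print(_maybe_read_file(path + ".missing"))  # None (ENOENT is swallowed)
os.remove(path)
```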
def create_unique_base26_symlink(path, source): retries = 0 while True: name = find_matching_symlink(path, source) if name: return os.path.join(path, name) names = [x for x in os.listdir(path) if os.path.islink(os.path.join(path, x))] if names: prev = max(names) else: prev = None linkname = get_next_base26(prev) linkpath = os.path.join(path, linkname) try: os.symlink(source, linkpath) return linkpath except OSError as e: if (e.errno != errno.EEXIST): raise if (retries > 10): raise RuntimeError('Variant shortlink not created - there was too much contention.') retries += 1
Create a base-26 symlink in `path` pointing to `source`. If such a symlink already exists, it is returned. Note that there is a small chance that this function may create a new symlink when there is already one pointed at `source`. Assumes `path` only contains base26 symlinks. Returns: str: Path to created symlink.
codesearchnet
def assemble(self, header_json, metadata_json, content_json): header = json_decode(header_json) if 'msgtype' not in header: log.error("Bad header with no msgtype was: %r", header) raise ProtocolError("No 'msgtype' in header") return self._messages[header['msgtype']].assemble( header_json, metadata_json, content_json )
Create a Message instance assembled from json fragments. Args: header_json (``JSON``) : metadata_json (``JSON``) : content_json (``JSON``) : Returns: message
juraj-google-style
def crypto_withdraw(self, amount, currency, crypto_address):
    params = {'amount': amount,
              'currency': currency,
              'crypto_address': crypto_address}
    return self._send_message('post', '/withdrawals/crypto',
                              data=json.dumps(params))
Withdraw funds to a crypto address. Args: amount (Decimal): The amount to withdraw currency (str): The type of currency (eg. 'BTC') crypto_address (str): Crypto address to withdraw to. Returns: dict: Withdraw details. Example:: { "id":"593533d2-ff31-46e0-b22e-ca754147a96a", "amount":"10.00", "currency": "BTC", }
codesearchnet
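A usage sketch for `crypto_withdraw` above, assuming a cbpro-style `AuthenticatedClient` exposes it; the credentials and address below are placeholders, not real values.

```python
import cbpro

client = cbpro.AuthenticatedClient("api-key", "api-secret", "passphrase")

result = client.crypto_withdraw(
    amount="0.01",
    currency="BTC",
    crypto_address="your-btc-address",
)
print(result.get("id"))
```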
def create_output_excerpts(self, test_info): return []
Creates excerpts of the service's output files. [Optional] This method only applies to services with output files. For services that generates output files, calling this method would create excerpts of the output files. An excerpt should contain info between two calls of `create_output_excerpts` or from the start of the service to the call to `create_output_excerpts`. Use `AndroidDevice#generate_filename` to get the proper filenames for excerpts. This is usually called at the end of: `setup_class`, `teardown_test`, or `teardown_class`. Args: test_info: RuntimeTestInfo, the test info associated with the scope of the excerpts. Returns: List of strings, the absolute paths to the excerpt files created. Empty list if no excerpt files are created.
github-repos
def get(self, feed_item, required=False, column_name=None): result = None keys = [] id_value = feed_item.get(self._id_field, None) if column_name == None else feed_item.get(column_name, None) if not id_value and self._search_field and feed_item.get(self._search_field, None): store_key = feed_item[self._search_field] if self._parent_filter_name: if feed_item.get(self._parent_filter_field_name, None): store_key = str(feed_item.get(self._parent_filter_field_name, None)) + store_key result = store.get(self._entity, store_key) if not result: result, key = self._get_by_name(feed_item) keys.append(key) if not result and required: raise Exception('ERROR: Could not find %s with name %s' % (self._entity, feed_item[self._search_field])) elif id_value: if isinstance(id_value, str) and id_value.startswith('ext'): keys.append(id_value) id_value = store.translate(self._entity, id_value) if id_value and (not column_name): feed_item[self._id_field] = id_value elif id_value and column_name: feed_item[column_name] = id_value if id_value: keys.append(id_value) result = store.get(self._entity, id_value) if not result: result = self._get(feed_item) if not result and required: raise Exception('ERROR: Could not find %s with id %s' % (self._entity, id_value)) store.set(self._entity, keys, result) return result
Retrieves an item. Items could be retrieved from a in memory cache in case it has already been retrieved within the current execution. Also, this method is capable of translating 'ext' placeholder IDs with concrete CM ids. Args: feed_item: Feed item from the Bulkdozer feed representing the item to retrieve. Returns: The CM object that represents the identified entity.
github-repos
def extend_transformations(self, transformations, return_alternatives=False):
    for t in transformations:
        self.append_transformation(t, return_alternatives=return_alternatives)
Extends a sequence of transformations to the TransformedStructure. Args: transformations: Sequence of Transformations return_alternatives: Whether to return alternative TransformedStructures for one-to-many transformations. return_alternatives can be a number, which stipulates the total number of structures to return.
juraj-google-style
def repeat(self, caller: Caller[RequestT, ResponseT], request: RequestT, timeout: float, metrics_collector: Optional[_MetricsCollector]=None) -> ResponseT: return _execute_request(caller, request, timeout, metrics_collector)
repeat method is called from the RequestResponseIO when a repeater is enabled. Args: caller: a `~apache_beam.io.requestresponse.Caller` object that calls the API. request: input request to repeat. timeout: time to wait for the request to complete. metrics_collector: (Optional) a `~apache_beam.io.requestresponse._MetricsCollector` object to collect the metrics for RequestResponseIO.
github-repos
def create(self, task_type_id, task_queue_id, arguments=None, name=''): if (arguments is None): arguments = {} request_url = (self._client.base_api_url + self.list_url) data_to_post = {'name': name, 'arguments': json.dumps(arguments), 'task_type': task_type_id, 'task_queue': task_queue_id} response = self._client.session.post(request_url, data=data_to_post) self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED) return self.response_data_to_model_instance(response.json())
Create a task instance. Args: task_type_id (int): The ID of the task type to base the task instance on. task_queue_id (int): The ID of the task queue to run the job on. arguments (dict, optional): The arguments to give the task type. name (str, optional): A non-unique name to give the task instance. Returns: :class:`saltant.models.base_task_instance.BaseTaskInstance`: A task instance model instance representing the task instance just created.
codesearchnet
async def register_user(self, password, **kwds): user = await self._create_remote_user(password=password, **kwds) if not 'pk' in user: user['pk'] = user['id'] match_query = self.model.user == user['id'] if self.model.select().where(match_query).count() > 0: raise RuntimeError('The user is already registered.') password = self.model(user=user['id'], password=password) password.save() return { 'user': user, 'sessionToken': self._user_session_token(user) }
This function is used to provide a sessionToken for later requests. Args: uid (str): The
juraj-google-style
def write(self, file_des, contents): file_handle = self.filesystem.get_open_file(file_des) if isinstance(file_handle, FakeDirWrapper): self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path) if isinstance(file_handle, FakePipeWrapper): return file_handle.write(contents) file_handle.raw_io = True file_handle._sync_io() file_handle.update_flush_pos() file_handle.write(contents) file_handle.flush() return len(contents)
Write string to file descriptor, returns number of bytes written. Args: file_des: An integer file descriptor for the file object requested. contents: String of bytes to write to file. Returns: Number of bytes written. Raises: OSError: bad file descriptor. TypeError: if file descriptor is not an integer.
codesearchnet
def validate(self, corpus):
    passed = True
    results = {}
    for validator in self.validators:
        sub_result = validator.validate(corpus)
        results[validator.name()] = sub_result
        if not sub_result.passed:
            passed = False
    return CombinedValidationResult(passed, results)
Perform validation on the given corpus. Args: corpus (Corpus): The corpus to test/validate.
codesearchnet
def bold(self, action):
    if action == 'on':
        action = 'E'
    elif action == 'off':
        action = 'F'
    else:
        raise RuntimeError('Invalid action for function bold. Options are on and off')
    self.send(chr(27) + action)
Enable/cancel bold printing Args: action: Enable or disable bold printing. Options are 'on' and 'off' Returns: None Raises: RuntimeError: Invalid action.
juraj-google-style
def get_mel_conditioner_outputs(self, input_features: torch.FloatTensor, composer: str, generation_config: GenerationConfig, attention_mask: Optional[torch.FloatTensor]=None): composer_to_feature_token = generation_config.composer_to_feature_token if composer not in composer_to_feature_token.keys(): raise ValueError(f'Please choose a composer from {list(composer_to_feature_token.keys())}. Composer received - {composer}') composer_value = composer_to_feature_token[composer] composer_value = torch.tensor(composer_value, device=self.device) composer_value = composer_value.repeat(input_features.shape[0]) embedding_offset = min(composer_to_feature_token.values()) input_features = self.mel_conditioner(feature=input_features, index_value=composer_value, embedding_offset=embedding_offset) if attention_mask is not None: input_features[~attention_mask[:, 0].bool()] = 0.0 attention_mask = torch.concatenate([attention_mask[:, 0].view(-1, 1), attention_mask], axis=1) return (input_features, attention_mask) return (input_features, None)
This method is used to concatenate mel conditioner tokens at the front of the input_features in order to control the type of MIDI token generated by the model. Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): input features extracted from the feature extractor. composer (`str`): composer token which determines the type of MIDI tokens to be generated. generation_config (`~generation.GenerationConfig`): The generation is used to get the composer-feature_token pair. attention_mask (``, *optional*): For batched generation `input_features` are padded to have the same shape across all examples. `attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**.
github-repos
def get_wallace_tensor(self, tau): b = (0.5 * ((((np.einsum('ml,kn->klmn', tau, np.eye(3)) + np.einsum('km,ln->klmn', tau, np.eye(3))) + np.einsum('nl,km->klmn', tau, np.eye(3))) + np.einsum('kn,lm->klmn', tau, np.eye(3))) + ((- 2) * np.einsum('kl,mn->klmn', tau, np.eye(3))))) strain = self.get_strain_from_stress(tau) b += self.get_effective_ecs(strain) return b
Gets the Wallace Tensor for determining yield strength criteria. Args: tau (3x3 array-like): stress at which to evaluate the wallace tensor
codesearchnet
def iflag_unique_items(list_):
    seen = set()

    def unseen(item):
        if item in seen:
            return False
        seen.add(item)
        return True

    flag_iter = (unseen(item) for item in list_)
    return flag_iter
Returns a list of flags corresponding to the first time an item is seen Args: list_ (list): list of items Returns: flag_iter
juraj-google-style
def _do_sampling(self, logits, num_samples): with self.session(), self.test_scope(): random_seed.set_random_seed(1618) op = random_ops.multinomial(logits, num_samples, output_dtype=dtypes.int32) d = self.evaluate(op) batch_size, num_classes = logits.shape freqs_mat = [] for i in range(batch_size): cnts = dict(collections.Counter(d[i, :])) self.assertLess(max(cnts.keys()), num_classes) self.assertGreaterEqual(min(cnts.keys()), 0) freqs = [cnts[k] * 1.0 / num_samples if k in cnts else 0 for k in range(num_classes)] freqs_mat.append(freqs) return freqs_mat
Categorical samples from given input. Args: logits: Numpy ndarray of shape [batch_size, num_classes]. num_samples: Int; number of samples to draw. Returns: Frequencies from sampled classes; shape [batch_size, num_classes].
github-repos
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream): if file_entry.IsRoot() and file_entry.type_indicator not in ( self._TYPES_WITH_ROOT_METADATA): return if data_stream and not data_stream.IsDefault(): return display_name = mediator.GetDisplayName() logger.debug( '[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format( display_name)) self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING if self._processing_profiler: self._processing_profiler.StartTiming('extracting') self._event_extractor.ParseFileEntryMetadata(mediator, file_entry) if self._processing_profiler: self._processing_profiler.StopTiming('extracting') self.processing_status = definitions.STATUS_INDICATOR_RUNNING
Extracts metadata from a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract metadata from. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream.
juraj-google-style
def _objective_and_vega(volatilities): vols = volatilities * sqrt_t / normalization d1 = (norm_forwards - norm_strikes) / vols implied_prices = (norm_forwards - norm_strikes) * _cdf(d1) + vols * _pdf(d1) if is_call_options is not None: put_prices = implied_prices - norm_forwards + norm_strikes implied_prices = tf.where(is_call_options, implied_prices, put_prices) vega = _pdf(d1) * sqrt_t / discount_factors / normalization return (implied_prices - normalized_prices, vega)
Calculate the Bachelier price and vega for a given volatility. This method returns normalized results. Args: volatilities: A real `Tensor` of same shape and dtype as `forwards`. The volatility to expiry. Returns: A tuple containing (value, gradient) of the black scholes price, both of which are `Tensor`s of the same shape and dtype as `volatilities`.
github-repos
def load_config(self, settings=None):
    self._load_defaults()
    if settings:
        self.update(settings)
    else:
        config_paths = _get_config_files()
        for p in config_paths:
            conf = _process_config_file([p])
            self.update(conf)
    self._loaded = True
    self._validate()
Load the configuration either from the config file, or from the given settings. Args: settings (dict): If given, the settings are pulled from this dictionary. Otherwise, the config file is used.
codesearchnet
def _transform_local_field_to_expression(expression, node, context): column_name = expression.field_name column = sql_context_helpers.get_column(column_name, node, context) return column
Transform a LocalField compiler expression into its SQLAlchemy expression representation. Args: expression: expression, LocalField compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression.
codesearchnet
def _build(self, *args):
    net = args
    if not self._layers:
        if len(args) == 1:
            return args[0]
        else:
            return args
    for layer in self._layers:
        if isinstance(net, tuple):
            net = layer(*net)
        else:
            net = layer(net)
    return net
Connects the Sequential module into the graph. Args: *args: A tuple of inputs, to be unpacked as the arguments to the first layer. Returns: The output value of the last layer.
codesearchnet
def append(parent: ScheduleComponent, child: ScheduleComponent,
           name: str = None) -> Schedule:
    common_channels = set(parent.channels) & set(child.channels)
    insertion_time = parent.ch_stop_time(*common_channels)
    return insert(parent, insertion_time, child, name=name)
r"""Return a new schedule with by appending `child` to `parent` at the last time of the `parent` schedule's channels over the intersection of the parent and child schedule's channels. $t = \textrm{max}({x.stop\_time |x \in parent.channels \cap child.channels})$ Args: parent: The schedule to be inserted into child: The schedule to insert name: Name of the new schedule. Defaults to name of parent
codesearchnet
def serve(name: str = "", port: int = 5000) -> None: logging.info(" * Listening on port %s", port) httpd = HTTPServer((name, port), RequestHandler) httpd.serve_forever()
A basic way to serve the methods. Args: name: Server address. port: Server port.
juraj-google-style
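serve assumes a RequestHandler class defined elsewhere in the module; the equivalent standard-library pattern, with a hypothetical handler, looks like this.

from http.server import HTTPServer, BaseHTTPRequestHandler

class EchoHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"ok")

# Listen on all interfaces, port 5000, until interrupted.
HTTPServer(("", 5000), EchoHandler).serve_forever()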
def __mod__(self, other: Union[_FormatArg, Iterable[_FormatArg]]) -> bytes: if isinstance(other, bytes): return self.format([other]) elif hasattr(other, '__bytes__'): supports_bytes = cast(SupportsBytes, other) return self.format([bytes(supports_bytes)]) elif hasattr(other, '__iter__'): items = cast(Iterable[_FormatArg], other) return self.format(items) return NotImplemented
String interpolation, shortcut for :meth:`.format`. Args: other: The data interpolated into the format string.
juraj-google-style
def robust_zscore(mat, ctrl_mat=None, min_mad=0.1): if (ctrl_mat is not None): medians = ctrl_mat.median(axis=1) median_devs = abs(ctrl_mat.subtract(medians, axis=0)) else: medians = mat.median(axis=1) median_devs = abs(mat.subtract(medians, axis=0)) sub = mat.subtract(medians, axis='index') mads = median_devs.median(axis=1) mads = mads.clip(lower=min_mad) zscore_df = sub.divide((mads * 1.4826), axis='index') return zscore_df.round(rounding_precision)
Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data
codesearchnet
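The arithmetic is the median/MAD analogue of a z-score, with the 1.4826 factor making the MAD a consistent estimator of the standard deviation under normality; a small hypothetical check without a control matrix:

import pandas as pd

mat = pd.DataFrame([[1.0, 2.0, 3.0],
                    [10.0, 10.0, 40.0]])
medians = mat.median(axis=1)                                    # per-row medians
mads = mat.sub(medians, axis=0).abs().median(axis=1).clip(lower=0.1)
z = mat.sub(medians, axis=0).div(mads * 1.4826, axis=0)         # robust z-scores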
def random_get_float( rnd: Optional[tcod.random.Random], mi: float, ma: float ) -> float: return float( lib.TCOD_random_get_double(rnd.random_c if rnd else ffi.NULL, mi, ma) )
Return a random float in the range: ``mi`` <= n <= ``ma``. The result is affected by calls to :any:`random_set_distribution`. Args: rnd (Optional[Random]): A Random instance, or None to use the default. mi (float): The lower bound of the random range, inclusive. ma (float): The upper bound of the random range, inclusive. Returns: float: A random double precision float in the range ``mi`` <= n <= ``ma``.
juraj-google-style
def _encode(self, value, path_from_root): if isinstance(value, dict): json_value = {} for (key, value) in six.iteritems(value): json_value[key] = self._encode(value, (path_from_root + (key,))) return json_value else: path = '.'.join(path_from_root) if util.is_pandas_data_frame(value): return util.encode_data_frame(path, value, self._run) else: (friendly_value, converted) = util.json_friendly(data_types.val_to_json(path, value)) (json_value, compressed) = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value)) if compressed: self.write_h5(path_from_root, friendly_value) return json_value '\n if isinstance(value, dict):\n json_child[key], converted = util.json_friendly(\n self._encode(value, path_from_root + [key]))\n else:\n '
Normalize, compress, and encode sub-objects for backend storage. Args: value: Object to encode. path_from_root: `tuple` of key strings from the top-level summary to the current `value`. Returns: A new tree of dicts with large objects replaced with dictionaries with "_type" entries that say which type the original data was.
codesearchnet
def importFile(self, path, mode, outp=None):
    if (not os.path.isfile(path)):
        raise s_exc.NoSuchFile('File does not exist')
    fname = os.path.split(path)[1]
    parts = fname.rsplit('.', 1)
    ext = (parts[1] if (len(parts) == 2) else None)
    if ((not ext) or (ext not in ('crt', 'key', 'p12'))):
        mesg = 'importFile only supports .crt, .key, .p12 extensions'
        raise s_exc.BadFileExt(mesg=mesg, ext=ext)
    newpath = s_common.genpath(self.certdir, mode, fname)
    if os.path.isfile(newpath):
        raise s_exc.FileExists('File already exists')
    shutil.copy(path, newpath)
    if (outp is not None):
        outp.printf(('copied %s to %s' % (path, newpath)))
Imports certs and keys into the Synapse cert directory Args: path (str): The path of the file to be imported. mode (str): The certdir subdirectory to import the file into. Examples: Import CA certificate 'mycoolca.crt' to the 'cas' directory. certdir.importFile('mycoolca.crt', 'cas') Notes: importFile does not perform any validation on the files it imports. Returns: None
codesearchnet
def get_float_type_def(double_precision, include_complex=True):
    if include_complex:
        with open(os.path.abspath(resource_filename('mot', 'data/opencl/complex.h')), 'r') as f:
            complex_number_support = f.read()
    else:
        complex_number_support = ''
    # The original source assigns two multi-line OpenCL string literals here
    # (SciPy-derived constants and the single/double precision mot_float_type
    # definitions). Those literals were lost in extraction; the gaps below are
    # left unfilled rather than reconstructed.
    scipy_constants = 
    if double_precision:
        return  + scipy_constants + complex_number_support
    else:
        return  + scipy_constants + complex_number_support
Get the model floating point type definition. Args: double_precision (boolean): if True we will use the double type for the mot_float_type type. Else, we will use the single precision float type for the mot_float_type type. include_complex (boolean): if we include support for complex numbers Returns: str: defines the mot_float_type types, the epsilon and the MIN and MAX values.
juraj-google-style
def from_db_value(cls, value, *_) -> Optional[LocalizedValue]: if not value: if getattr(settings, 'LOCALIZED_FIELDS_EXPERIMENTAL', False): return None else: return cls.attr_class() if isinstance(value, list): result = [] for inner_val in value: if isinstance(inner_val, dict): if inner_val is None: result.append(None) else: result.append(cls.attr_class(inner_val)) else: result.append(inner_val) return result if not isinstance(value, dict): return value return cls.attr_class(value)
Turns the specified database value into its Python equivalent. Arguments: value: The value that is stored in the database and needs to be converted to its Python equivalent. Returns: A :see:LocalizedValue instance containing the data extracted from the database.
juraj-google-style
def getObjective(self, name): return lock_and_call((lambda : Objective(self._impl.getObjective(name))), self._lock)
Get the objective with the corresponding name. Args: name: Name of the objective to be found. Raises: TypeError: if the specified objective does not exist.
codesearchnet
def word_probability(self, word, total_words=None): if total_words is None: total_words = self._word_frequency.total_words return self._word_frequency.dictionary[word] / total_words
Calculate the probability of the `word` being the desired, correct word Args: word (str): The word for which the word probability is \ calculated total_words (int): The total number of words to use in the \ calculation; use the default for using the whole word \ frequency Returns: float: The probability that the word is the correct word
juraj-google-style
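With pyspellchecker this is typically reached through a SpellChecker instance; a sketch (the exact value depends on the loaded frequency list):

from spellchecker import SpellChecker

spell = SpellChecker()
# Relative frequency of "the" among all words in the frequency dictionary.
p = spell.word_probability("the")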
def labels(self, leaves=True, internal=True): if not isinstance(leaves, bool): raise TypeError("leaves must be a bool") if not isinstance(internal, bool): raise TypeError("internal must be a bool") for node in self.traverse_preorder(): if node.label is not None and ((leaves and node.is_leaf()) or (internal and not node.is_leaf())): yield node.label
Generator over the (non-``None``) ``Node`` labels of this ``Tree`` Args: ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False`` ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
juraj-google-style
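For a treeswift tree, iterating only leaf labels might look like the following sketch:

from treeswift import read_tree_newick

tree = read_tree_newick("((A,B),(C,D));")
leaf_labels = sorted(tree.labels(leaves=True, internal=False))
# leaf_labels == ['A', 'B', 'C', 'D']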
def get_thread(self, thread_id, update_if_cached=True, raise_404=False): cached_thread = self._thread_cache.get(thread_id) if cached_thread: if update_if_cached: cached_thread.update() return cached_thread res = self._requests_session.get(self._url.thread_api_url(thread_id=thread_id)) if raise_404: res.raise_for_status() elif (not res.ok): return None thread = Thread._from_request(self, res, thread_id) self._thread_cache[thread_id] = thread return thread
Get a thread from 4chan via 4chan API. Args: thread_id (int): Thread ID update_if_cached (bool): Whether the thread should be updated if it's already in our cache raise_404 (bool): Raise an Exception if thread has 404'd Returns: :class:`basc_py4chan.Thread`: Thread object
codesearchnet
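Client code usually reaches this through a Board, which owns the requests session and URL builder; the board name, thread ID and attribute access below are illustrative.

import basc_py4chan

board = basc_py4chan.Board('ck')
thread = board.get_thread(123456, update_if_cached=True)
if thread is not None:
    print(thread.id, len(thread.replies))  # cached or freshly fetched thread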
def marshal_bson( obj, types=BSON_TYPES, fields=None, ): return marshal_dict( obj, types, fields=fields, )
Recursively marshal a Python object to a BSON-compatible dict that can be passed to PyMongo, Motor, etc... Args: obj: object, It's members can be nested Python objects which will be converted to dictionaries types: tuple-of-types, The BSON primitive types, typically you would not change this fields: None-list-of-str, Explicitly marshal only these fields Returns: dict
juraj-google-style
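A hypothetical nested object flattened into a plain dict for PyMongo; exactly which members are picked up is governed by marshal_dict, so treat this as a sketch.

class Address:
    def __init__(self, city):
        self.city = city

class User:
    def __init__(self, name, address):
        self.name = name
        self.address = address

doc = marshal_bson(User("ada", Address("london")))
# doc is a plain dict, e.g. {'name': 'ada', 'address': {'city': 'london'}}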
def __init__(self, email_url: str, mailing_list_name: str, extractor_name: str) -> None: Extractor.__init__(self, input_type=InputType.TEXT, category="build_in_extractor", name=extractor_name) self.email_url = email_url self.mailing_list_name = mailing_list_name
Initialize the extractor, storing mailing list and message information Args: email_url: str mailing_list_name: str extractor_name: str Returns:
juraj-google-style
def floor(x, name=None): return gen_math_ops.floor(x, name)
Returns element-wise largest integer not greater than x. Both input range is `(-inf, inf)` and the output range consists of all integer values. For example: >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")]) >>> tf.floor(x).numpy() array([ 1., -2., 5., -3., 0., inf], dtype=float32) Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as x.
github-repos
def parse_record(raw_record, is_training, dtype): (image_buffer, label) = _parse_example_proto(raw_record) image = imagenet_preprocessing.preprocess_image(image_buffer=image_buffer, output_height=_DEFAULT_IMAGE_SIZE, output_width=_DEFAULT_IMAGE_SIZE, num_channels=_NUM_CHANNELS, is_training=is_training) image = tf.cast(image, dtype) return (image, label)
Parses a record containing a training example of an image. The input record is parsed into a label and image, and the image is passed through preprocessing steps (cropping, flipping, and so on). Args: raw_record: scalar Tensor tf.string containing a serialized Example protocol buffer. is_training: A boolean denoting whether the input is for training. dtype: data type to use for images/features. Returns: Tuple with processed image tensor and one-hot-encoded label tensor.
codesearchnet
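Wired into a tf.data input pipeline, the parser is mapped over serialized Examples; the file path below is illustrative.

import tensorflow as tf

dataset = tf.data.TFRecordDataset(["/data/imagenet/train-00000-of-01024"])
dataset = dataset.map(
    lambda raw: parse_record(raw, is_training=True, dtype=tf.float32),
    num_parallel_calls=tf.data.AUTOTUNE)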
def commit_offsets_async(self, offsets, callback=None): self._invoke_completed_offset_commit_callbacks() if not self.coordinator_unknown(): future = self._do_commit_offsets_async(offsets, callback) else: future = self.lookup_coordinator() future.add_callback(lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)()) if callback: future.add_errback(lambda e: self.completed_offset_commits.appendleft((callback, offsets, e))) self._client.poll(timeout_ms=0) return future
Commit specific offsets asynchronously. Arguments: offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit callback (callable, optional): called as callback(offsets, response) response will be either an Exception or a OffsetCommitResponse struct. This callback can be used to trigger custom actions when a commit request completes. Returns: kafka.future.Future
juraj-google-style
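Driving the commit from the owning ConsumerCoordinator with a completion callback could look like this sketch (topic and offset values are illustrative, and `coordinator` stands for the instance that owns the method above):

from kafka.structs import TopicPartition, OffsetAndMetadata

def on_commit(offsets, response):
    # response is an OffsetCommitResponse struct on success or an Exception.
    print("commit finished:", response)

offsets = {TopicPartition("events", 0): OffsetAndMetadata(42, None)}
coordinator.commit_offsets_async(offsets, callback=on_commit)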
def plot_main(pid, return_fig_ax=False): global WORKING_DIRECTORY, SNR_CUT if isinstance(pid, PlotInput): pid = pid.return_dict() WORKING_DIRECTORY = '.' if 'WORKING_DIRECTORY' not in pid['general'].keys(): pid['general']['WORKING_DIRECTORY'] = '.' SNR_CUT = 5.0 if 'SNR_CUT' not in pid['general'].keys(): pid['general']['SNR_CUT'] = SNR_CUT if "switch_backend" in pid['general'].keys(): plt.switch_backend(pid['general']['switch_backend']) running_process = MakePlotProcess( **{**pid, **pid['general'], **pid['plot_info'], **pid['figure']}) running_process.input_data() running_process.setup_figure() running_process.create_plots() if 'save_figure' in pid['figure'].keys(): if pid['figure']['save_figure'] is True: running_process.fig.savefig( pid['general']['WORKING_DIRECTORY'] + '/' + pid['figure']['output_path'], **pid['figure']['savefig_kwargs']) if 'show_figure' in pid['figure'].keys(): if pid['figure']['show_figure'] is True: plt.show() if return_fig_ax is True: return running_process.fig, running_process.ax return
Main function for creating these plots. Reads in plot info dict from json file or dictionary in script. Args: pid (dict or PlotInput): Plot input information; a PlotInput instance is converted to a dict internally. return_fig_ax (bool, optional): Return figure and axes objects. Returns: 2-element tuple containing - **fig** (*obj*): Figure object for customization outside of those in this program. - **ax** (*obj*): Axes object for customization outside of those in this program.
juraj-google-style
def __init__(self, key_wrapping_data=None): super(Key, self).__init__() self.cryptographic_algorithm = None self.cryptographic_length = None self.key_format_type = None self.key_wrapping_data = key_wrapping_data self._cryptographic_parameters = list() self._usage_limits = None
Create a Key object. Args: key_wrapping_data(dict): A dictionary containing key wrapping data settings, describing how the key value has been wrapped. Optional, defaults to None.
juraj-google-style
def floordiv(self, other, axis="columns", level=None, fill_value=None): return self._binary_op( "floordiv", other, axis=axis, level=level, fill_value=fill_value )
Floor-divides this DataFrame by another DataFrame/Series/scalar. Args: other: The object to use to apply the floor division against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the floor division applied.
juraj-google-style
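Behaviour mirrors pandas; a quick illustrative call against a scalar:

import modin.pandas as pd

df = pd.DataFrame({"a": [7, 8, 9], "b": [2, 4, 6]})
out = df.floordiv(2)   # a -> [3, 4, 4], b -> [1, 2, 3]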
def delete(self, version_name): name = ('%s/versions/%s' % (self._full_model_name, version_name)) response = self._api.projects().models().versions().delete(name=name).execute() if 'name' not in response: raise Exception('Invalid response from service. "name" is not found.') _util.wait_for_long_running_operation(response['name'])
Delete a version of model. Args: version_name: the name of the version in short form, such as "v1".
juraj-google-style
def access_token(self): if ((self._access_token is None) or (self.expiration_time <= int(time.time()))): resp = self.make_access_request() self._access_token = resp.json()['access_token'] return self._access_token
Stores always valid OAuth2 access token. Note: Accessing this property may result in HTTP request. Returns: str
codesearchnet
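The refresh-on-expiry caching pattern behind the property, sketched in isolation with placeholder names:

import time

class TokenHolder:
    def __init__(self):
        self._access_token = None
        self.expiration_time = 0

    def make_access_request(self):
        # Placeholder for the real OAuth2 token endpoint call.
        self.expiration_time = int(time.time()) + 3600
        return {"access_token": "example-token"}

    @property
    def access_token(self):
        # Re-request only when missing or expired.
        if self._access_token is None or self.expiration_time <= int(time.time()):
            self._access_token = self.make_access_request()["access_token"]
        return self._access_token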
def _convert(x, factor1, factor2): return ((x * factor2) / (((1 - x) * factor1) + (x * factor2)))
Converts mixing ratio x in comp1 - comp2 tie line to that in c1 - c2 tie line. Args: x (float): Mixing ratio x in comp1 - comp2 tie line, a float between 0 and 1. factor1 (float): Compositional ratio between composition c1 and processed composition comp1. E.g., factor for Composition('SiO2') and Composition('O') is 2.0. factor2 (float): Compositional ratio between composition c2 and processed composition comp2. Returns: Mixing ratio in c1 - c2 tie line, a float between 0 and 1.
codesearchnet
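A worked number: with x = 0.5, factor1 = 2.0 (e.g. Composition('SiO2') against Composition('O')) and factor2 = 1.0, the converted ratio is 0.5 / (1.0 + 0.5) = 1/3.

x, factor1, factor2 = 0.5, 2.0, 1.0
converted = (x * factor2) / ((1 - x) * factor1 + x * factor2)
assert abs(converted - 1 / 3) < 1e-12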
def dict_to_schema(schema_dict, required, allow_custom_keys=True, modifier=None): if modifier: modifier = Use(modifier) def _to(value): if isinstance(value, dict): d = {} for k, v in value.iteritems(): if isinstance(k, basestring): k = Required(k) if required else Optional(k) d[k] = _to(v) if allow_custom_keys: d[Optional(basestring)] = modifier or object schema = Schema(d) elif modifier: schema = And(value, modifier) else: schema = value return schema return _to(schema_dict)
Convert a dict of Schemas into a Schema. Args: schema_dict (dict): Dict to convert; values may be types, validators or nested dicts. required (bool): Whether to make schema keys optional or required. allow_custom_keys (bool, optional): If True, creates a schema that allows custom items in dicts. modifier (callable): Functor to apply to dict values - it is applied via `Schema.Use`. Returns: A `Schema` object.
juraj-google-style
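Illustrative use under Python 2 (the snippet relies on basestring and iteritems); validation succeeds with extra keys because allow_custom_keys defaults to True.

from schema import SchemaError

config_schema = dict_to_schema({"name": str, "timeout": int}, required=True)

config_schema.validate({"name": "job1", "timeout": 30, "extra": "anything"})  # extra keys allowed
try:
    config_schema.validate({"timeout": 30})
except SchemaError:
    pass  # "name" is a required key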