Each record below has three fields: code (string, 20 to 4.93k characters), docstring (string, 33 to 1.27k characters), and source (string, one of 3 classes).
def __init__(self, hunt_obj, runner_args=None, token=None):
    self.token = token or hunt_obj.token
    self.queue_manager = queue_manager.QueueManager(token=self.token)
    self.outbound_lock = threading.Lock()
    self.hunt_obj = hunt_obj

    if runner_args is not None:
        self.runner_args = runner_args
        self.session_id = self.GetNewSessionID()
        self.hunt_obj.urn = self.session_id
        self.context = self.InitializeContext(runner_args)
        self.hunt_obj.context = self.context
        self.context.session_id = self.session_id
    else:
        self.context = self.hunt_obj.context
        self.runner_args = self.hunt_obj.runner_args
        self.hunt_obj.urn = self.session_id = self.context.session_id
Constructor for the Hunt Runner. Args: hunt_obj: The hunt object this runner will run states for. runner_args: A HuntRunnerArgs() instance containing initial values. If not specified, we use the runner_args from the hunt_obj. token: An instance of access_control.ACLToken security token.
juraj-google-style
def compute(i, tas):
    elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]
    elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature)
    elems_value = elems_unflatten(elems_value_flat)
    ag_ctx = autograph_ctx.control_status_ctx()
    autographed_fn = autograph.tf_convert(fn, ag_ctx)
    result_value = autographed_fn(elems_value)
    nest.assert_same_structure(fn_output_signature or elems, result_value)
    result_value_flat = nest.flatten(result_value)
    result_value_batchable = _result_value_flat_to_batchable(result_value_flat, result_flat_signature)
    tas = [ta.write(i, value) for ta, value in zip(tas, result_value_batchable)]
    return (i + 1, tas)
The loop body of map_fn. Args: i: the loop counter tas: the flat TensorArray accumulator list Returns: (i + 1, tas): the updated counter + updated TensorArrays Raises: TypeError: if fn_output_signature and result_value structure don't match ValueError: if fn_output_signature and result_value lengths don't match
github-repos
def loads(text):
    if text.startswith('CCSDS_OEM_VERS'):
        func = _read_oem
    elif text.startswith('CCSDS_OPM_VERS'):
        func = _read_opm
    else:
        raise ValueError('Unknown CCSDS type')
    return func(text)
Read CCSDS data from a string and return the corresponding beyond class: an Orbit (or a list of Orbit) if it's an OPM, or an Ephem if it's an OEM. Args: text (str): Return: Orbit or Ephem Raise: ValueError: when the text is not a recognizable CCSDS format
codesearchnet
def __init__(self, fraction):
    self.fraction = fraction
    super().__init__('Fraction should be in (0,1] (received {})'.format(fraction))
Initialization of instances: Args: fraction (float): the invalid fraction. Attributes: fraction (float): the invalid fraction.
juraj-google-style
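A minimal, self-contained sketch of how an exception like the one above might be defined and raised; the class name, the subsample helper, and the (0, 1] check are illustrative assumptions rather than the original library's definitions.

# Hypothetical standalone version of an "invalid fraction" error for illustration.
class InvalidFractionError(ValueError):
    def __init__(self, fraction):
        self.fraction = fraction
        super().__init__('Fraction should be in (0,1] (received {})'.format(fraction))

def subsample(items, fraction):
    # Reject fractions outside (0, 1] before doing any work.
    if not 0 < fraction <= 1:
        raise InvalidFractionError(fraction)
    return items[:max(1, int(len(items) * fraction))]

print(subsample(list(range(10)), 0.3))  # [0, 1, 2]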
def prompt_for_test_start(
        message='Enter a DUT ID in order to start the test.',
        timeout_s=60*60*24,
        validator=lambda sn: sn,
        cli_color=''):
    @PhaseOptions(timeout_s=timeout_s)
    @plugs.plug(prompts=UserInput)
    def trigger_phase(test, prompts):
        dut_id = prompts.prompt(
            message, text_input=True, timeout_s=timeout_s, cli_color=cli_color)
        test.test_record.dut_id = validator(dut_id)

    return trigger_phase
Returns an OpenHTF phase for use as a prompt-based start trigger. Args: message: The message to display to the user. timeout_s: Seconds to wait before raising a PromptUnansweredError. validator: Function used to validate or modify the serial number. cli_color: An ANSI color code, or the empty string.
juraj-google-style
def _Replacement(node):
    value = node.id
    if value in ('True', 'False', 'None'):
        return node
    return _StrNode(value)
Returns a node to use in place of the supplied node in the AST. Args: node: A node of type Name. Could be a variable, or builtin constant. Returns: A node to use in place of the supplied Node. Either the same node, or a String node whose value matches the Name node's id.
github-repos
def FromStream(cls, stream):
    if stream.system:
        specifier = DataStreamSelector.MatchSystemOnly
    else:
        specifier = DataStreamSelector.MatchUserOnly

    return DataStreamSelector(stream.stream_type, stream.stream_id, specifier)
Create a DataStreamSelector from a DataStream. Args: stream (DataStream): The data stream that we want to convert.
codesearchnet
def deepcopy_dict(data):
    try:
        return copy.deepcopy(data)
    except TypeError:
        copied_data = {}
        for key, value in data.items():
            if isinstance(value, dict):
                copied_data[key] = deepcopy_dict(value)
            else:
                try:
                    copied_data[key] = copy.deepcopy(value)
                except TypeError:
                    copied_data[key] = value
        return copied_data
deepcopy dict data, ignore file object (_io.BufferedReader) Args: data (dict): dict data structure { 'a': 1, 'b': [2, 4], 'c': lambda x: x+1, 'd': open('LICENSE'), 'f': { 'f1': {'a1': 2}, 'f2': io.open('LICENSE', 'rb'), } } Returns: dict: deep copied dict data, with file object unchanged.
juraj-google-style
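A standalone sketch of the same deepcopy-with-fallback idea, using a thread lock (instead of a file object) as the value that copy.deepcopy cannot handle; the helper names here are assumptions made for the demo.

import copy
import threading

def deepcopy_with_fallback(data):
    # Same idea as deepcopy_dict above: fall back to a shallow reference
    # for values that copy.deepcopy cannot handle.
    try:
        return copy.deepcopy(data)
    except TypeError:
        return {k: deepcopy_with_fallback(v) if isinstance(v, dict) else _safe_copy(v)
                for k, v in data.items()}

def _safe_copy(value):
    try:
        return copy.deepcopy(value)
    except TypeError:
        return value

original = {'numbers': [1, 2, 3], 'nested': {'lock': threading.Lock()}}
copied = deepcopy_with_fallback(original)
copied['numbers'].append(4)
print(original['numbers'])                                      # [1, 2, 3] -- list was deep-copied
print(copied['nested']['lock'] is original['nested']['lock'])   # True -- lock kept as-is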
def flatten(inputs, name=None, data_format='channels_last'):
    warnings.warn('`tf.layers.flatten` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Flatten` instead.')
    layer = Flatten(name=name, data_format=data_format)
    return layer.apply(inputs)
Flattens an input tensor while preserving the batch axis (axis 0). Args: inputs: Tensor input. name: The name of the layer (string). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. Returns: Reshaped tensor. Examples: ``` x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32') y = flatten(x) # now `y` has shape `(None, 16)` x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32') y = flatten(x) # now `y` has shape `(None, None)` ```
github-repos
def validate_word(self, word):
    while word:
        match = self.seg_regex.match(word)
        if match:
            word = word[len(match.group(0)):]
        else:
            return False
    return True
Returns True if `word` consists exhaustively of valid IPA segments Args: word (unicode): input word as Unicode IPA string Returns: bool: True if `word` can be divided exhaustively into IPA segments that exist in the database
codesearchnet
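A self-contained sketch of the same greedy prefix-matching loop; the toy segment inventory and regex below stand in for the real IPA segment database that builds seg_regex.

import re

# Toy segment inventory standing in for the real IPA segment database.
SEGMENTS = ['tʃ', 'aː', 'p', 't', 'a', 'i']
# Longest alternatives first so the regex prefers multi-character segments.
SEG_REGEX = re.compile('|'.join(sorted(SEGMENTS, key=len, reverse=True)))

def validate_word(word):
    # Repeatedly strip the longest matching segment off the front;
    # the word is valid only if nothing is left over.
    while word:
        match = SEG_REGEX.match(word)
        if not match:
            return False
        word = word[len(match.group(0)):]
    return True

print(validate_word('tʃaːp'))  # True
print(validate_word('xap'))    # False ('x' is not in the toy inventory)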
def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=FLAGS):
    def validate_mutual_exclusion(flags_dict):
        flag_count = sum(1 for val in flags_dict.values() if val is not None)
        if flag_count == 1 or (not required and flag_count == 0):
            return True
        message = ('%s one of (%s) must be specified.' %
                   ('Exactly' if required else 'At most', ', '.join(flag_names)))
        raise ValidationError(message)

    register_multi_flags_validator(flag_names, validate_mutual_exclusion, flag_values=flag_values)
Ensures that only one flag among flag_names is set. Args: flag_names: [str], a list of the flag names to be checked. required: Boolean, if set, exactly one of the flags must be set. Otherwise, it is also valid for none of the flags to be set. flag_values: An optional FlagValues instance to validate against.
codesearchnet
def get_name(node):
    if isinstance(node, gast.Name):
        return node.id
    elif isinstance(node, (gast.Subscript, gast.Attribute)):
        return get_name(node.value)
    else:
        raise TypeError
Get the name of a variable. Args: node: A `Name`, `Subscript` or `Attribute` node. Returns: The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
juraj-google-style
def _create_complete_graph(node_ids):
    g = nx.Graph()
    g.add_nodes_from(node_ids)
    for (i, j) in combinations(node_ids, 2):
        g.add_edge(i, j)
    return g
Create a complete graph from the list of node ids. Args: node_ids: a list of node ids Returns: An undirected graph (as a networkx.Graph)
codesearchnet
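A usage sketch of the same idea with explicit imports, assuming networkx is installed; add_edges_from over combinations is an equivalent way to wire up every pair.

import networkx as nx
from itertools import combinations

def create_complete_graph(node_ids):
    # Add every node, then connect every unordered pair of nodes.
    g = nx.Graph()
    g.add_nodes_from(node_ids)
    g.add_edges_from(combinations(node_ids, 2))
    return g

g = create_complete_graph(['a', 'b', 'c', 'd'])
print(g.number_of_nodes(), g.number_of_edges())  # 4 6  (n*(n-1)/2 edges)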
def program(self, *, vertex_shader, fragment_shader=None, geometry_shader=None, tess_control_shader=None, tess_evaluation_shader=None, varyings=()) -> 'Program': if type(varyings) is str: varyings = (varyings,) varyings = tuple(varyings) res = Program.__new__(Program) res.mglo, ls1, ls2, ls3, ls4, ls5, res._subroutines, res._geom, res._glo = self.mglo.program( vertex_shader, fragment_shader, geometry_shader, tess_control_shader, tess_evaluation_shader, varyings ) members = {} for item in ls1: obj = Attribute.__new__(Attribute) obj.mglo, obj._location, obj._array_length, obj._dimension, obj._shape, obj._name = item members[obj.name] = obj for item in ls2: obj = Varying.__new__(Varying) obj._number, obj._array_length, obj._dimension, obj._name = item members[obj.name] = obj for item in ls3: obj = Uniform.__new__(Uniform) obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item members[obj.name] = obj for item in ls4: obj = UniformBlock.__new__(UniformBlock) obj.mglo, obj._index, obj._size, obj._name = item members[obj.name] = obj for item in ls5: obj = Subroutine.__new__(Subroutine) obj._index, obj._name = item members[obj.name] = obj res._members = members res.ctx = self res.extra = None return res
Create a :py:class:`Program` object. Only linked programs will be returned. The varyings are only used when a transform program is created. Args: vertex_shader (str): The vertex shader source. fragment_shader (str): The fragment shader source. geometry_shader (str): The geometry shader source. tess_control_shader (str): The tessellation control shader source. tess_evaluation_shader (str): The tessellation evaluation shader source. varyings (list): A list of varying names. Returns: :py:class:`Program` object
juraj-google-style
def pb(name, data, bucket_count=None, display_name=None, description=None): import tensorflow.compat.v1 as tf if (bucket_count is None): bucket_count = summary_v2.DEFAULT_BUCKET_COUNT data = np.array(data).flatten().astype(float) if (data.size == 0): buckets = np.array([]).reshape((0, 3)) else: min_ = np.min(data) max_ = np.max(data) range_ = (max_ - min_) if (range_ == 0): center = min_ buckets = np.array([[(center - 0.5), (center + 0.5), float(data.size)]]) else: bucket_width = (range_ / bucket_count) offsets = (data - min_) bucket_indices = np.floor((offsets / bucket_width)).astype(int) clamped_indices = np.minimum(bucket_indices, (bucket_count - 1)) one_hots = (np.array([clamped_indices]).transpose() == np.arange(0, bucket_count)) assert (one_hots.shape == (data.size, bucket_count)), (one_hots.shape, (data.size, bucket_count)) bucket_counts = np.sum(one_hots, axis=0) edges = np.linspace(min_, max_, (bucket_count + 1)) left_edges = edges[:(- 1)] right_edges = edges[1:] buckets = np.array([left_edges, right_edges, bucket_counts]).transpose() tensor = tf.make_tensor_proto(buckets, dtype=tf.float64) if (display_name is None): display_name = name summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description) tf_summary_metadata = tf.SummaryMetadata.FromString(summary_metadata.SerializeToString()) summary = tf.Summary() summary.value.add(tag=('%s/histogram_summary' % name), metadata=tf_summary_metadata, tensor=tensor) return summary
Create a legacy histogram summary protobuf. Arguments: name: A unique name for the generated summary, including any desired name scopes. data: A `np.array` or array-like form of any shape. Must have type castable to `float`. bucket_count: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. display_name: Optional name for this summary in TensorBoard, as a `str`. Defaults to `name`. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `tf.Summary` protobuf object.
codesearchnet
def _validate(self):
    if self.tuple_shapes is not None:
        for policy, shape in zip(self._sharding_policies, self._tuple_shapes):
            _ = policy.get_sharded_shape(shape)
Checks that the configuration is self-consistent. Raises: ValueError: if the shapes and sharding policies don't match.
github-repos
def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs): options = {'log_level': log_level} if (vcodec is None): options['vcodec'] = 'copy' if (acodec is None): options['acodec'] = 'copy' if start: options['ss'] = start else: start = 0 if end: options['t'] = (end - start) convert_video(in_file, out_file, print_cmd, **options)
Cut a clip from a video. Args: in_file (str): Input video filename. out_file (str): Output video filename. start (None or float): Start time (in seconds). end (None or float): End time (in seconds). vcodec (None or str): Output video codec, None for unchanged. acodec (None or str): Output audio codec, None for unchanged. log_level (str): Logging level of ffmpeg. print_cmd (bool): Whether to print the final ffmpeg command.
codesearchnet
def GetBalance(self, asset_id, watch_only=0):
    total = Fixed8(0)

    if type(asset_id) is NEP5Token.NEP5Token:
        return self.GetTokenBalance(asset_id, watch_only)

    for coin in self.GetCoins():
        if coin.Output.AssetId == asset_id:
            if coin.State & CoinState.Confirmed > 0 and \
                    coin.State & CoinState.Spent == 0 and \
                    coin.State & CoinState.Locked == 0 and \
                    coin.State & CoinState.Frozen == 0 and \
                    coin.State & CoinState.WatchOnly == watch_only:
                total = total + coin.Output.Value

    return total
Get the balance of a specific asset or NEP5 token by its asset id. Args: asset_id (NEP5Token|UInt256): an instance of neo.Wallets.NEP5Token, or the asset id of the asset to get the balance of. watch_only (int): 0 by default; pass the CoinState.WatchOnly flag to limit the result to watch-only addresses. Returns: Fixed8: total balance.
juraj-google-style
def _WriteCacheFile(self, cache_filename, scopes):
    creds = {'scopes': sorted(list(scopes)),
             'svc_acct_name': self.__service_account_name}
    creds_str = json.dumps(creds)
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cache_file.LockedWrite(creds_str)
    except KeyboardInterrupt:
        raise
    except:
        pass
Writes the credential metadata to the cache file. This does not save the credentials themselves (CredentialStore class optionally handles that after this class is initialized). Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials.
juraj-google-style
def BuildAdGroupCriterionOperations(adgroup_operations, number_of_keywords=1): criterion_operations = [ { 'xsi_type': 'AdGroupCriterionOperation', 'operand': { 'xsi_type': 'BiddableAdGroupCriterion', 'adGroupId': adgroup_operation['operand']['id'], 'criterion': { 'xsi_type': 'Keyword', 'text': 'mars%s%s' % (i, '!!!' if i % 2 == 0 else ''), 'matchType': 'BROAD' } }, 'operator': 'ADD' } for adgroup_operation in adgroup_operations for i in range(number_of_keywords)] return criterion_operations
Builds the operations adding a Keyword Criterion to each AdGroup. Args: adgroup_operations: a list containing the operations that will add AdGroups. number_of_keywords: an int defining the number of Keywords to be created. Returns: a list containing the operations that will create a new Keyword Criterion associated with each provided AdGroup.
juraj-google-style
def __init__(self, AssetId=None, Value=None, script_hash=None):
    super(TransactionOutput, self).__init__()
    self.AssetId = AssetId
    self.Value = Value
    self.ScriptHash = script_hash
Create an instance. Args: AssetId (UInt256): Value (Fixed8): script_hash (UInt160):
juraj-google-style
def GetDecrypter(cls, encryption_method, **kwargs):
    encryption_method = encryption_method.lower()
    decrypter = cls._decrypters.get(encryption_method, None)
    if not decrypter:
        return None
    return decrypter(**kwargs)
Retrieves the decrypter object for a specific encryption method. Args: encryption_method (str): encryption method identifier. kwargs (dict): keyword arguments depending on the decrypter. Returns: Decrypter: decrypter or None if the encryption method does not exist. Raises: CredentialError: if the necessary credentials are missing.
juraj-google-style
def list_worker_processes(apppool):
    ps_cmd = ['Get-ChildItem',
              "'IIS:\\AppPools\\{0}\\WorkerProcesses'".format(apppool)]

    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)

    try:
        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
    except ValueError:
        raise CommandExecutionError('Unable to parse return data as Json.')

    ret = dict()
    for item in items:
        ret[item['processId']] = item['appPoolName']

    if not ret:
        log.warning('No worker processes found in output: %s', cmd_ret)

    return ret
Returns a list of worker processes that correspond to the passed application pool. .. versionadded:: 2017.7.0 Args: apppool (str): The application pool to query Returns: dict: A dictionary of worker processes with their process IDs CLI Example: .. code-block:: bash salt '*' win_iis.list_worker_processes 'My App Pool'
codesearchnet
def get_min_max_value(self) -> tuple[float, float]: if self._num_bins > 512: logging.warning('num_bins=%d is too large. The HISTOGRAM_MSE_BRUTEFORCE method tests all histogram mid value pairs, so it may take a long time.', self._num_bins) mse_min = (float('inf'), float('inf'), float('inf')) for left, right in itertools.combinations(range(self._num_bins), 2): quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right]) mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max) mse_min = min(mse_tuple, mse_min) min_value, max_value = (mse_min[1], mse_min[2]) return (min_value, max_value)
Finds the optimal quant_min and quant_max by testing all possible cases. It guarantees optimal quant_min and quant_max for the representative dataset, but not for the test dataset. Returns: (min_value, max_value): Min and max calculated using HistogramMseBruteforce.
github-repos
def prepare_axes(axes, title, size, cmap=None): if (axes is None): return None axes.set_xlim([0, size[1]]) axes.set_ylim([size[0], 0]) axes.set_aspect('equal') axes.axis('off') if isinstance(cmap, str): title = '{} (cmap: {})'.format(title, cmap) axes.set_title(title) axes_image = image.AxesImage(axes, cmap=cmap, extent=(0, size[1], size[0], 0)) axes_image.set_data(np.random.random((size[0], size[1], 3))) axes.add_image(axes_image) return axes_image
Prepares an axes object for clean plotting. Removes x and y axes labels and ticks, sets the aspect ratio to be equal, uses the size to determine the drawing area and fills the image with random colors as visual feedback. Creates an AxesImage to be shown inside the axes object and sets the needed properties. Args: axes: The axes object to modify. title: The title. size: The size of the expected image. cmap: The colormap if a custom color map is needed. (Default: None) Returns: The AxesImage's handle.
codesearchnet
def inject_params(self, params): for arg, value in params.items(): cli_arg = '--{}'.format(arg) if cli_arg in sys.argv: self.tcex.log.debug('skipping existing arg: {}'.format(cli_arg)) continue param_data = self.tcex.install_json_params.get(arg) or {} if param_data.get('type', '').lower() == 'multichoice': value = value.split('|') elif param_data.get('type', '').lower() == 'boolean': value = self.tcex.utils.to_bool(value) elif arg in self.tc_bool_args: value = self.tcex.utils.to_bool(value) if isinstance(value, (bool)): if value is True: sys.argv.append(cli_arg) elif isinstance(value, (list)): for mcv in value: sys.argv.append('{}={}'.format(cli_arg, mcv)) else: sys.argv.append('{}={}'.format(cli_arg, value)) self._default_args, unknown = self.parser.parse_known_args() self.tcex._logger()
Inject params into sys.argv from secureParams API, AOT, or user provided. Args: params (dict): A dictionary containing all parameters that need to be injected as args.
juraj-google-style
def patch_masks(patches: dict) -> None:
    for patch in patches:
        patch_mask(patch)
Wraps patch mask function for list of patches. Modifies in place. Executes patch_mask for multiple patches. Args: patches: A list of patch objects to annotate.
github-repos
def _as_log_entry(self, name, now): d = {u'http_response_code': self.response_code, u'timestamp': time.mktime(now.timetuple())} severity = _SEVERITY.INFO if (self.response_code >= 400): severity = _SEVERITY.ERROR d[u'error_cause'] = self.error_cause.name if (self.request_size > 0): d[u'request_size'] = self.request_size if (self.response_size > 0): d[u'response_size'] = self.response_size if self.method: d[u'http_method'] = self.method if self.request_time: d[u'request_latency_in_ms'] = (self.request_time.total_seconds() * 1000) for key in self.COPYABLE_LOG_FIELDS: value = getattr(self, key, None) if value: d[key] = value return sc_messages.LogEntry(name=name, timestamp=timestamp.to_rfc3339(now), severity=severity, structPayload=_struct_payload_from(d))
Makes a `LogEntry` from this instance for the given log name. Args: name (str): the name of the log. now (:class:`datetime.datetime`): the current time. Return: a ``LogEntry`` generated from this instance with the given name and timestamp Raises: ValueError: if the fields in this instance are insufficient to create a valid ``ServicecontrolServicesReportRequest``
codesearchnet
def _ParseLastRunTime(self, parser_mediator, fixed_length_section): systemtime_struct = fixed_length_section.last_run_time system_time_tuple = (systemtime_struct.year, systemtime_struct.month, systemtime_struct.weekday, systemtime_struct.day_of_month, systemtime_struct.hours, systemtime_struct.minutes, systemtime_struct.seconds, systemtime_struct.milliseconds) date_time = None if (system_time_tuple != self._EMPTY_SYSTEM_TIME_TUPLE): try: date_time = dfdatetime_systemtime.Systemtime(system_time_tuple=system_time_tuple) except ValueError: parser_mediator.ProduceExtractionWarning('invalid last run time: {0!s}'.format(system_time_tuple)) return date_time
Parses the last run time from a fixed-length data section. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. fixed_length_section (job_fixed_length_data_section): a Windows Scheduled Task job fixed-length data section. Returns: dfdatetime.DateTimeValues: last run date and time or None if not available.
codesearchnet
def _get_default_configurable_parameter_values(fn, whitelist, blacklist): arg_vals = _ARG_DEFAULTS_CACHE.get(fn) if arg_vals is not None: return arg_vals.copy() arg_spec = _get_cached_arg_spec(fn) if arg_spec.defaults: default_kwarg_names = arg_spec.args[-len(arg_spec.defaults):] arg_vals = dict(zip(default_kwarg_names, arg_spec.defaults)) else: arg_vals = {} if six.PY3 and arg_spec.kwonlydefaults: arg_vals.update(arg_spec.kwonlydefaults) for k in list(six.iterkeys(arg_vals)): whitelist_fail = whitelist and k not in whitelist blacklist_fail = blacklist and k in blacklist representable = _is_literally_representable(arg_vals[k]) if whitelist_fail or blacklist_fail or not representable: del arg_vals[k] _ARG_DEFAULTS_CACHE[fn] = arg_vals return arg_vals.copy()
Retrieve all default values for configurable parameters of a function. Any parameters included in the supplied blacklist, or not included in the supplied whitelist, are excluded. Args: fn: The function whose parameter values should be retrieved. whitelist: The whitelist (or `None`) associated with the function. blacklist: The blacklist (or `None`) associated with the function. Returns: A dictionary mapping configurable parameter names to their default values.
juraj-google-style
def _load_json_module(): first_import_error = None for module_name in ['json', 'simplejson']: try: module = __import__(module_name, {}, {}, 'json') if (not hasattr(module, 'JSONEncoder')): message = ('json library "%s" is not compatible with ProtoRPC' % module_name) logging.warning(message) raise ImportError(message) else: return module except ImportError as err: if (not first_import_error): first_import_error = err logging.error('Must use valid json library (json or simplejson)') raise first_import_error
Try to load a valid json module. There are more than one json modules that might be installed. They are mostly compatible with one another but some versions may be different. This function attempts to load various json modules in a preferred order. It does a basic check to guess if a loaded version of json is compatible. Returns: Compatible json module. Raises: ImportError if there are no json modules or the loaded json module is not compatible with ProtoRPC.
codesearchnet
def get_logging_metric_hook(benchmark_log_dir=None,
                            tensors_to_log=None,
                            every_n_secs=600,
                            **kwargs):
    if benchmark_log_dir is None:
        raise ValueError('metric_log_dir should be provided to use metric logger')
    if tensors_to_log is None:
        tensors_to_log = _TENSORS_TO_LOG
    return metric_hook.LoggingMetricHook(
        tensors=tensors_to_log,
        log_dir=benchmark_log_dir,
        every_n_secs=every_n_secs)
Function to get LoggingMetricHook. Args: benchmark_log_dir: `string`, directory path to save the metric log. tensors_to_log: List of tensor names or dictionary mapping labels to tensor names. If not set, log _TENSORS_TO_LOG by default. every_n_secs: `int`, the frequency for logging the metric. Default to every 10 mins. Returns: Returns a LoggingMetricHook that logs the given tensors to the benchmark log directory.
codesearchnet
def is_special_unitary(matrix: np.ndarray, *, rtol: float = 1e-05, atol: float = 1e-08) -> bool:
    return (is_unitary(matrix, rtol=rtol, atol=atol) and
            (matrix.shape[0] == 0 or
             np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol)))
Determines if a matrix is approximately unitary with unit determinant. A matrix is special-unitary if it is square and its adjoint is its inverse and its determinant is one. Args: matrix: The matrix to check. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the matrix is unitary with unit determinant within the given tolerance.
codesearchnet
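A self-contained numpy sketch of the special-unitary check; since the separate is_unitary helper is not shown, the unitarity test here (M @ M^H close to I for a square matrix) is an assumption.

import numpy as np

def is_special_unitary(matrix, rtol=1e-05, atol=1e-08):
    # Unitary: M @ M^dagger == I; special: det(M) == 1.
    n = matrix.shape[0]
    unitary = np.allclose(matrix @ matrix.conj().T, np.eye(n), rtol=rtol, atol=atol)
    return unitary and (n == 0 or np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol))

pauli_z = np.array([[1, 0], [0, -1]])   # unitary, but det = -1
rz = np.array([[1j, 0], [0, -1j]])      # unitary, det = 1
print(is_special_unitary(pauli_z))  # False
print(is_special_unitary(rz))       # True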
def raw_state(self):
    try:
        return self._get_domain().state()
    except libvirt.libvirtError as e:
        raise vm_plugin.LagoFailedToGetVMStateError(str(e))
Return the state of the domain in Libvirt's terms Returns: tuple of ints: The state and its reason Raises: :exc:`~lago.plugins.vm.LagoVMDoesNotExistError`: If the VM of this provider doesn't exist. :exc:`~lago.plugins.vm.LagoFailedToGetVMStateError`: If the VM exists, but the query returned an error.
codesearchnet
def get_sig(ir, name):
    sig = '{}({})'
    argss = convert_arguments(ir.arguments)
    return [sig.format(name, ','.join(args)) for args in argss]
Return a list of potential signatures. It is a list because Constant variables can be converted to int256. Args: ir (slithIR.operation) name (str) Returns: list(str)
juraj-google-style
def LockScanNode(self, path_spec):
    scan_node = self._scan_nodes.get(path_spec, None)
    if not scan_node:
        raise KeyError('Scan node does not exist.')

    self._locked_scan_nodes[path_spec] = scan_node
Marks a scan node as locked. Args: path_spec (PathSpec): path specification. Raises: KeyError: if the scan node does not exist.
juraj-google-style
def save_feature_list(self, obj, set_id, feature_list_id):
    save(obj, self.features_dir + 'X_{}_{}.pickle'.format(set_id, feature_list_id))
Pickle the specified feature list to a file. Example: `project.save_feature_list(X_tfidf_train, 'train', 'tfidf')`. Args: obj: The object to pickle (e.g., a numpy array or a Pandas dataframe). set_id: The id of the subset (e.g., 'train' or 'test'). feature_list_id: The name for this feature list.
juraj-google-style
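The save helper used above is not shown; this is one plausible pickle-based implementation, with the directory layout and helper names assumed for illustration.

import os
import pickle

def save(obj, path):
    # A plausible implementation of the save helper used above.
    with open(path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)

def load(path):
    with open(path, 'rb') as f:
        return pickle.load(f)

features_dir = './features/'
os.makedirs(features_dir, exist_ok=True)
save([[0.1, 0.2], [0.3, 0.4]], features_dir + 'X_train_tfidf.pickle')
print(load(features_dir + 'X_train_tfidf.pickle'))  # [[0.1, 0.2], [0.3, 0.4]]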
def transpose(self, name=None, activate_final=None): if name is None: name = self.module_name + "_transpose" if activate_final is None: activate_final = self.activate_final output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers] output_sizes.reverse() return MLP( name=name, output_sizes=output_sizes, activation=self.activation, activate_final=activate_final, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, use_bias=self.use_bias, use_dropout=self.use_dropout)
Returns transposed `MLP`. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. Returns: Matching transposed `MLP` module.
juraj-google-style
def _force_edge_active_move(self, state: _STATE) -> _STATE: (seqs, edges) = state unused_edges = edges.copy() for seq in seqs: for i in range(1, len(seq)): unused_edges.remove(self._normalize_edge((seq[(i - 1)], seq[i]))) edge = self._choose_random_edge(unused_edges) if (not edge): return (seqs, edges) return (self._force_edge_active(seqs, edge, (lambda : bool(self._rand.randint(2)))), edges)
Move which forces a random edge to appear on some sequence. This move chooses random edge from the edges which do not belong to any sequence and modifies state in such a way, that this chosen edge appears on some sequence of the search state. Args: state: Search state, not mutated. Returns: New search state with one of the unused edges appearing in some sequence.
codesearchnet
def _feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:
    pass  # The default implementation is a no-op; subclasses override this.
Actual feedback method which should be implemented by the child class. The default implementation is no-op. Args: dna: a DNA object. reward: reward for the DNA. It is a float if `self.multi_objective` returns False, otherwise it's a tuple of floats.
github-repos
def _get_name_filter(package, context='decorate', reparse=False): global name_filters pkey = (package, context) if ((pkey in name_filters) and (not reparse)): return name_filters[pkey] from acorn.config import settings spack = settings(package) sections = {'decorate': ['tracking', 'acorn.tracking'], 'time': ['timing', 'acorn.timing'], 'analyze': ['analysis', 'acorn.analysis']} (filters, rfilters) = (None, None) import re if (context in sections): (filters, rfilters) = ([], []) (ignores, rignores) = ([], []) for section in sections[context]: if spack.has_section(section): options = spack.options(section) if ('filter' in options): filters.extend(re.split('\\s*\\$\\s*', spack.get(section, 'filter'))) if ('rfilter' in options): pfilters = re.split('\\s*\\$\\s*', spack.get(section, 'rfilter')) rfilters.extend([re.compile(p, re.I) for p in pfilters]) if ('ignore' in options): ignores.extend(re.split('\\s*\\$\\s*', spack.get(section, 'ignore'))) if ('rignore' in options): pignores = re.split('\\s*\\$\\s*', spack.get(section, 'rignore')) rignores.extend([re.compile(p, re.I) for p in pfilters]) name_filters[pkey] = {'filters': filters, 'rfilters': rfilters, 'ignores': ignores, 'rignores': rignores} else: name_filters[pkey] = None return name_filters[pkey]
Makes sure that the name filters for the specified package have been loaded. Args: package (str): name of the package that this method belongs to. context (str): one of ['decorate', 'time', 'analyze']; specifies which section of the configuration settings to check.
codesearchnet
def _process_policy_eval_results(to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions): actions_to_send = defaultdict(dict) for env_id in active_envs: actions_to_send[env_id] = {} for (policy_id, eval_data) in to_eval.items(): rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data]) (actions, rnn_out_cols, pi_info_cols) = eval_results[policy_id] if (len(rnn_in_cols) != len(rnn_out_cols)): raise ValueError('Length of RNN in did not match RNN out, got: {} vs {}'.format(rnn_in_cols, rnn_out_cols)) for (f_i, column) in enumerate(rnn_in_cols): pi_info_cols['state_in_{}'.format(f_i)] = column for (f_i, column) in enumerate(rnn_out_cols): pi_info_cols['state_out_{}'.format(f_i)] = column actions = _unbatch_tuple_actions(actions) policy = _get_or_raise(policies, policy_id) for (i, action) in enumerate(actions): env_id = eval_data[i].env_id agent_id = eval_data[i].agent_id if clip_actions: actions_to_send[env_id][agent_id] = clip_action(action, policy.action_space) else: actions_to_send[env_id][agent_id] = action episode = active_episodes[env_id] episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols]) episode._set_last_pi_info(agent_id, {k: v[i] for (k, v) in pi_info_cols.items()}) if ((env_id in off_policy_actions) and (agent_id in off_policy_actions[env_id])): episode._set_last_action(agent_id, off_policy_actions[env_id][agent_id]) else: episode._set_last_action(agent_id, action) return actions_to_send
Process the output of policy neural network evaluation. Records policy evaluation results into the given episode objects and returns replies to send back to agents in the env. Returns: actions_to_send: nested dict of env id -> agent id -> agent replies.
codesearchnet
def AddAttribute(self, attribute, value=None, age=None): if ('w' not in self.mode): raise IOError(('Writing attribute %s to read only object.' % attribute)) if (value is None): value = attribute attribute = value.attribute_instance if ((self.mode != 'w') and attribute.lock_protected and (not self.transaction)): raise IOError(('Object must be locked to write attribute %s.' % attribute)) self._CheckAttribute(attribute, value) if attribute.versioned: if attribute.creates_new_object_version: self._new_version = True if age: value.age = age else: value.age = rdfvalue.RDFDatetime.Now() else: self._to_delete.add(attribute) self.synced_attributes.pop(attribute, None) self.new_attributes.pop(attribute, None) value.age = 0 self._AddAttributeToCache(attribute, value, self.new_attributes) self._dirty = True
Add an additional attribute to this object. If value is None, attribute is expected to be already initialized with a value. For example: fd.AddAttribute(fd.Schema.CONTAINS("some data")) Args: attribute: The attribute name or an RDFValue derived from the attribute. value: The value the attribute will be set to. age: Age (timestamp) of the attribute. If None, current time is used. Raises: IOError: If this object is read only.
codesearchnet
def _stream_output(process):
    exit_code = None

    while exit_code is None:
        stdout = process.stdout.readline().decode('utf-8')
        sys.stdout.write(stdout)
        exit_code = process.poll()

    if exit_code != 0:
        raise RuntimeError('Process exited with code: %s' % exit_code)

    return exit_code
Stream the output of a process to stdout This function takes an existing process that will be polled for output. Only stdout will be polled and sent to sys.stdout. Args: process(subprocess.Popen): a process that has been started with stdout=PIPE and stderr=STDOUT Returns (int): process exit code
codesearchnet
def getParameter(self, name):
    return lock_and_call(
        lambda: Parameter(self._impl.getParameter(name)),
        self._lock
    )
Get the parameter with the corresponding name. Args: name: Name of the parameter to be found. Raises: TypeError: if the specified parameter does not exist.
juraj-google-style
def json(self) -> dict:
    content = {}
    if self.text:
        content['text'] = self.text
    content['controls'] = [control.json() for control in self.content]
    self.control_json['content'] = content
    return self.control_json
Returns json compatible state of the ButtonsFrame instance. Returns json compatible state of the ButtonsFrame instance including all nested buttons. Returns: control_json: Json representation of ButtonsFrame state.
codesearchnet
def should_stop(self):
    if self._check_stop():
        return True
    if self._sess:
        return self._wrapped_is_stoppable and self._sess.should_stop()
    return True
Return true if this session should not be used anymore. Always return True if the session was closed. Returns: True if the session should stop, False otherwise.
github-repos
def get_creation_date_tags(url, domain, as_dicts=False):
    creation_date_tags = [
        mementoweb_api_tags(url),
        get_whois_tags(domain),
    ]
    creation_date_tags = sorted(
        sum(creation_date_tags, []),
        key=lambda x: x.date
    )

    if not as_dicts:
        return creation_date_tags

    return [
        item._as_dict()
        for item in creation_date_tags
    ]
Put together all data sources in this module and return their combined output. Args: url (str): URL of the web, with relative paths and so on. domain (str): Just the domain of the web. as_dicts (bool, default False): Convert output to dictionaries compatible with :class:`.SourceString`? Returns: list: Sorted list of :class:`TimeResource` objects or dicts.
juraj-google-style
def annotate_test_file(self, test_file: Iterator[str]) -> Iterator[str]:
    transformed_tests, run_directives = self.for_each_test_case(
        test_file, self.annotate_test_case, num_outputs=2)
    return itertools.chain([_BANNER_COMMENT_LINE], run_directives, ['\n'], transformed_tests)
Inserts FileCheck directives above each test case in an HLO test file. Args: test_file: An iterator over the lines of an HLO test file. Returns: An iterator over the lines of the transformed HLO test file. Each test case is preceded by FileCheck directives describing the expected output of the optimizer on that test case.
github-repos
def add_op(state, op_func, *args, **kwargs):
    frameinfo = get_caller_frameinfo()
    kwargs['frameinfo'] = frameinfo

    for host in state.inventory:
        op_func(state, host, *args, **kwargs)
Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts. Args: state (``pyinfra.api.State`` obj): the deploy state to add the operation to op_func (function): the operation function from one of the modules, ie ``server.user`` args/kwargs: passed to the operation function
codesearchnet
def make_sharded_variable_creator(hosts: List[Text]) -> Callable[..., TPUEmbeddingVariable]: def sharded_variable_creator(next_creator: Callable[..., tf_variables.Variable], *args, **kwargs): kwargs['skip_mirrored_creator'] = True num_hosts = len(hosts) name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs) initial_value = kwargs['initial_value'] rows = shape[0] cols = shape[1] partial_partition = rows % num_hosts full_rows_per_host = rows partitions = [full_rows_per_host + 1] * partial_partition + [full_rows_per_host] * (num_hosts - partial_partition) variables = [] sharding_aware = 'shard_info' in tf_inspect.getargspec(initial_value).args offset = 0 kwargs['dtype'] = dtype for i, p in enumerate(partitions): if p == 0: continue with ops.device(hosts[i]): kwargs['name'] = '{}_{}'.format(name, i) kwargs['shape'] = (p, cols) if sharding_aware: shard_info = base.ShardInfo(kwargs['shape'], (offset, 0)) kwargs['initial_value'] = functools.partial(initial_value, shard_info=shard_info) offset += p else: kwargs['initial_value'] = functools.partial(unwrapped_initial_value, kwargs['shape'], dtype=dtype) variables.append(next_creator(*args, **kwargs)) return TPUEmbeddingVariable(variables, name=name) return sharded_variable_creator
Makes a sharded variable creator given a list of hosts. Args: hosts: a list of tensorflow devices on which to shard the tensors. Returns: A variable creator function.
github-repos
def _read_protocol_line(self): self._server_start_stdout = [] while True: line = self._proc.stdout.readline().decode('utf-8') if not line: raise errors.ServerStartError(self._device, 'Unexpected EOF when waiting for server to start.') line = line.strip() if line.startswith('INSTRUMENTATION_RESULT:') or line.startswith('SNIPPET '): self.log.debug('Accepted line from instrumentation output: "%s"', line) return line self._server_start_stdout.append(line) self.log.debug('Discarded line from instrumentation output: "%s"', line)
Reads the next line of instrumentation output relevant to snippets. This method will skip over lines that don't start with 'SNIPPET ' or 'INSTRUMENTATION_RESULT:'. Returns: A string for the next line of snippet-related instrumentation output, stripped. Raises: errors.ServerStartError: If EOF is reached without any protocol lines being read.
github-repos
async def get_movie(self, id_):
    url = self.url_builder(
        'movie/{movie_id}',
        dict(movie_id=id_),
        url_params=OrderedDict(append_to_response='credits'),
    )
    data = await self.get_data(url)
    if data is None:
        return
    return Movie.from_json(data, self.config['data'].get('images'))
Retrieve movie data by ID. Arguments: id_ (:py:class:`int`): The movie's TMDb ID. Returns: :py:class:`~.Movie`: The requested movie.
juraj-google-style
def __init__(self, header, values, datetimes): assert isinstance(header, Header), \ 'header must be a Ladybug Header object. Got {}'.format(type(header)) assert isinstance(datetimes, Iterable) \ and not isinstance(datetimes, (str, dict, bytes, bytearray)), \ 'datetimes should be a list or tuple. Got {}'.format(type(datetimes)) self._header = header self._datetimes = tuple(datetimes) self.values = values self._validated_a_period = False
Initialize base collection. Args: header: A Ladybug Header object. values: A list of values. datetimes: A list of Ladybug DateTime objects that aligns with the list of values.
juraj-google-style
def get_qa_logit_layer(self) -> nn.Module:
    if hasattr(self, 'answer_head'):
        return self.answer_head.logit_fc[-1]
Returns the linear layer that produces question answering logits Returns: `nn.Module`: A torch module mapping the question answering prediction hidden states. `None`: A NoneType object if Lxmert does not have the visual answering head.
github-repos
def from_dict(cls, parameters):
    instance = cls()
    instance.fitted = parameters['fitted']
    instance.constant_value = parameters['constant_value']
    if instance.fitted and instance.constant_value is None:
        instance.model = scipy.stats.truncnorm(parameters['a'], parameters['b'])

    return instance
Set attributes with provided values. Args: parameters(dict): Dictionary containing instance parameters. Returns: Truncnorm: Instance populated with given parameters.
juraj-google-style
def make_single_array(ds, batch_size=8*1024): if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple): raise ValueError('Dataset must have a single type and shape') nshapes = len(ds.output_shapes) if nshapes > 0: raise ValueError('Dataset must be comprised of scalars (TensorShape=[])') batches = [] with tf.Session() as sess: ds = ds.batch(batch_size) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() with tqdm(desc='Elements', unit_scale=1) as pbar: try: while True: batches.append(sess.run(get_next)) pbar.update(len(batches[-1])) except tf.errors.OutOfRangeError: pass if batches: return np.concatenate(batches) return np.array([], dtype=ds.output_types.as_numpy_dtype)
Create a single numpy array from a dataset. The dataset must have only one dimension, that is, the length of its `output_shapes` and `output_types` is 1, and its output shape must be `[]`, that is, every tensor in the dataset must be a scalar. Args: ds: a TF Dataset. batch_size: how many elements to read per pass Returns: a single numpy array.
juraj-google-style
def __init__(self, bytes_per_pack=0, timeout_seconds=None):
    pass
Creates a CollectiveHints. Args: bytes_per_pack: a non-negative integer. Breaks collective operations into packs of certain size. If it's zero, the value is determined automatically. This only applies to all-reduce with `MultiWorkerMirroredStrategy` currently. timeout_seconds: a float or None, timeout in seconds. If not None, the collective raises `tf.errors.DeadlineExceededError` if it takes longer than this timeout. This can be useful when debugging hanging issues. This should only be used for debugging since it creates a new thread for each collective, i.e. an overhead of `timeout_seconds * num_collectives_per_second` more threads. This only works for `tf.distribute.experimental.MultiWorkerMirroredStrategy`. Raises: ValueError: When arguments have invalid value.
github-repos
def status(self, job_ids):
    logging.debug("Checking status of : {0}".format(job_ids))
    for job_id in self.resources:
        poll_code = self.resources[job_id]['proc'].poll()
        if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
            continue

        if poll_code is None:
            self.resources[job_id]['status'] = 'RUNNING'
        elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING':
            self.resources[job_id]['status'] = 'COMPLETED'
        elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING':
            self.resources[job_id]['status'] = 'FAILED'

    return [self.resources[jid]['status'] for jid in job_ids]
Get the status of a list of jobs identified by their ids. Args: - job_ids (List of ids) : List of identifiers for the jobs Returns: - List of status codes.
juraj-google-style
def validate_probability(p: float, p_str: str) -> float:
    if p < 0:
        raise ValueError('{} was less than 0.'.format(p_str))
    elif p > 1:
        raise ValueError('{} was greater than 1.'.format(p_str))
    return p
Validates that a probability is between 0 and 1 inclusively. Args: p: The value to validate. p_str: What to call the probability in error messages. Returns: The probability p if the probability is valid. Raises: ValueError if the probability is invalid.
juraj-google-style
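A standalone copy of the validator with a short usage example showing both the pass-through and the error path; the "noise probability" label is just an illustrative assumption.

def validate_probability(p, p_str):
    # Reject values outside [0, 1]; return the value unchanged otherwise.
    if p < 0:
        raise ValueError('{} was less than 0.'.format(p_str))
    elif p > 1:
        raise ValueError('{} was greater than 1.'.format(p_str))
    return p

noise = validate_probability(0.05, 'noise probability')
print(noise)  # 0.05
try:
    validate_probability(1.5, 'noise probability')
except ValueError as err:
    print(err)  # noise probability was greater than 1.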
def __init__(self, mediator=None):
    super(WindowsVolumeScanner, self).__init__(mediator=mediator)
    self._file_system = None
    self._path_resolver = None
    self._windows_directory = None
Initializes a Windows volume scanner. Args: mediator (VolumeScannerMediator): a volume scanner mediator.
juraj-google-style
def broadcast_shapes(shape1, shape2):
    shape1 = list(shape1)
    shape2 = list(shape2)
    origin_shape1 = shape1
    origin_shape2 = shape2

    if len(shape1) > len(shape2):
        shape2 = [1] * (len(shape1) - len(shape2)) + shape2
    if len(shape1) < len(shape2):
        shape1 = [1] * (len(shape2) - len(shape1)) + shape1

    output_shape = list(shape1)
    for i in range(len(shape1)):
        if shape1[i] == 1:
            output_shape[i] = shape2[i]
        elif shape1[i] is None:
            output_shape[i] = None if shape2[i] == 1 else shape2[i]
        elif shape2[i] == 1 or shape2[i] is None or shape2[i] == shape1[i]:
            output_shape[i] = shape1[i]
        else:
            raise ValueError(f'Cannot broadcast shape, the failure dim has value {shape1[i]}, which cannot be broadcasted to {shape2[i]}. Input shapes are: {origin_shape1} and {origin_shape2}.')
    return output_shape
Broadcast input shapes to a unified shape. Convert to list for mutability. Args: shape1: A tuple or list of integers. shape2: A tuple or list of integers. Returns: output_shape (list of integers or `None`): The broadcasted shape. Example: >>> broadcast_shapes((5, 3), (1, 3)) [5, 3]
github-repos
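For fully known (integer) shapes, the result can be cross-checked against NumPy's own broadcasting rules via np.broadcast_shapes (available in NumPy 1.20+); the None handling above is what the handwritten version adds on top.

import numpy as np

print(np.broadcast_shapes((5, 3), (1, 3)))      # (5, 3)
print(np.broadcast_shapes((8, 1, 6), (7, 1)))   # (8, 7, 6)
try:
    np.broadcast_shapes((5, 3), (4, 3))          # incompatible leading dims
except ValueError as err:
    print(err)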
def probabilistic_collocation(order, dist, subset=0.1):
    abscissas, weights = chaospy.quad.collection.golub_welsch(order, dist)
    likelihood = dist.pdf(abscissas)

    alpha = numpy.random.random(len(weights))
    alpha = likelihood > (alpha * subset) * numpy.max(likelihood)

    abscissas = abscissas.T[alpha].T
    weights = weights[alpha]
    return (abscissas, weights)
Probabilistic collocation method. Args: order (int, numpy.ndarray) : Quadrature order along each axis. dist (Dist) : Distribution to generate samples from. subset (float) : Rate at which to remove samples.
codesearchnet
def ProcessGlobalSuppresions(lines):
    for line in lines:
        if _SEARCH_C_FILE.search(line):
            for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
                _global_error_suppressions[category] = True
        if _SEARCH_KERNEL_FILE.search(line):
            for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
                _global_error_suppressions[category] = True
Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. Args: lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline.
codesearchnet
def _try_refresh_access_token(self) -> None:
    if self.refresh_token:
        if not self.access_token or self._is_access_token_expired():
            self.access_token, self.access_expiration = self._get_access_from_refresh()
            self.access_expiration = time.time() + self.access_expiration
Attempts to get a new access token using the refresh token, if needed. If the access token is expired and this instance has a stored refresh token, then the refresh token is used in the API call to get a new access token. If successful, this instance is modified in-place with that new access token. Args: None Returns: None
codesearchnet
def Match(self, artifact=None, os_name=None, cpe=None, label=None):
    return [
        c for c in self.conditions if c.Match(artifact, os_name, cpe, label)
    ]
Test if host data should trigger a check. Args: artifact: An artifact name. os_name: An OS string. cpe: A CPE string. label: A label string. Returns: A list of conditions that match.
juraj-google-style
def _build_hash_string(self): if ((self.site_name in SITE_LIST) or self.hash_string): if (self.username and self.password): try: hash_string = self.hash_string.format(self.password) except TypeError: raise PybooruError("Pybooru can't add 'password' to 'hash_string'") self.password_hash = hashlib.sha1(hash_string.encode('utf-8')).hexdigest() else: raise PybooruError("Specify the 'username' and 'password' parameters of the Pybooru object, for setting 'password_hash' attribute.") else: raise PybooruError("Specify the 'hash_string' parameter of the Pybooru object, for the functions that requires login.")
Function to build the password hash string. Raises: PybooruError: When no hash string is provided. PybooruError: When username or password is not provided. PybooruError: When Pybooru can't add the password to the hash string.
codesearchnet
def load_validation_plugin(name=None): if (not name): return BaseValidationRules plugin = None for entry_point in iter_entry_points('bigchaindb.validation', name): plugin = entry_point.load() if (not plugin): raise ResolutionError('No plugin found in group `bigchaindb.validation` with name `{}`'.format(name)) if (not issubclass(plugin, (BaseValidationRules,))): raise TypeError('object of type "{}" does not implement `bigchaindb.validation.BaseValidationRules`'.format(type(plugin))) return plugin
Find and load the chosen validation plugin. Args: name (string): the name of the entry_point, as advertised in the setup.py of the providing package. Returns: an uninstantiated subclass of ``bigchaindb.validation.BaseValidationRules``
codesearchnet
def VerifyStructure(self, parser_mediator, line): self._last_month = 0 self._year_use = parser_mediator.GetEstimatedYear() try: structure = self.SECURITYD_LINE.parseString(line) except pyparsing.ParseException: logger.debug('Not a MacOS securityd log file') return False time_elements_tuple = self._GetTimeElementsTuple(structure) try: dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple) except ValueError: logger.debug('Not a MacOS securityd log file, invalid date and time: {0!s}'.format(structure.date_time)) return False self._last_month = time_elements_tuple[1] return True
Verify that this file is a securityd log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
codesearchnet
def get_palette(num_colors=256): pallete = [0]*(num_colors*3) for j in range(0, num_colors): lab = j pallete[j*3+0] = 0 pallete[j*3+1] = 0 pallete[j*3+2] = 0 i = 0 while (lab > 0): pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i)) pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i)) pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i)) i = i + 1 lab >>= 3 return pallete
generates the colormap for visualizing the segmentation mask Args: num_colors (int): the number of colors to generate in the output palette Returns: list: a flat palette of length num_colors * 3, holding the RGB components of each color.
juraj-google-style
def check_requirements_file(req_file, skip_packages):
    reqs = read_requirements(req_file)
    if skip_packages is not None:
        reqs = [req for req in reqs if req.name not in skip_packages]
    outdated_reqs = filter(None, [check_req(req) for req in reqs])
    return outdated_reqs
Return list of outdated requirements. Args: req_file (str): Filename of requirements file skip_packages (list): List of package names to ignore.
codesearchnet
def _get_filename_from_url(url):
    parse = urlparse(url)
    return os.path.basename(parse.path)
Return a filename from a URL Args: url (str): URL to extract filename from Returns: (str): Filename in URL
juraj-google-style
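A self-contained version with imports and a couple of example URLs; the public function name differs slightly from the private helper above.

import os
from urllib.parse import urlparse

def get_filename_from_url(url):
    # Parse the URL and keep only the last component of its path.
    return os.path.basename(urlparse(url).path)

print(get_filename_from_url('https://example.com/files/report.pdf?download=1'))  # report.pdf
print(get_filename_from_url('https://example.com/files/'))                       # '' (no filename)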
def _get_dominant_angle(lines, domination_type=MEDIAN):
    if domination_type == MEDIAN:
        return _get_median_angle(lines)
    elif domination_type == MEAN:
        return _get_mean_angle(lines)
    else:
        raise ValueError('Unknown domination type provided: %s' % domination_type)
Picks dominant angle of a set of lines. Args: lines: iterable of (x1, y1, x2, y2) tuples that define lines. domination_type: either MEDIAN or MEAN. Returns: Dominant angle value in radians. Raises: ValueError: on unknown domination_type.
codesearchnet
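The _get_median_angle helper is not shown; this is a plausible numpy sketch of the median variant, assuming each line is an (x1, y1, x2, y2) tuple as described in the docstring.

import numpy as np

def get_median_angle(lines):
    # Each line is (x1, y1, x2, y2); its angle is atan2(dy, dx) in radians.
    angles = [np.arctan2(y2 - y1, x2 - x1) for x1, y1, x2, y2 in lines]
    return float(np.median(angles))

lines = [(0, 0, 10, 1), (0, 0, 10, -1), (0, 0, 10, 0)]
print(get_median_angle(lines))  # 0.0 -- the slight up/down tilts cancel at the median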
def _reduce_output(self, outputs, seq_lengths): batch_size = outputs.shape[0] reduced = [] for i in range(batch_size): if (self.lstm_reduction == 'mean'): reduced.append(outputs[(i, :seq_lengths[i], :)].mean(dim=0)) elif (self.lstm_reduction == 'max'): reduced.append(outputs[(i, :seq_lengths[i], :)].max(dim=0)[0]) elif (self.lstm_reduction == 'last'): reduced.append(outputs[(i, (seq_lengths[i] - 1), :)]) elif (self.lstm_reduction == 'attention'): reduced.append(self._attention(outputs[(i, :seq_lengths[i], :)])) else: msg = f"Did not recognize lstm kwarg 'lstm_reduction' == {self.lstm_reduction}" raise ValueError(msg) return torch.stack(reduced, dim=0)
Reduces the output of an LSTM step Args: outputs: (torch.FloatTensor) the hidden state outputs from the lstm, with shape [batch_size, max_seq_length, hidden_size]
codesearchnet
def get(self, key, default=None, cast=True): tablename, _, key = key.rpartition(':') if tablename and tablename not in self.fields.name.split('+'): raise ItsdbError('column requested from wrong table: {}' .format(tablename)) try: index = self.fields.index(key) value = list.__getitem__(self, index) except (KeyError, IndexError): value = default else: if cast: field = self.fields[index] value = _cast_to_datatype(value, field) return value
Return the field data given by field name *key*. Args: key: the field name of the data to return default: the value to return if *key* is not in the row
juraj-google-style
def UpdateIncludeState(filename, include_dict, io=codecs): headerfile = None try: headerfile = io.open(filename, 'r', 'utf8', 'replace') except IOError: return False linenum = 0 for line in headerfile: linenum += 1 clean_line = CleanseComments(line) match = _RE_PATTERN_INCLUDE.search(clean_line) if match: include = match.group(2) include_dict.setdefault(include, linenum) return True
Fill up the include_dict with new includes found from the file. Args: filename: the name of the header to read. include_dict: a dictionary in which the headers are inserted. io: The io factory to use to read the file. Provided for testability. Returns: True if a header was successfully added. False otherwise.
juraj-google-style
def get_frame(self, index):
    frame_num = self.frame_index[index]
    onset = float(frame_num) / self.fps

    if index < self.n_frames - 1:
        next_frame_num = self.frame_index[index + 1]
        end = float(next_frame_num) / self.fps
    else:
        end = float(self.duration)

    duration = end - onset if end > onset else 0.0

    return VideoFrameStim(self, frame_num, data=self.clip.get_frame(onset), duration=duration)
Get video frame at the specified index. Args: index (int): Positional index of the desired frame.
juraj-google-style
def add_member_to_list(self, username, listname, member_type='USER'):
    return self.client.service.addMemberToList(listname, username, member_type, self.proxy_id)
Add a member to an existing list. Args: username (str): The username of the user to add listname (str): The name of the list to add the user to member_type (str): Normally, this should be "USER". If you are adding a list as a member of another list, set this to "LIST", instead.
codesearchnet
def pull(self, platform=None):
    repository, _ = parse_repository_tag(self.image_name)
    return self.collection.pull(repository, tag=self.id, platform=platform)
Pull the image digest. Args: platform (str): The platform to pull the image for. Default: ``None`` Returns: (:py:class:`Image`): A reference to the pulled image.
juraj-google-style
def render_template_inplace(template_path, info, dry_run=False, extra_filters=None, resolver=None): filters = {} if (resolver is not None): filters['find_product'] = _create_resolver_filter(resolver) if (extra_filters is not None): filters.update(extra_filters) basedir = os.path.dirname(template_path) template_name = os.path.basename(template_path) if (not template_name.endswith('.tpl')): raise ArgumentError('You must specify a filename that ends in .tpl', filepath=template_path) out_path = os.path.join(basedir, template_name[:(- 4)]) if (basedir == ''): basedir = '.' env = Environment(loader=FileSystemLoader(basedir), trim_blocks=True, lstrip_blocks=True) for (name, func) in filters.items(): env.filters[name] = func template = env.get_template(template_name) result = template.render(info) if (not dry_run): with open(out_path, 'wb') as outfile: outfile.write(result.encode('utf-8')) return out_path
Render a template file in place. This function expects template path to be a path to a file that ends in .tpl. It will be rendered to a file in the same directory with the .tpl suffix removed. Args: template_path (str): The path to the template file that we want to render in place. info (dict): A dictionary of variables passed into the template to perform substitutions. dry_run (bool): Whether to actually render the output file or just return the file path that would be generated. extra_filters (dict of str -> callable): An optional group of filters that will be made available to the template. The dict key will be the name at which callable is made available. resolver (ProductResolver): The specific ProductResolver class to use in the find_product filter. Returns: str: The path to the output file generated.
codesearchnet
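A minimal sketch of rendering a template in place; the file name and variables are assumptions for illustration.

# Given a template 'config.json.tpl' next to the script containing: {"device": "{{ name }}"}
out_path = render_template_inplace('config.json.tpl', {'name': 'widget-01'})
# out_path == 'config.json'; pass dry_run=True to get the output path without writing the file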
def print_start_command(self, command):
    size = len(command)
    if size > 20:
        raise RuntimeError('Command too long')
    # Encode the command length as two single-byte digits (tens, ones).
    n1 = size // 10
    n2 = size % 10
    self.send('^PS' + chr(n1) + chr(n2) + command)
Set the print start command. Args: command: the command string to send; must be at most 20 characters. Returns: None Raises: RuntimeError: if the command is longer than 20 characters.
juraj-google-style
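A sketch of the two-byte length encoding used by the command above; the command text is illustrative.

command = 'N' * 15                               # a 15-byte print command
n1, n2 = len(command) // 10, len(command) % 10   # tens digit 1, ones digit 5
# the device receives '^PS' + chr(1) + chr(5) + command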
def db(self, entity, query_filters='size=10'): if (self.entity_api_key == ''): return {'status': 'failure', 'response': 'No API key found in request'} historic_url = ((self.base_url + 'api/0.1.0/historicData?') + query_filters) historic_headers = {'apikey': self.entity_api_key, 'Content-Type': 'application/json'} historic_query_data = json.dumps({'query': {'match': {'key': entity}}}) with self.no_ssl_verification(): r = requests.get(historic_url, data=historic_query_data, headers=historic_headers) response = dict() if ('No API key' in str(r.content.decode('utf-8'))): response['status'] = 'failure' else: r = r.content.decode('utf-8') response = r return response
This function allows an entity to access its historic data. Args: entity (string): Name of the device whose historic data is requested query_filters (string): Elasticsearch query-string filters appended to the request, e.g. "pretty=true&size=10"
codesearchnet
def decorate_set_on_listener(prototype): def add_annotation(method): method._event_info = {} method._event_info['name'] = method.__name__ method._event_info['prototype'] = prototype return method return add_annotation
Private decorator for use in the editor. Allows the Editor to create listener methods. Args: prototype (str): The parameter list for the listener method (e.g. "(self, new_value)")
codesearchnet
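A hedged usage sketch for the decorator above; the class and method names are illustrative assumptions.

class MyWidget(object):
    @decorate_set_on_listener("(self, emitter, new_value)")
    def on_value_changed(self, emitter, new_value):
        pass

# The decorator only annotates the method for the editor:
# MyWidget.on_value_changed._event_info == {'name': 'on_value_changed',
#                                           'prototype': '(self, emitter, new_value)'}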
def __call__(self, **kwargs): if len(kwargs) != len(self._inputs): raise ValueError('Invalid number of inputs provided for running a SignatureDef, expected %s vs provided %s' % (len(self._inputs), len(kwargs))) for input_name, value in kwargs.items(): if input_name not in self._inputs: raise ValueError('Invalid Input name (%s) for SignatureDef' % input_name) self._interpreter_wrapper.ResizeInputTensor(self._inputs[input_name], np.array(value.shape, dtype=np.int32), False, self._subgraph_index) self._interpreter_wrapper.AllocateTensors(self._subgraph_index) for input_name, value in kwargs.items(): self._interpreter_wrapper.SetTensor(self._inputs[input_name], value, self._subgraph_index) self._interpreter_wrapper.Invoke(self._subgraph_index) result = {} for output_name, output_index in self._outputs: result[output_name] = self._interpreter_wrapper.GetTensor(output_index, self._subgraph_index) return result
Runs the SignatureDef given the provided inputs in arguments. Args: **kwargs: key,value for inputs to the model. Key is the SignatureDef input name. Value is numpy array with the value. Returns: dictionary of the results from the model invoke. Key in the dictionary is SignatureDef output name. Value is the result Tensor.
github-repos
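A hedged usage sketch via the TF Lite interpreter, which is where callables like the one above are normally obtained; the model path, signature key, and tensor names are assumptions.

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='model.tflite')
runner = interpreter.get_signature_runner('serving_default')
# kwargs are keyed by the SignatureDef input names; the result dict by its output names
outputs = runner(x=np.zeros((1, 4), dtype=np.float32))
print(outputs)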
def getContext(self, context_name = 'default'): if context_name == 'default' and 'default' not in self.contexts: self('default') return self.contexts[context_name]
Get a context by name, create the default context if it does not exist Params: context_name (string): Context name Raises: KeyError: If the context name does not exist Returns: bubbler.Bubbler: Named context
juraj-google-style
def added_tokens_decoder(self) -> dict[int, AddedToken]: return dict(sorted(self._added_tokens_decoder.items(), key=lambda item: item[0]))
Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. Returns: `Dict[int, AddedToken]`: The added tokens, keyed by their index in the vocabulary.
github-repos
def _ssl_context_factory(parameters): client_cert = None ca_cert = None key = config.conf['tls']['keyfile'] cert = config.conf['tls']['certfile'] ca_file = config.conf['tls']['ca_cert'] if ca_file: with open(ca_file, 'rb') as fd: ca_cert = ssl.Certificate.loadPEM(fd.read()) if (key and cert): with open(key) as fd: client_keypair = fd.read() with open(cert) as fd: client_keypair += fd.read() client_cert = ssl.PrivateCertificate.loadPEM(client_keypair) hostname = parameters.host if (not isinstance(hostname, six.text_type)): hostname = hostname.decode(locale.getdefaultlocale()[1]) try: context_factory = ssl.optionsForClientTLS(hostname, trustRoot=(ca_cert or ssl.platformTrust()), clientCertificate=client_cert, extraCertificateOptions={'raiseMinimumTo': ssl.TLSVersion.TLSv1_2}) except AttributeError: context_factory = ssl.CertificateOptions(certificate=client_cert.original, privateKey=client_cert.privateKey.original, caCerts=([ca_cert.original] or ssl.platformTrust()), verify=True, requireCertificate=True, verifyOnce=False, enableSessions=False) return context_factory
Produce a Twisted SSL context object from a pika connection parameter object. This is necessary as Twisted manages the connection, not Pika. Args: parameters (pika.ConnectionParameters): The connection parameters built from the fedora_messaging configuration.
codesearchnet
def register_mbr_plugin(self, fs_id, plugin): self.logger.debug('MBR: {}, FS ID: {}' .format(self.__get_plugin_name(plugin), fs_id)) self.__mbr_plugins[fs_id].append(plugin)
Used in a plugin's registration routine to associate its detection method with the given filesystem id. Args: fs_id: filesystem id that is read from the MBR partition entry plugin: plugin that supports this filesystem
juraj-google-style
def __init__(self, configuration, provider=None): self._configuration = configuration self._provider = provider
Base class for backends. This method should initialize the module and its configuration, and raise an exception if a component of the module is not available. Args: configuration (BackendConfiguration): backend configuration provider (BaseProvider): provider responsible for this backend Raises: FileNotFoundError: if the backend executable is not available. QiskitError: if there is no name in the configuration
juraj-google-style
def plot_ax(self, ax=None, fontsize=12, **kwargs): ax, fig, plt = get_ax_fig_plt(ax=ax) color = kwargs.get("color", "r") label = kwargs.get("label", "{} fit".format(self.__class__.__name__)) lines = ["Equation of State: %s" % self.__class__.__name__, "Minimum energy = %1.2f eV" % self.e0, "Minimum or reference volume = %1.2f Ang^3" % self.v0, "Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" % (self.b0, self.b0_GPa), "Derivative of bulk modulus wrt pressure = %1.2f" % self.b1] text = "\n".join(lines) text = kwargs.get("text", text) ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color) vmin, vmax = min(self.volumes), max(self.volumes) vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax)) vfit = np.linspace(vmin, vmax, 100) ax.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label) ax.grid(True) ax.set_xlabel("Volume $\\AA^3$") ax.set_ylabel("Energy (eV)") ax.legend(loc="best", shadow=True) ax.text(0.5, 0.5, text, fontsize=fontsize, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) return fig
Plot the equation of state on axis `ax`. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. fontsize: Legend fontsize. color (str): plot color. label (str): Plot label text (str): Legend text (optional) Returns: Matplotlib figure object.
juraj-google-style
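A hedged usage sketch with pymatgen's EOS helpers, which expose a plot_ax like the one above; the volumes and energies are placeholder data.

from pymatgen.analysis.eos import EOS

volumes = [13.72, 14.83, 16.00, 17.23, 18.52]          # Angstrom^3 (placeholder data)
energies = [-56.29, -56.41, -56.46, -56.46, -56.42]    # eV (placeholder data)
fit = EOS(eos_name='birch_murnaghan').fit(volumes, energies)
fig = fit.plot_ax(color='b', fontsize=10, label='Birch-Murnaghan fit')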
def _get_backend_instance(self, backend_cls): try: backend_instance = backend_cls(provider=self) except Exception as err: raise QiskitError(('Backend %s could not be instantiated: %s' % (backend_cls, err))) return backend_instance
Return an instance of a backend from its class. Args: backend_cls (class): Backend class. Returns: BaseBackend: a backend instance. Raises: QiskitError: if the backend could not be instantiated.
codesearchnet
def write(self, offset, data): if (not isinstance(offset, (int, long))): raise TypeError('Invalid offset type, should be integer.') if (not isinstance(data, (bytes, bytearray, list))): raise TypeError('Invalid data type, expected bytes, bytearray, or list.') offset = self._adjust_offset(offset) self._validate_offset(offset, len(data)) data = bytes(bytearray(data)) self.mapping[offset:(offset + len(data))] = data
Write a string of bytes to the specified `offset` in bytes, relative to the base physical address of the MMIO region. Args: offset (int, long): offset from base physical address, in bytes. data (bytes, bytearray, list): a byte array or list of 8-bit integers to write. Raises: TypeError: if `offset` or `data` type are invalid. ValueError: if `offset` is out of bounds, or if data is not valid bytes.
codesearchnet
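A hedged usage sketch assuming a periphery-style MMIO object; the physical base address and offsets are illustrative.

from periphery import MMIO

mmio = MMIO(0x40000000, 0x1000)                  # map a 4 KiB region
mmio.write(0x100, b'\x01\x02\x03\x04')           # write four bytes at offset 0x100
mmio.write(0x104, [0xde, 0xad, 0xbe, 0xef])      # lists of 8-bit integers are accepted too
mmio.close()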
def _compress_json(self, j): compressed_json = copy.copy(j) compressed_json.pop('users', None) compressed_data = zlib.compress( json.dumps(j['users']).encode('utf-8'), self.zlib_compression_strength ) b64_data = base64.b64encode(compressed_data).decode('utf-8') compressed_json['blob'] = b64_data return compressed_json
Compress the BLOB data portion of the usernotes. Arguments: j: the JSON in Schema v5 format (dict) Returns a dict with the 'users' key removed and 'blob' key added
juraj-google-style
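A sketch of the inverse operation implied by the blob layout above, assuming the same Schema v5 shape; the helper name is hypothetical.

import base64
import json
import zlib

def _decompress_json(compressed_json):
    # Rebuild the 'users' key from the zlib-compressed, base64-encoded 'blob'.
    j = dict(compressed_json)
    blob = base64.b64decode(j.pop('blob'))
    j['users'] = json.loads(zlib.decompress(blob).decode('utf-8'))
    return j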
def get_first_model_with_rest_name(cls, rest_name): models = cls.get_models_with_rest_name(rest_name) if len(models) > 0: return models[0] return None
Get the first model corresponding to a rest_name. Args: rest_name: the rest name Returns: the first matching model, or None if no model matches.
juraj-google-style
def RemoveEventAttribute(self, attribute_name): if (attribute_name not in self._extra_event_attributes): raise KeyError('Event attribute: {0:s} not set'.format(attribute_name)) del self._extra_event_attributes[attribute_name]
Removes an attribute from being set on all events produced. Args: attribute_name (str): name of the attribute to remove. Raises: KeyError: if the event attribute is not set.
codesearchnet
def GetEntries( self, parser_mediator, cookie_data=None, url=None, **kwargs): fields = cookie_data.split('.') number_of_fields = len(fields) if number_of_fields not in (1, 4): parser_mediator.ProduceExtractionWarning( 'unsupported number of fields: {0:d} in cookie: {1:s}'.format( number_of_fields, self.COOKIE_NAME)) return if number_of_fields == 1: domain_hash = None try: last_visit_posix_time = int(fields[0], 10) / 10000000 except ValueError: last_visit_posix_time = None number_of_pages_viewed = None elif number_of_fields == 4: domain_hash = fields[0] try: number_of_pages_viewed = int(fields[1], 10) except ValueError: number_of_pages_viewed = None try: if fields[2] in ('8', '9'): last_visit_posix_time = int(fields[3], 10) / 1000 else: last_visit_posix_time = int(fields[3], 10) except ValueError: last_visit_posix_time = None if last_visit_posix_time is not None: date_time = dfdatetime_posix_time.PosixTime( timestamp=last_visit_posix_time) timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED else: date_time = dfdatetime_semantic_time.SemanticTime('Not set') timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME event_data = GoogleAnalyticsEventData('utmb') event_data.cookie_name = self.COOKIE_NAME event_data.domain_hash = domain_hash event_data.pages_viewed = number_of_pages_viewed event_data.url = url event = time_events.DateTimeValuesEvent(date_time, timestamp_description) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts event objects from the cookie. Args: parser_mediator (ParserMediator): parser mediator. cookie_data (bytes): cookie data. url (str): URL or path where the cookie got set.
juraj-google-style
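An illustrative __utmb value in the four-field layout handled above; the numbers are made up.

cookie_data = '173272373.5.10.1390841029'
domain_hash, pages, token, timestamp = cookie_data.split('.')
# domain_hash = '173272373', pages viewed = 5, and because token is not '8' or '9'
# the last visit is int('1390841029') POSIX seconds (otherwise it would be milliseconds).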
def __init__(self, name, aliases=None, description=None, urls=None): super(UUIDDefinition, self).__init__( name, aliases=aliases, description=description, urls=urls) self.size = 16
Initializes a UUID data type definition. Args: name (str): name. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. urls (Optional[list[str]]): URLs.
juraj-google-style
def read_raw(self, key): data = None if (key is not None): data = self.db.read(key.strip()) else: self.tcex.log.warning(u'The key field was None.') return data
Read method of CRUD operation for raw data. Args: key (string): The variable to read from the DB. Returns: (any): Results retrieved from DB.
codesearchnet
def console_from_file(filename: str) -> tcod.console.Console: return tcod.console.Console._from_cdata(lib.TCOD_console_from_file(filename.encode('utf-8')))
Return a new console object from a filename. The file format is automatically determined. This can load REXPaint `.xp`, ASCII Paint `.apf`, or Non-delimited ASCII `.asc` files. Args: filename (Text): The path to the file, as a string. Returns: A new :any:`Console` instance.
codesearchnet
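A minimal usage sketch; the file name is an assumption.

console = console_from_file('title_screen.xp')   # format detected from the file contents
print(console.width, console.height)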
def create_endpoint(self, endpoint_name, config_name, tags=None, wait=True): LOGGER.info('Creating endpoint with name {}'.format(endpoint_name)) tags = (tags or []) self.sagemaker_client.create_endpoint(EndpointName=endpoint_name, EndpointConfigName=config_name, Tags=tags) if wait: self.wait_for_endpoint(endpoint_name) return endpoint_name
Create an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request. Once the ``Endpoint`` is created, client applications can send requests to obtain inferences. The endpoint configuration is created using the ``CreateEndpointConfig`` API. Args: endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` being created. config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy. tags (list[dict]): List of tags to attach to the endpoint (default: None). wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True). Returns: str: Name of the Amazon SageMaker ``Endpoint`` created.
codesearchnet
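A hedged usage sketch assuming a SageMaker Session object; the endpoint and config names are placeholders.

import sagemaker

session = sagemaker.Session()
session.create_endpoint(
    endpoint_name='my-model-endpoint',
    config_name='my-model-endpoint-config',
    tags=[{'Key': 'team', 'Value': 'ml'}],
    wait=True,
)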