code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (string, 3 classes)
---|---|---|
def whois_emails(self, emails):
api_name = 'opendns-whois-emails'
fmt_url_path = u'whois/emails/{0}'
return self._multi_get(api_name, fmt_url_path, emails)
|
Calls the WHOIS email endpoint
Args:
emails: An enumerable of string Emails
Returns:
A dict of {email: domain_result}
|
juraj-google-style
|
def __init__(self, grammar, latent_size, num_units):
super(ProbabilisticGrammar, self).__init__()
self.grammar = grammar
self.latent_size = latent_size
self.lstm = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units)
self.output_layer = tf.keras.layers.Dense(len(grammar.production_rules))
|
Constructs a probabilistic grammar.
Args:
grammar: An object representing a grammar. It has members
`nonterminal_symbols`, `alphabet`, `production_rules`, and
`start_symbol`, and a method `mask` determining (in)valid
production rules given a symbol.
latent_size: Number of dimensions in the latent code.
num_units: Number of units in the LSTM cell.
|
juraj-google-style
|
async def enqueue(self, query, queue_index=None, stop_current=False, shuffle=False):
if query is None or query == "":
return
self.statuslog.info("Parsing {}".format(query))
self.logger.debug("Enqueueing from query")
indexnum = None
if queue_index is not None:
try:
indexnum = int(queue_index) - 1
except TypeError:
self.statuslog.error("Play index argument must be a number")
return
except ValueError:
self.statuslog.error("Play index argument must be a number")
return
if not self.vready:
self.parse_query(query, indexnum, stop_current, shuffle)
else:
parse_thread = threading.Thread(
target=self.parse_query,
args=[query, indexnum, stop_current, shuffle])
parse_thread.start()
|
Queues songs based on either a YouTube search or a link
Args:
query (str): Either a search term or a link
queue_index (str): The queue index to enqueue at (None for end)
stop_current (bool): Whether to stop the current song after the songs are queued
shuffle (bool): Whether to shuffle the added songs
|
juraj-google-style
|
def get_associated_profiles(self):
uri = "{}/associatedProfiles".format(self.data['uri'])
return self._helper.do_get(uri)
|
Gets the URIs of profiles which are using an Ethernet network.
Returns:
list: URIs of the associated profiles.
|
juraj-google-style
|
def gbest_idx(swarm):
best = 0
cmp = comparator(swarm[best].best_fitness)
for (idx, particle) in enumerate(swarm):
if cmp(particle.best_fitness, swarm[best].best_fitness):
best = idx
return best
|
gbest Neighbourhood topology function.
Args:
swarm: list: The list of particles.
Returns:
int: The index of the gbest particle.
|
juraj-google-style
|
def _parse_query_key(self, key, val, is_escaped):
if key.endswith('__contains'):
key = key[:-10]
val = self._parse_query_modifier('contains', val, is_escaped)
elif key.endswith('__range'):
key = key[:-7]
val = self._parse_query_modifier('range', val, is_escaped)
elif key.endswith('__startswith'):
key = key[:-12]
val = self._parse_query_modifier('startswith', val, is_escaped)
elif key.endswith('__endswith'):
key = key[:-10]
val = self._parse_query_modifier('endswith', val, is_escaped)
elif key.endswith('__lt'):
key = key[:-4]
val = self._parse_query_modifier('lt', val, is_escaped)
elif key.endswith('__gt'):
key = key[:-4]
val = self._parse_query_modifier('gt', val, is_escaped)
elif key.endswith('__lte'):
key = key[:-5]
val = self._parse_query_modifier('lte', val, is_escaped)
elif key.endswith('__gte'):
key = key[:-5]
val = self._parse_query_modifier('gte', val, is_escaped)
elif key != 'NOKEY' and not is_escaped:
val = self._escape_query(val)
return key, val
|
Strips the query modifier from the key and calls the appropriate value modifier.
Args:
key (str): Query key
val: Query value
Returns:
Parsed query key and value.
|
juraj-google-style
|
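For illustration, the suffix-stripping idea in _parse_query_key above can be sketched as a standalone helper (hypothetical; it omits _parse_query_modifier, escaping, and the NOKEY case):

def strip_query_modifier(key):
    # Check each known modifier suffix and strip it from the key.
    for suffix in ('__contains', '__startswith', '__endswith',
                   '__range', '__lte', '__gte', '__lt', '__gt'):
        if key.endswith(suffix):
            return key[:-len(suffix)], suffix[2:]
    return key, None

assert strip_query_modifier('age__gte') == ('age', 'gte')
assert strip_query_modifier('name') == ('name', None)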
def set_device_policy(device_policy):
if device_policy == 'silent':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT
elif device_policy == 'silent_for_int32':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32
elif device_policy == 'warn':
context.context().device_policy = context.DEVICE_PLACEMENT_WARN
elif device_policy == 'explicit':
context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT
elif device_policy is None:
context.context().device_policy = None
else:
raise ValueError(f'Invalid argument `device_policy`: {device_policy!r}. Please refer to https:
|
Sets the current thread device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
When using the default, an appropriate policy will be picked automatically.
The default policy may change over time.
This function only sets the device policy for the current thread. Any
subsequently started thread will again use the default policy.
Args:
device_policy: A device policy.
Valid values:
- None: Switch to a system default.
- 'warn': Copies the tensors which are not on the right device and logs a
warning.
- 'explicit': Raises an error if the placement is not as required.
- 'silent': Silently copies the tensors. Note that this may hide
performance problems as there is no notification provided when
operations are blocked on the tensor being copied between devices.
- 'silent_for_int32': silently copies `int32` tensors, raising errors on
the other ones.
Raises:
ValueError: If an invalid `device_policy` is passed.
|
github-repos
|
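As a usage sketch, assuming the function above is the one exposed publicly as tf.config.experimental.set_device_policy:

import tensorflow as tf

# Silently copy int32 tensors between devices, but raise for other dtypes.
tf.config.experimental.set_device_policy('silent_for_int32')
print(tf.config.experimental.get_device_policy())  # 'silent_for_int32'

# Passing None restores the system default policy for this thread.
tf.config.experimental.set_device_policy(None)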
def WriteUInt160(self, value):
if (type(value) is UInt160):
value.Serialize(self)
else:
raise Exception('value must be UInt160 instance ')
|
Write a UInt160 type to the stream.
Args:
value (UInt160):
Raises:
Exception: when `value` is not of neocore.UInt160 type.
|
codesearchnet
|
def CopyFrom(self, other_msg):
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
|
Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
|
juraj-google-style
|
def _update_section_state(line_info, state):
section_updated = False
google_section_permitted = _google_section_permitted(line_info, state)
google_section = google_section_permitted and _google_section(line_info)
if google_section:
state.section.format = Formats.GOOGLE
state.section.title = google_section
line_info.remaining = _get_after_google_header(line_info)
line_info.remaining_raw = line_info.remaining
section_updated = True
rst_section = _rst_section(line_info)
if rst_section:
state.section.format = Formats.RST
state.section.title = rst_section
line_info.remaining = _get_after_directive(line_info)
line_info.remaining_raw = line_info.remaining
section_updated = True
numpy_section = _numpy_section(line_info)
if numpy_section:
state.section.format = Formats.NUMPY
state.section.title = numpy_section
line_info.remaining = ''
line_info.remaining_raw = line_info.remaining
section_updated = True
if section_updated:
state.section.new = True
state.section.indentation = line_info.indentation
state.section.line1_indentation = line_info.next.indentation
else:
state.section.new = False
|
Uses line_info to determine the current section of the docstring.
Updates state and line_info.remaining.
Args:
line_info: Information about the current line.
state: The state of the parser.
|
github-repos
|
def load_qrandom():
fname = 'datasets/qrandom.npy'
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f)
|
Loads a set of 10000 random numbers generated by qrandom.
This dataset can be used when you want to do some limited tests with "true"
random data without an internet connection.
Returns:
int array
the dataset
|
codesearchnet
|
def output_reference(self, name):
if (name not in self.output_names):
raise ValueError('Invalid output "{}"'.format(name))
return Reference(step_name=self.name_in_workflow, output_name=name)
|
Return a reference to the given output for use in an input
of a next Step.
For a Step named `echo` that has an output called `echoed`, the
reference `echo/echoed` is returned.
Args:
name (str): the name of the Step output
Raises:
ValueError: The name provided is not a valid output name for this
Step.
|
codesearchnet
|
def __init__(self, graph_def, input_tensors, output_tensors, input_arrays_with_shape=None, output_arrays=None, experimental_debug_info_func=None):
super(TFLiteConverter, self).__init__(graph_def, input_tensors, output_tensors, input_arrays_with_shape, output_arrays, experimental_debug_info_func)
|
Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes (e.g., [("foo" : [1, 16,
16, 3])]). Use only when graph cannot be loaded into TensorFlow and when
`input_tensors` and `output_tensors` are None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
Raises:
ValueError: Invalid arguments.
|
github-repos
|
def coder_benchmark_factory(coder, generate_fn):
class CoderBenchmark(object):
def __init__(self, num_elements_per_benchmark):
self._coder = coders.IterableCoder(coder)
self._list = [generate_fn() for _ in range(num_elements_per_benchmark)]
def __call__(self):
_ = self._coder.decode(self._coder.encode(self._list))
CoderBenchmark.__name__ = '%s, %s' % (generate_fn.__name__, str(coder))
return CoderBenchmark
|
Creates a benchmark that encodes and decodes a list of elements.
Args:
coder: coder to use to encode an element.
generate_fn: a callable that generates an element.
|
github-repos
|
def _pack(formatstring, value):
_checkString(formatstring, description='formatstring', minlength=1)
try:
result = struct.pack(formatstring, value)
except:
errortext = 'The value to send is probably out of range, as the num-to-bytestring conversion failed.'
errortext += ' Value: {0!r} Struct format code is: {1}'
raise ValueError(errortext.format(value, formatstring))
if sys.version_info[0] > 2:
return str(result, encoding='latin1')
return result
|
Pack a value into a bytestring.
Uses the built-in :mod:`struct` Python module.
Args:
* formatstring (str): String for the packing. See the :mod:`struct` module for details.
* value (depends on formatstring): The value to be packed
Returns:
A bytestring (str).
Raises:
ValueError
Note that the :mod:`struct` module produces byte buffers for Python3,
but bytestrings for Python2. This is compensated for automatically.
|
juraj-google-style
|
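A standalone sketch of what _pack above delegates to the struct module (omitting the argument checks and the custom error message):

import struct

# Pack a 16-bit big-endian unsigned integer, e.g. a Modbus register value.
packed = struct.pack('>H', 770)
print(packed)                          # b'\x03\x02'
print(str(packed, encoding='latin1'))  # same bytes as a str, like the Python 3 branch above

# Out-of-range values raise struct.error, which _pack re-raises as ValueError.
try:
    struct.pack('>H', 70000)
except struct.error as err:
    print('out of range:', err)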
def __init__(self, xid=None, command=None, flags=None, meter_id=None,
bands=None):
super().__init__(xid)
self.command = command
self.flags = flags
self.meter_id = meter_id
self.bands = bands
|
Create a MeterMod with the optional parameters below.
Args:
xid (int): Headers transaction id. Defaults to random.
command (MeterModCommand): One of OFPMC_*.
flags (MeterFlags): One of OFPMF_*.
meter_id (int): Meter instance.
bands (MeterBandHeader): The bands length is inferred from the
length field in the header.
|
juraj-google-style
|
def _einsum_v1_parse_and_resolve_equation(equation, input_shapes):
equation = equation.replace(' ', '')
match = re.match('^([a-zA-Z,.]+)(->[a-zA-Z.]*)?$', equation)
if not match:
raise ValueError(f'Indices have incorrect format. Received: {equation}.')
input_axis_labels = match.group(1).split(',')
output_axis_labels = match.group(2)[2:] if match.group(2) else None
if len(input_shapes) != len(input_axis_labels):
raise ValueError(f'Got {len(input_shapes)} arguments for equation "{equation}", expecting {len(input_axis_labels)}.')
ellipsis_axes = ''
if '...' in equation:
unused = ''.join((c for c in string.ascii_letters if c not in ''.join(input_axis_labels)))
for i, ax in enumerate(input_axis_labels):
if '...' in ax:
parts = ax.split('...')
if len(parts) != 2:
raise ValueError(f'Unable to resolve ellipsis. Excess number found: {len(parts) - 1} vs 1.')
if input_shapes[i].ndims is None:
raise ValueError('Unable to statically infer ellipsis axes. The input shapes has a dynamic dimensionality.')
n = input_shapes[i].ndims - len(''.join(parts))
if n < 0:
raise ValueError('Ellipses lengths do not match.')
if len(unused) < n:
raise ValueError('Unable to resolve ellipsis, too many distinct labels.')
replace_axes = unused[-n:] if n > 0 else ''
input_axis_labels[i] = input_axis_labels[i].replace('...', replace_axes)
if len(replace_axes) > len(ellipsis_axes):
ellipsis_axes = replace_axes
if any(('.' in ax for ax in input_axis_labels)):
raise ValueError(f'Period "." found outside of ellipsis in input {input_axis_labels}.')
if output_axis_labels is not None:
output_axis_labels = output_axis_labels.replace('...', ellipsis_axes)
if '.' in output_axis_labels:
raise ValueError(f'Period "." found outside of ellipsis in output {output_axis_labels}.')
if output_axis_labels is None:
axis_labels = set(''.join(input_axis_labels)) - set(ellipsis_axes)
indices = ''.join(sorted(axis_labels))
counts = {ax: 0 for ax in indices}
for axes_ in input_axis_labels:
for ax in axes_:
if ax not in ellipsis_axes:
counts[ax] += 1
output_axis_labels = ellipsis_axes + ''.join(sorted((ax for ax in axis_labels if counts[ax] == 1)))
return (input_axis_labels, output_axis_labels)
|
Helper for einsum() that splits/resolves inputs & outputs.
Args:
equation: Equation string given as argument to einsum().
input_shapes: List of the shapes of all inputs given to einsum()
Returns:
input_axis_labels, output_axis_labels where:
input_axis_labels: List of length len(input_shapes) of strings
representing the character label for each dimension of each given input,
resolving any broadcast (...) axes,
output_axis_labels: A string of character labels for each axes of output
tensor, filling in missing output subscripts and broadcast axes.
Raises:
ValueError: If the equation is in an incorrect format, an incorrect number of
inputs given or broadcast axes "..." or output axes could not be resolved.
|
github-repos
|
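The resolution rules above are what tf.einsum applies to its equation string; a small usage sketch of the same cases through the public API:

import numpy as np
import tensorflow as tf

a = tf.constant(np.ones((2, 3)))
b = tf.constant(np.ones((3, 4)))

# 'ij,jk->ik': labels resolve to inputs ['ij', 'jk'] and output 'ik'.
print(tf.einsum('ij,jk->ik', a, b).shape)        # (2, 4)

# No explicit output: labels appearing exactly once ('i' and 'k') form the
# implied output 'ik', as described above.
print(tf.einsum('ij,jk', a, b).shape)            # (2, 4)

# Ellipsis broadcasting: '...' picks up the leading batch dimension.
c = tf.constant(np.ones((5, 2, 3)))
print(tf.einsum('...ij,jk->...ik', c, b).shape)  # (5, 2, 4)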
def update(self, rid, data, raise_on_error=True):
cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}
return self.ds.put(rid, cache_data, raise_on_error)
|
Write updated cache data to the DataStore.
Args:
rid (str): The record identifier.
data (dict): The record data.
raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.
Returns:
object : Python request response.
|
juraj-google-style
|
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
self._ParseMRUListKey(parser_mediator, registry_key, codepage=codepage)
|
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
|
juraj-google-style
|
def expected_h(nvals, fit="RANSAC"):
rsvals = [expected_rs(n) for n in nvals]
poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
return poly[0]
|
Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
Args:
nvals (iterable of int):
the values of n used to calculate the individual (R/S)_n
KWargs:
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
Returns:
float:
expected h for white noise
|
juraj-google-style
|
def _ParsePlistKeyValue(self, knowledge_base, name, value):
if not knowledge_base.GetValue('keyboard_layout'):
if name in self._PLIST_KEYS:
if isinstance(value, (list, tuple)):
value = value[0]
_, _, keyboard_layout = value.rpartition('.')
knowledge_base.SetValue('keyboard_layout', keyboard_layout)
|
Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
|
juraj-google-style
|
def _log_device_compatibility_check(policy_name, gpu_details_list):
if policy_name != 'mixed_float16':
return
supported_device_strs = []
unsupported_device_strs = []
for details in gpu_details_list:
name = details.get('device_name', 'Unknown GPU')
cc = details.get('compute_capability')
if cc:
device_str = '%s, compute capability %s.%s' % (name, cc[0], cc[1])
if cc >= (7, 0):
supported_device_strs.append(device_str)
else:
unsupported_device_strs.append(device_str)
else:
unsupported_device_strs.append(name + ', no compute capability (probably not an Nvidia GPU)')
if unsupported_device_strs:
warning_str = _COMPAT_CHECK_WARNING_PREFIX + '\n'
if supported_device_strs:
warning_str += 'Some of your GPUs may run slowly with dtype policy mixed_float16 because they do not all have compute capability of at least 7.0. Your GPUs:\n'
elif len(unsupported_device_strs) == 1:
warning_str += 'Your GPU may run slowly with dtype policy mixed_float16 because it does not have compute capability of at least 7.0. Your GPU:\n'
else:
warning_str += 'Your GPUs may run slowly with dtype policy mixed_float16 because they do not have compute capability of at least 7.0. Your GPUs:\n'
for device_str in _dedup_strings(supported_device_strs + unsupported_device_strs):
warning_str += ' ' + device_str + '\n'
warning_str += 'See https:
warning_str += _COMPAT_CHECK_WARNING_SUFFIX
tf_logging.warning(warning_str)
elif not supported_device_strs:
tf_logging.warning('%s\nThe dtype policy mixed_float16 may run slowly because this machine does not have a GPU. Only Nvidia GPUs with compute capability of at least 7.0 run quickly with mixed_float16.\n%s' % (_COMPAT_CHECK_WARNING_PREFIX, _COMPAT_CHECK_WARNING_SUFFIX))
elif len(supported_device_strs) == 1:
tf_logging.info('%s\nYour GPU will likely run quickly with dtype policy mixed_float16 as it has compute capability of at least 7.0. Your GPU: %s' % (_COMPAT_CHECK_OK_PREFIX, supported_device_strs[0]))
else:
tf_logging.info('%s\nYour GPUs will likely run quickly with dtype policy mixed_float16 as they all have compute capability of at least 7.0' % _COMPAT_CHECK_OK_PREFIX)
|
Logs a compatibility check if the devices support the policy.
Currently only logs for the policy mixed_float16.
Args:
policy_name: The name of the dtype policy.
gpu_details_list: A list of dicts, one dict per GPU. Each dict
is the device details for a GPU, as returned by
`tf.config.experimental.get_device_details()`.
|
github-repos
|
def load(self, validate=True):
self._load()
try:
self.config = self._load_config(self.system_config_file)
user = self._load_config(self.global_config_file)
config = self._load_config(self.config_file)
local = self._load_config(self.config_local_file)
for conf in [user, config, local]:
self.config = self._merge(self.config, conf)
if validate:
self.config = Schema(self.SCHEMA).validate(self.config)
self.config = configobj.ConfigObj(self.config, write_empty_values=True)
self.config.filename = self.config_file
self._resolve_paths(self.config, self.config_file)
except Exception as ex:
raise ConfigError(ex)
|
Loads config from all the config files.
Args:
validate (bool): optional flag to tell dvc if it should validate
the config or just load it as is. 'True' by default.
Raises:
dvc.config.ConfigError: thrown if config has invalid format.
|
codesearchnet
|
def __init__(self, url, access_token, index,
source="parsedmarc", verify=True, timeout=60):
url = urlparse(url)
self.url = "{0}://{1}/services/collector/event/1.0".format(url.scheme,
url.netloc)
self.access_token = access_token.lstrip("Splunk ")
self.index = index
self.host = socket.getfqdn()
self.source = source
self.session = requests.Session()
self.timeout = timeout
self.session.verify = verify
self._common_data = dict(host=self.host, source=self.source,
index=self.index)
self.session.headers = {
"User-Agent": "parsedmarc/{0}".format(__version__),
"Authorization": "Splunk {0}".format(self.access_token)
}
|
Initializes the HECClient
Args:
url (str): The URL of the HEC
access_token (str): The HEC access token
index (str): The name of the index
source (str): The source name
verify (bool): Verify SSL certificates
timeout (float): Number of seconds to wait for the server to send
data before giving up
|
juraj-google-style
|
def comment(data, what):
data = data.splitlines()
data = map((lambda x: (('#' + x) if what in x else x)), data)
return '\n'.join(data)
|
Comments out lines containing `what` in string `data`.
Args:
data (str): Configuration file in string.
what (str): Line which will be commented out.
Returns:
str: Configuration file with commented `what`.
|
codesearchnet
|
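Assuming the reconstructed line above (prefixing matching lines with '#'), usage of comment would look roughly like:

conf = "port = 8080\nhost = localhost"
print(comment(conf, "port"))
# #port = 8080
# host = localhost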
def expect_false(condition, msg, extras=None):
try:
asserts.assert_false(condition, msg, extras)
except signals.TestSignal as e:
logging.exception('Expected a `False` value, got `True`.')
recorder.add_error(e)
|
Expects an expression to evaluate to False.
If the expectation is not met, the test is marked as fail after its
execution finishes.
Args:
condition: The expression that is evaluated.
msg: A string explaining the details in case of failure.
extras: An optional field for extra information to be included in test
result.
|
github-repos
|
def markdown_to_text(body):
md = markdown.markdown(body, extensions=[
'markdown.extensions.extra'
])
soup = BeautifulSoup(md, 'html.parser')
return soup.get_text()
|
Converts markdown to text.
Args:
body: markdown (or plaintext, or maybe HTML) input
Returns:
Plaintext with all tags and frills removed
|
juraj-google-style
|
def __init__(self, event_timestamp, duration=5):
super(TimeSlice, self).__init__()
self.duration = duration
self.event_timestamp = event_timestamp
|
Initializes the time slice.
Args:
event_timestamp (int): event timestamp of the time slice or None.
duration (Optional[int]): duration of the time slice in minutes.
The default is 5, which represent 2.5 minutes before and 2.5 minutes
after the event timestamp.
|
juraj-google-style
|
def attention_mask_ignore_padding(inputs, dtype=tf.float32):
inputs = rename_length_to_memory_length(inputs)
return (mtf.cast(mtf.equal(inputs, 0), dtype) * (- 1000000000.0))
|
Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim]
|
codesearchnet
|
def clean(self, force: bool=False):
with (yield from self._lock):
for connection in tuple(self.ready):
if (force or connection.closed()):
connection.close()
self.ready.remove(connection)
|
Clean closed connections.
Args:
force: Clean connected and idle connections too.
Coroutine.
|
codesearchnet
|
def protocol_version_to_kmip_version(value):
if not isinstance(value, ProtocolVersion):
return None
if value.major == 1:
if value.minor == 0:
return enums.KMIPVersion.KMIP_1_0
elif value.minor == 1:
return enums.KMIPVersion.KMIP_1_1
elif value.minor == 2:
return enums.KMIPVersion.KMIP_1_2
elif value.minor == 3:
return enums.KMIPVersion.KMIP_1_3
elif value.minor == 4:
return enums.KMIPVersion.KMIP_1_4
else:
return None
else:
return None
|
Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent.
Args:
value (ProtocolVersion): A ProtocolVersion struct to be converted into
a KMIPVersion enumeration.
Returns:
KMIPVersion: The enumeration equivalent of the struct. If the struct
cannot be converted to a valid enumeration, None is returned.
|
juraj-google-style
|
def get(name):
for matcher in matchers:
if ((matcher.__name__ == name) or (getattr(matcher, 'name', None) == name)):
return matcher
|
Returns a matcher instance by class or alias name.
Arguments:
name (str): matcher class name or alias.
Returns:
matcher: found matcher instance, otherwise ``None``.
|
codesearchnet
|
def key_prefix(self) -> str:
return self.__class__.__qualname__
|
Prefix for key to avoid collisions from different Processors.
Defaults to classname. Processor() should override this if, for example, it
accepts arguments that change output of __call__.
Returns:
Prefix that will be added to key.
|
github-repos
|
def NewFromJSON(data):
if data.get('shakes', None):
shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]
else:
shakes = None
return User(id=data.get('id', None), name=data.get('name', None), profile_image_url=data.get('profile_image_url', None), about=data.get('about', None), website=data.get('website', None), shakes=shakes)
|
Create a new User instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a user.
Returns:
A User instance.
|
codesearchnet
|
def apply(self, func, **kwargs):
oid = self.oid
self.call_queue.append((func, kwargs))
def call_queue_closure(oid_obj, call_queues):
for func, kwargs in call_queues:
if isinstance(func, ray.ObjectID):
func = ray.get(func)
if isinstance(kwargs, ray.ObjectID):
kwargs = ray.get(kwargs)
oid_obj = func(oid_obj, **kwargs)
return oid_obj
oid = deploy_ray_func.remote(
call_queue_closure, oid, kwargs={"call_queues": self.call_queue}
)
self.call_queue = []
return PyarrowOnRayFramePartition(oid)
|
Apply a function to the object stored in this partition.
Note: It does not matter if func is callable or an ObjectID. Ray will
handle it correctly either way. The keyword arguments are sent as a
dictionary.
Args:
func: The function to apply.
Returns:
A RayRemotePartition object.
|
juraj-google-style
|
def _GetStatus(self, two_factor=False):
params = ['status']
if two_factor:
params += ['--twofactor']
retcode = self._RunOsLoginControl(params)
if retcode is None:
if self.oslogin_installed:
self.logger.warning('OS Login not installed.')
self.oslogin_installed = False
return None
self.oslogin_installed = True
if not os.path.exists(constants.OSLOGIN_NSS_CACHE):
return False
return not retcode
|
Check whether OS Login is installed.
Args:
two_factor: bool, True if two factor should be enabled.
Returns:
bool, True if OS Login is installed.
|
juraj-google-style
|
def from_representation(self, representation):
object_dict = {}
failed = {}
for (name, field) in self.fields.items():
if (name not in representation):
continue
try:
if ((not isinstance(representation[name], (list, tuple))) and field.many):
raise ValueError('field should be sequence')
source = _source(name, field)
value = representation[name]
if field.many:
if (not field.allow_null):
object_dict[source] = [field.from_representation(single_value) for single_value in value]
else:
object_dict[source] = [(field.from_representation(single_value) if (single_value is not None) else None) for single_value in value]
elif (not field.allow_null):
object_dict[source] = field.from_representation(value)
else:
object_dict[source] = (field.from_representation(value) if value else None)
except ValueError as err:
failed[name] = str(err)
if failed:
try:
self.validate(object_dict)
raise DeserializationError()
except DeserializationError as err:
err.failed = failed
raise
return object_dict
|
Convert given representation dict into internal object.
Internal object is simply a dictionary of values with respect to field
sources.
This does not check that all required fields exist or that values are
valid in terms of value validation
(see: :meth:`BaseField.validate()`), but it still requires every passed
representation value to be a well-formed representation (i.e., the call
to ``field.from_representation`` must succeed).
In case of malformed representation it will run additional validation
only to provide a full detailed exception about all that might be
wrong with provided representation.
Args:
representation (dict): dictionary with field representation values
Raises:
DeserializationError: when at least one representation field
is not formed as expected by field object. Information
about additional forbidden/missing/invalid fields is provided
as well.
|
codesearchnet
|
def _average_precision(self, rec, prec):
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
|
calculate average precision
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
|
juraj-google-style
|
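A worked example of the interpolation above, recomputed standalone with concrete numbers (not a call into the class):

import numpy as np

rec = np.array([0.2, 0.4, 0.4, 0.8])    # cumulated recall
prec = np.array([1.0, 0.5, 0.67, 0.5])  # cumulated precision

mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])  # precision envelope
i = np.where(mrec[1:] != mrec[:-1])[0]              # recall change points
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
print(ap)  # 0.2*1.0 + 0.2*0.67 + 0.4*0.5 + 0.2*0.0 = 0.534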
def old_tracer_correlation( self ):
if self.has_run:
return self.atoms.sum_dr_squared() / float( self.number_of_jumps )
else:
return None
|
Deprecated tracer correlation factor for this simulation.
Args:
None
Returns:
(Float): The tracer correlation factor, f.
Notes:
This function assumes that the jump distance between sites has
been normalised to a=1. If the jump distance is not equal to 1
then the value returned by this function should be divided by a^2.
Even better, use `self.tracer_correlation`.
|
juraj-google-style
|
def listen_now_items(self):
response = self._call(mc_calls.ListenNowGetListenNowItems)
listen_now_item_list = response.body.get('listennow_items', [])
listen_now_items = defaultdict(list)
for item in listen_now_item_list:
type_ = f"{ListenNowItemType(item['type']).name}s"
listen_now_items[type_].append(item)
return dict(listen_now_items)
|
Get a listing of Listen Now items.
Note:
This does not include situations;
use the :meth:`situations` method instead.
Returns:
dict: With ``albums`` and ``stations`` keys of listen now items.
|
codesearchnet
|
def get_instance_type_parameter(self, name: str, node: 'cfg.CFGNode | None'=None):
del name
if node is None:
node = self.ctx.root_node
return self.ctx.new_unsolvable(node)
|
Get a cfg.Variable of the instance's values for the type parameter.
Treating self as an abstract.Instance, gets the variable of its values for
the given type parameter. For the real implementation, see
SimpleValue.get_instance_type_parameter.
Args:
name: The name of the type parameter.
node: Optionally, the current CFG node.
Returns:
A Variable which may be empty.
|
github-repos
|
def concept(self, mechanism, purviews=False, cause_purviews=False, effect_purviews=False):
log.debug('Computing concept %s...', mechanism)
if (not mechanism):
log.debug('Empty concept; returning null concept')
return self.null_concept
cause = self.mic(mechanism, purviews=(cause_purviews or purviews))
effect = self.mie(mechanism, purviews=(effect_purviews or purviews))
log.debug('Found concept %s', mechanism)
return Concept(mechanism=mechanism, cause=cause, effect=effect, subsystem=self)
|
Return the concept specified by a mechanism within this subsystem.
Args:
mechanism (tuple[int]): The candidate set of nodes.
Keyword Args:
purviews (tuple[tuple[int]]): Restrict the possible purviews to
those in this list.
cause_purviews (tuple[tuple[int]]): Restrict the possible cause
purviews to those in this list. Takes precedence over
``purviews``.
effect_purviews (tuple[tuple[int]]): Restrict the possible effect
purviews to those in this list. Takes precedence over
``purviews``.
Returns:
Concept: The pair of maximally irreducible cause/effect repertoires
that constitute the concept specified by the given mechanism.
|
codesearchnet
|
def longest_one_seg_prefix(self, word):
match = self.seg_regex.match(word)
if match:
return match.group(0)
else:
return ''
|
Return longest IPA Unicode prefix of `word`
Args:
word (unicode): word as IPA string
Returns:
unicode: longest single-segment prefix of `word`
|
codesearchnet
|
def initialize_from_assignments(assignments, k, max_assign_weight=0.75):
cells = len(assignments)
init_W = np.zeros((k, cells))
for i, a in enumerate(assignments):
init_W[a, i] = max_assign_weight
for a2 in range(k):
if a2!=a:
init_W[a2, i] = (1-max_assign_weight)/(k-1)
return init_W/init_W.sum(0)
|
Creates a weight initialization matrix from Poisson clustering assignments.
Args:
assignments (array): 1D array of integers, of length cells
k (int): number of states/clusters
max_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75
Returns:
init_W (array): k x cells
|
juraj-google-style
|
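Using the function above, a three-cell example with two clusters and the default max_assign_weight of 0.75:

import numpy as np

assignments = np.array([0, 1, 0])
init_W = initialize_from_assignments(assignments, 2)
print(init_W)
# [[0.75 0.25 0.75]
#  [0.25 0.75 0.25]]   each column sums to 1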
def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):
import shlex
if isinstance(command, (list, tuple)):
raise ValueError('command tuple not supported yet')
args = shlex.split(command, posix=not WIN32)
if verbose is True:
verbose = 2
if verbout is None:
verbout = verbose >= 1
if verbose >= 2:
print('+=== START CMD2 ===')
print('Command:')
print(command)
if verbout:
print('----')
print('Stdout:')
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=shell,
universal_newlines=True)
if detatch:
info = {'proc': proc}
else:
write_fn = sys.stdout.write
flush_fn = sys.stdout.flush
logged_out = []
for line in _run_process(proc):
line_ = line if six.PY2 else line
if len(line_) > 0:
if verbout:
write_fn(line_)
flush_fn()
logged_out.append(line)
try:
from utool import util_str
out = ''.join(logged_out)
except UnicodeDecodeError:
from utool import util_str
logged_out = util_str.ensure_unicode_strlist(logged_out)
out = ''.join(logged_out)
(out_, err) = proc.communicate()
ret = proc.wait()
info = {
'out': out,
'err': err,
'ret': ret,
}
if verbose >= 2:
print('L___ END CMD2 ___')
return info
|
Trying to clean up cmd
Args:
command (str): string command
shell (bool): if True, process is run in shell
detatch (bool): if True, process is run in background
verbose (int): verbosity mode
verbout (bool): if True, `command` writes to stdout in realtime.
defaults to True iff verbose > 0
Returns:
dict: info - information about command status
|
juraj-google-style
|
def expand_value_set_definition(self, value_set: value_set_pb2.ValueSet) -> value_set_pb2.ValueSet:
base_url, request_url = _expansion_request_url_for_value_set_url(value_set.url.value)
request_json = json_format.print_fhir_to_json_string(value_set).encode('utf-8')
session_ = self.create_session()
session_.headers.update({'Accept': 'application/json', 'Content-Type': 'application/json'})
auth = self.auth_per_terminology_server.get(base_url)
if auth is not None:
if isinstance(auth, tuple) and len(auth) == 2:
logging.debug('Using Basic auth for auth')
session_.auth = auth
else:
logging.debug('Using Bearer token for auth')
session_.headers['Authorization'] = auth
logging.info('Expanding value set url: %s version: %s using terminology service: %s', value_set.url.value, value_set.version.value, base_url)
with session_ as session:
def request_func(offset: int) -> requests.Response:
return session.post(request_url, data=request_json, params={'offset': offset})
expanded_value_set = _paginate_expand_value_set_request(request_func, value_set.url.value, value_set.version.value)
logging.info('Retrieved %d codes for value set url: %s version: %s using terminology service: %s', len(expanded_value_set.expansion.contains), value_set.url.value, value_set.version.value, base_url)
return expanded_value_set
|
Expands the value set definition using a terminology server.
Requests an expansion of the given value set from the appropriate
terminology server. Attempts to expand arbitrary value sets by passing their
entire definition to the terminology service for expansion.
If possible, requests expansion from the domain associated with the value
set's URL. If the value set URL is not associated with a known terminology
service, uses the tx.fhir.org service as it is able to expand value sets
defined outside its own specifications.
Retrieves the current definition of the value set from the terminology
service as well as its expansion.
Args:
value_set: The value set to expand.
Returns:
The current definition of the value set from the server with its expanded
codes present.
|
github-repos
|
def get(cls, resource_id):
res = Resource.get(resource_id)
return cls(res) if res else None
|
Returns the class object identified by `resource_id`
Args:
resource_id (str): Unique EC2 Instance ID to load from database
Returns:
EC2 Instance object if found, else None
|
juraj-google-style
|
def plot_heatmap(data, title='Heatmap', show_legend=True, show_labels=True, label_fmt='.2f', vmin=None, vmax=None, figsize=None, label_color='w', cmap='RdBu', **kwargs):
(fig, ax) = plt.subplots(figsize=figsize)
heatmap = ax.pcolor(data, vmin=vmin, vmax=vmax, cmap=cmap)
ax.invert_yaxis()
if (title is not None):
plt.title(title)
if show_legend:
fig.colorbar(heatmap)
if show_labels:
vals = data.values
for x in range(data.shape[0]):
for y in range(data.shape[1]):
plt.text((x + 0.5), (y + 0.5), format(vals[(y, x)], label_fmt), horizontalalignment='center', verticalalignment='center', color=label_color)
plt.yticks(np.arange(0.5, len(data.index), 1), data.index)
plt.xticks(np.arange(0.5, len(data.columns), 1), data.columns)
return plt
|
Plot a heatmap using matplotlib's pcolor.
Args:
* data (DataFrame): DataFrame to plot. Usually small matrix (ex.
correlation matrix).
* title (string): Plot title
* show_legend (bool): Show color legend
* show_labels (bool): Show value labels
* label_fmt (str): Label format string
* vmin (float): Min value for scale
* vmax (float): Max value for scale
* cmap (string): Color map
* kwargs: Passed to matplotlib's pcolor
|
codesearchnet
|
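A usage sketch for plot_heatmap above with a small correlation-matrix-style DataFrame (assumes numpy, pandas, and a matplotlib backend are available):

import numpy as np
import pandas as pd

data = pd.DataFrame(np.corrcoef(np.random.rand(4, 50)),
                    index=list('ABCD'), columns=list('ABCD'))
plt = plot_heatmap(data, title='Correlation', vmin=-1, vmax=1, label_fmt='.2f')
plt.show()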
def render_policy_template(account_number='', app='coreforrest', env='dev', group='forrest', items=None, pipeline_settings=None, region='us-east-1', service=''):
statements = []
rendered_service_policy = get_template('infrastructure/iam/{0}.json.j2'.format(service), account_number=account_number, app=app, env=env, group=group, region=region, items=items, settings=pipeline_settings)
try:
statement_block = json.loads(rendered_service_policy)
statements.append(statement_block)
except ValueError:
LOG.debug('Need to make %s template into list.', service)
statements = json.loads('[{0}]'.format(rendered_service_policy))
LOG.debug('Rendered IAM Policy statements: %s', statements)
return statements
|
Render IAM Policy template.
To support multiple statement blocks, JSON objects can be separated by a
comma. This function attempts to turn any invalid JSON into a valid list
based on this comma separated assumption.
Args:
account_number (str): AWS Account number.
app (str): Name of Spinnaker Application.
env (str): Environment/Account in AWS
group (str): Application group/namespace
items (list): Resource names used to create a Policy per Resource.
region (str): AWS region.
pipeline_settings (dict): Settings from *pipeline.json*.
service (str): Name of cloud service to find matching IAM Policy
template.
Returns:
list: IAM Policy :obj:`dict` statements for the given service.
|
codesearchnet
|
def parse(self, argument):
if isinstance(argument, list):
return argument
elif (not argument):
return []
else:
if self._comma_compat:
argument = argument.replace(',', ' ')
return argument.split()
|
Parses argument as whitespace-separated list of strings.
It also parses argument as comma-separated list of strings if requested.
Args:
argument: string argument passed in the commandline.
Returns:
[str], the parsed flag value.
|
codesearchnet
|
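Hand-worked behaviour of parse above for both modes (illustrative; self._comma_compat would normally be set when the flag is defined):

# parse('a b  c')   -> ['a', 'b', 'c']   whitespace-separated
# parse('a,b c')    -> ['a,b', 'c']      when self._comma_compat is False
# parse('a,b c')    -> ['a', 'b', 'c']   when self._comma_compat is True
# parse('')         -> []
# parse(['x', 'y']) -> ['x', 'y']        lists are returned unchanged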
def must_run_on_cpu(node, pin_variables_on_cpu=False):
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, node_def_pb2.NodeDef)
node_def = node
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
if node_def.op == 'Const':
dtype = node_def.attr['dtype'].type
if dtype == dtypes.string or dtype == dtypes.int32:
return True
if node_def.op in ['DynamicStitch', 'ParallelDynamicStitch']:
dtype = node_def.attr['T'].type
if dtype == dtypes.int32:
return True
if node_def.op in ['Cast']:
dtype = node_def.attr['SrcT'].type
if dtype == dtypes.int32:
return True
return False
|
Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
pin_variables_on_cpu: If True, this function will return False if node_def
represents a variable-related op.
Returns:
True if the given node must run on CPU, otherwise False.
|
github-repos
|
def master(self, task_type=None, task_id=None, rpc_layer=None):
if task_type is not None and task_id is not None:
master = self.cluster_spec().task_address(task_type, task_id)
return format_master_url(master, rpc_layer or self._rpc_layer)
return self._cluster_resolvers[0].master(rpc_layer=rpc_layer)
|
Returns the master address to use when creating a session.
This usually returns the master from the first ClusterResolver passed in,
but you can override this by specifying the task_type and task_id.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (Optional) The type of the TensorFlow task of the master.
task_id: (Optional) The index of the TensorFlow task of the master.
rpc_layer: (Optional) The RPC protocol for the given cluster.
Returns:
The name or URL of the session master.
|
github-repos
|
def clean_all(G, settings):
quiet = settings['quiet']
recon = settings['recon']
sprint = settings['sprint']
error = settings['error']
all_outputs = []
for node in G.nodes(data=True):
if ('output' in node[1]):
for item in get_all_outputs(node[1]):
all_outputs.append(item)
all_outputs.append('.shastore')
retcode = 0
for item in sorted(all_outputs):
if os.path.isfile(item):
if recon:
sprint('Would remove file: {}'.format(item))
continue
sprint("Attempting to remove file '{}'", level='verbose')
try:
os.remove(item)
sprint('Removed file', level='verbose')
except:
errmes = "Error: file '{}' failed to be removed"
error(errmes.format(item))
retcode = 1
if ((not retcode) and (not recon)):
sprint('All clean', color=True)
return retcode
|
Removes all the output files from all targets. Takes
the graph and the settings dictionary as arguments.
Args:
The networkx graph object
The settings dictionary
Returns:
0 if successful
1 if removing even one file failed
|
codesearchnet
|
def language_from_str(language_def, metamodel):
if type(language_def) is not text:
raise TextXError("textX accepts only unicode strings.")
if metamodel.debug:
metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")
if metamodel.debug in textX_parsers:
parser = textX_parsers[metamodel.debug]
else:
parser = ParserPython(textx_model, comment_def=comment,
ignore_case=False,
reduce_tree=False,
memoization=metamodel.memoization,
debug=metamodel.debug,
file=metamodel.file)
textX_parsers[metamodel.debug] = parser
try:
parse_tree = parser.parse(language_def)
except NoMatch as e:
line, col = parser.pos_to_linecol(e.position)
raise TextXSyntaxError(text(e), line, col)
lang_parser = visit_parse_tree(parse_tree,
TextXVisitor(parser, metamodel))
metamodel.validate()
lang_parser.metamodel = metamodel
metamodel._parser_blueprint = lang_parser
if metamodel.debug:
PMDOTExporter().exportFile(
lang_parser.parser_model,
"{}_parser_model.dot".format(metamodel.rootcls.__name__))
return lang_parser
|
Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language.
|
juraj-google-style
|
def __add__(self, r):
if not isinstance(r, TestResult):
raise TypeError('Operand %s of type %s is not a TestResult.' % (r, type(r)))
sum_result = TestResult()
for name in sum_result.__dict__:
r_value = getattr(r, name)
l_value = getattr(self, name)
if isinstance(r_value, list):
setattr(sum_result, name, l_value + r_value)
return sum_result
|
Overrides '+' operator for TestResult class.
The add operator merges two TestResult objects by concatenating all of
their lists together.
Args:
r: another instance of TestResult to be added
Returns:
A TestResult instance that's the sum of two TestResult instances.
|
github-repos
|
def panel(self, panel_id):
if not isinstance(panel_id, ObjectId):
panel_id = ObjectId(panel_id)
panel_obj = self.panel_collection.find_one({'_id': panel_id})
return panel_obj
|
Fetch a gene panel by '_id'.
Args:
panel_id (str, ObjectId): str or ObjectId of document ObjectId
Returns:
dict: panel object or `None` if panel not found
|
juraj-google-style
|
def get_labels(self, **query_params):
labels = self.get_labels_json(self.base_uri, query_params=query_params)
labels_list = []
for label_json in labels:
labels_list.append(self.create_label(label_json))
return labels_list
|
Get the labels attached to this board. Returns a list of Label
objects.
Returns:
list(Label): The labels attached to this board
|
codesearchnet
|
def CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids,
platform_restrictions=None):
campaign_extension_setting_service = client.GetService(
'CampaignExtensionSettingService', 'v201809')
extension_feed_items = [CreateSitelinkFeedItem(feed_items, feed_item_id)
                        for feed_item_id in feed_item_ids]
extension_setting = {
'extensions': extension_feed_items
}
if platform_restrictions:
extension_setting['platformRestrictions'] = platform_restrictions
campaign_extension_setting = {
'campaignId': campaign_feed['campaignId'],
'extensionType': 'SITELINK',
'extensionSetting': extension_setting
}
operation = {
'operand': campaign_extension_setting,
'operator': 'ADD'
}
campaign_extension_setting_service.mutate([operation])
|
Creates the extension setting for a list of Feed Items.
Args:
client: an AdWordsClient instance.
feed_items: the list of all Feed Items.
campaign_feed: the original Campaign Feed.
feed_item_ids: the Ids of the feed items for which extension settings should
be created.
platform_restrictions: an optional Platform Restriction for the Feed items.
|
juraj-google-style
|
def get(self, center, target, date):
if ((center.index, target.index) in self.segments):
(pos, vel) = self.segments[(center.index, target.index)].compute_and_differentiate(date.jd)
sign = 1
else:
(pos, vel) = self.segments[(target.index, center.index)].compute_and_differentiate(date.jd)
sign = (- 1)
if (len(pos) == 3):
pv = np.concatenate((pos, (vel / S_PER_DAY)))
elif (len(pos) == 6):
pv = np.array(pos)
else:
raise JplError('Unknown state vector format')
return ((sign * pv) * 1000)
|
Retrieve the position and velocity of a target with respect to a center
Args:
center (Target):
target (Target):
date (Date):
Return:
numpy.array: length-6 array of position and velocity (in m and m/s) of the
target, with respect to the center
|
codesearchnet
|
def variable_summaries(vars_, groups=None, scope='weights'):
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for var in vars_:
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(var)
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name))
summaries = []
for name, vars_ in grouped.items():
vars_ = [tf.reshape(var, [-1]) for var in vars_]
vars_ = tf.concat(vars_, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, vars_))
return tf.summary.merge(summaries)
|
Create histogram summaries for the provided variables.
Summaries can be grouped via regexes matching variables names.
Args:
vars_: List of variables to summarize.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
|
juraj-google-style
|
def _gen(self, optimized, splitstring):
self.resolved = {}
for nt in self.grammar.grammar_nonterminals_map:
for i in self.grammar.grammar_nonterminals_map[nt]:
if self.grammar.grammar_rules[i][0] not in self.resolved\
and not isinstance(self.grammar.grammar_rules[i][1], (set, tuple)):
if self.grammar.grammar_rules[i][1] != '@empty_set' \
and self.grammar.grammar_rules[i][1] in self.grammar.grammar_terminals:
if splitstring:
self.resolved[
self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]
else:
if self.grammar.grammar_rules[i][1] == '&':
self.resolved[self.grammar.grammar_rules[i][0]] = ' '
else:
self.resolved[
self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]
if self._checkfinal(self.grammar.grammar_rules[i][0]):
return self.resolved[self.grammar.grammar_rules[i][0]]
if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
if self.grammar.grammar_rules[i][1] == '@empty_set':
self.resolved[self.grammar.grammar_rules[i][0]] = ''
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
if optimized and self._check_self_to_empty(
self.grammar.grammar_rules[i][1]):
self.resolved[self.grammar.grammar_rules[i][0]] = ''
if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
change = 1
while change:
change = 0
if not change:
ret = self._check_self_nonterminals(optimized)
if ret == 1:
change = 1
elif ret != 0:
return ret
if not change:
while not change and len(self.bfs_queue) > 0:
myntr = self.bfs_queue.pop()
ret = self._check_self_replicate(myntr)
if ret == 1:
change = 1
elif ret != 0:
return ret
if optimized and self._check_intemediate(
myntr, self.maxstate):
change = 1
break
|
Generates a new random string derived from the nonterminal
Args:
optimized (bool): mode of operation - if enabled, not all
CNF rules are included (to mitigate O(n^3))
splitstring (bool): A boolean for enabling or disabling string splitting
Returns:
str: The generated string
|
juraj-google-style
|
def _case_helper(cond_fn, pred_fn_pairs, default, exclusive, name, allow_python_preds=False, **cond_kwargs):
predicates, actions = _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name, allow_python_preds)
with ops.name_scope(name, 'case', [predicates]):
if default is None:
default, predicates, actions = _case_create_default_action(predicates, actions)
fn = default
for predicate, action in reversed(list(zip(predicates, actions))):
fn = functools.partial(cond_fn, predicate, true_fn=action, false_fn=fn, **cond_kwargs)
if exclusive:
with ops.control_dependencies([_assert_at_most_n_true(predicates, n=1, msg='Input error: exclusive=True')]):
return fn()
else:
return fn()
|
Implementation of case that allows for different cond functions.
Args:
cond_fn: method that has signature and semantics of `cond` above.
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
allow_python_preds: if true, pred_fn_pairs may contain Python bools in
addition to boolean Tensors
**cond_kwargs: keyword arguments that will be passed to `cond_fn`.
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
|
github-repos
|
def __init__(self, package, ad):
super(SnippetClient, self).__init__(app_name=package, ad=ad)
self.package = package
self._ad = ad
self._adb = ad.adb
self._proc = None
|
Initializes a SnippetClient.
Args:
package: (str) The package name of the apk where the snippets are
defined.
ad: (AndroidDevice) the device object associated with this client.
|
juraj-google-style
|
def BreachDepressions(dem, in_place=False, topology='D8'):
if (type(dem) is not rdarray):
raise Exception('A richdem.rdarray or numpy.ndarray is required!')
if (topology not in ['D8', 'D4']):
raise Exception('Unknown topology!')
if (not in_place):
dem = dem.copy()
_AddAnalysis(dem, 'BreachDepressions(dem)')
demw = dem.wrap()
if (topology == 'D8'):
_richdem.rdBreachDepressionsD8(demw)
elif (topology == 'D4'):
_richdem.rdBreachDepressionsD4(demw)
dem.copyFromWrapped(demw)
if (not in_place):
return dem
|
Breaches all depressions in a DEM.
Args:
dem (rdarray): An elevation model
in_place (bool): If True, the DEM is modified in place and there is
no return; otherwise, a new, altered DEM is returned.
topology (string): A topology indicator
Returns:
DEM without depressions.
|
codesearchnet
|
def Parse(self, value):
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
[option.OnCreateOptions() for option in self.options]
self.name = value_line[2]
self.regex = ' '.join(value_line[3:])
else:
self.name = value_line[1]
self.regex = ' '.join(value_line[2:])
if len(self.name) > self.max_name_len:
raise TextFSMTemplateError(
"Invalid Value name '%s' or name too long." % self.name)
if (not re.match(r'^\(.*\)$', self.regex) or
self.regex.count('(') != self.regex.count(')')):
raise TextFSMTemplateError(
"Value '%s' must be contained within a '()' pair." % self.regex)
self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex)
|
Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
|
juraj-google-style
|
def allreduce(self, x, mesh_axes, reduction_fn_string):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
group_assignment = self._create_group_assignment(mesh_axes)
group_size = len(group_assignment[0])
tf_in = x.one_slice
dtype = tf_in.dtype
if dtype == tf.float32:
cast_to_float32 = False
elif dtype == tf.bfloat16:
cast_to_float32 = (
group_size > self._allreduce_in_bfloat16_max_group_size)
else:
tf.logging.info("Casting %s to float32 for allreduce" % tf_in.dtype)
cast_to_float32 = True
if cast_to_float32:
tf_in = tf.cast(tf_in, tf.float32)
tf_out = tpu_ops.cross_replica_sum(tf_in, group_assignment)
if cast_to_float32:
tf_out = tf.cast(tf_out, dtype)
return self.LaidOutTensor([tf_out])
else:
for axis in mesh_axes:
x = self.allconcat(x, axis, 0, stack=True)
x = self.LaidOutTensor(
[mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])
return x
|
Grouped allreduce, (summed across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented.
|
juraj-google-style
|
def do_ams_patch(endpoint, path, body, access_token):
headers = {"Content-Type": json_acceptformat,
"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"Authorization": "Bearer " + access_token,
"x-ms-version" : xmsversion}
response = requests.patch(endpoint, data=body, headers=headers, allow_redirects=False)
if response.status_code == 301:
redirected_url = ''.join([response.headers['location'], path])
response = requests.patch(redirected_url, data=body, headers=headers)
return response
|
Do a AMS PATCH request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body.
|
juraj-google-style
|
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
'/publishers/', publisher,
'/artifacttypes/vmimage/offers/', offer,
'/skus/', sku,
'/versions?api-version=', COMP_API])
return do_get(endpoint, access_token)
|
List available versions for a given publisher's sku.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): VM image publisher. E.g. MicrosoftWindowsServer.
offer (str): VM image offer. E.g. WindowsServer.
sku (str): VM image sku. E.g. 2016-Datacenter.
Returns:
HTTP response with JSON list of versions.
|
juraj-google-style
|
def get_forced_variation(self, experiment_key, user_id):
if (not self.is_valid):
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation'))
return None
if (not validator.is_non_empty_string(experiment_key)):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return None
if (not isinstance(user_id, string_types)):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
forced_variation = self.config.get_forced_variation(experiment_key, user_id)
return (forced_variation.key if forced_variation else None)
|
Gets the forced variation for a given user and experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
Returns:
The forced variation key. None if no forced variation key.
|
codesearchnet
|
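A brief usage sketch for the forced-variation lookup above, assuming an already initialized Optimizely client object; the experiment key and user id are illustrative only.
variation_key = optimizely_client.get_forced_variation("checkout_flow_test", "user_123")  # client assumed
if variation_key is None:
    print("no forced variation set for this user")
else:
    print("forced into variation:", variation_key)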
def insert_before(self, value: Union[RawValue, Value],
raw: bool = False) -> "ArrayEntry":
return ArrayEntry(self.index, self.before, self.after.cons(self.value),
self._cook_value(value, raw), self.parinst,
self.schema_node, datetime.now())
|
Insert a new entry before the receiver.
Args:
value: The value of the new entry.
raw: Flag to be set if `value` is raw.
Returns:
An instance node of the new inserted entry.
|
juraj-google-style
|
def Artifacts(self, os_name=None, cpe=None, label=None):
hit = (lambda x: ((x[0] == x[1]) or (not x[0])))
seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)]
return all(map(hit, seq))
|
Whether the condition applies, modulo host data.
Args:
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
True if os_name, cpe, or label match. Empty values are ignored.
|
codesearchnet
|
async def game(
self, short_name, *, id=None,
text=None, parse_mode=(), link_preview=True,
geo=None, period=60, contact=None, game=False, buttons=None
):
result = types.InputBotInlineResultGame(
id=id or '',
short_name=short_name,
send_message=await self._message(
text=text, parse_mode=parse_mode, link_preview=link_preview,
geo=geo, period=period,
contact=contact,
game=game,
buttons=buttons
)
)
if id is None:
result.id = hashlib.sha256(bytes(result)).hexdigest()
return result
|
Creates a new inline result of game type.
Args:
short_name (`str`):
The short name of the game to use.
|
juraj-google-style
|
def Analyze(self, source_path, output_writer):
if (not os.path.exists(source_path)):
raise RuntimeError('No such source: {0:s}.'.format(source_path))
scan_context = source_scanner.SourceScannerContext()
scan_path_spec = None
scan_step = 0
scan_context.OpenSourcePath(source_path)
while True:
self._source_scanner.Scan(scan_context, auto_recurse=self._auto_recurse, scan_path_spec=scan_path_spec)
if (not scan_context.updated):
break
if (not self._auto_recurse):
output_writer.WriteScanContext(scan_context, scan_step=scan_step)
scan_step += 1
if (scan_context.source_type in [definitions.SOURCE_TYPE_DIRECTORY, definitions.SOURCE_TYPE_FILE]):
break
for locked_scan_node in scan_context.locked_scan_nodes:
self._PromptUserForEncryptedVolumeCredential(scan_context, locked_scan_node, output_writer)
if (not self._auto_recurse):
scan_node = scan_context.GetUnscannedScanNode()
if (not scan_node):
return
scan_path_spec = scan_node.path_spec
if self._auto_recurse:
output_writer.WriteScanContext(scan_context)
|
Analyzes the source.
Args:
source_path (str): the source path.
output_writer (StdoutWriter): the output writer.
Raises:
RuntimeError: if the source path does not exist, or if the source path
is not a file or directory, or if the format of or within the source
file is not supported.
|
codesearchnet
|
def get_site_spd_dos(self, site):
spd_dos = dict()
for orb, pdos in self.pdos[site].items():
orbital_type = _get_orb_type(orb)
if orbital_type in spd_dos:
spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
else:
spd_dos[orbital_type] = pdos
return {orb: Dos(self.efermi, self.energies, densities)
for orb, densities in spd_dos.items()}
|
Get orbital projected Dos of a particular site
Args:
site: Site in Structure associated with CompleteDos.
Returns:
dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
|
juraj-google-style
|
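A short sketch of how the site-projected DOS accessor above might be used, assuming a pymatgen CompleteDos built from a hypothetical vasprun.xml path.
from pymatgen.io.vasp.outputs import Vasprun
vr = Vasprun("vasprun.xml")                 # hypothetical calculation output
complete_dos = vr.complete_dos
site = complete_dos.structure[0]            # first site in the structure
spd = complete_dos.get_site_spd_dos(site)
for orbital_type, dos in spd.items():
    print(orbital_type, len(dos.energies))  # one Dos object per s/p/d channel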
def rot90(array, k=1, axes=(0, 1)):
array = convert_to_tensor(array)
if array.shape.rank < 2:
raise ValueError(f'Input array must have at least 2 dimensions. Received: array.ndim={array.shape.rank}')
if len(axes) != 2 or axes[0] == axes[1]:
raise ValueError(f'Invalid axes: {axes}. Axes must be a tuple of two different dimensions.')
k = k % 4
if k == 0:
return array
axes = tuple((axis if axis >= 0 else array.shape.rank + axis for axis in axes))
perm = [i for i in range(array.shape.rank) if i not in axes]
perm.extend(axes)
array = tf.transpose(array, perm)
shape = tf.shape(array)
non_rot_shape = shape[:-2]
h, w = (shape[-2], shape[-1])
array = tf.reshape(array, tf.concat([[-1], [h, w]], axis=0))
array = tf.reverse(array, axis=[2])
array = tf.transpose(array, [0, 2, 1])
if k % 2 == 1:
final_h, final_w = (w, h)
else:
final_h, final_w = (h, w)
if k > 1:
array = tf.reshape(array, tf.concat([[-1], [final_h, final_w]], axis=0))
for _ in range(k - 1):
array = tf.reverse(array, axis=[2])
array = tf.transpose(array, [0, 2, 1])
final_shape = tf.concat([non_rot_shape, [final_h, final_w]], axis=0)
array = tf.reshape(array, final_shape)
inv_perm = [0] * len(perm)
for i, p in enumerate(perm):
inv_perm[p] = i
array = tf.transpose(array, inv_perm)
return array
|
Rotate an array by 90 degrees in the specified plane.
Args:
array: Input tensor
k: Number of 90-degree rotations (default=1)
axes: Tuple of two axes that define the plane of rotation.
Defaults to (0, 1).
Returns:
Rotated tensor with correct shape transformation
|
github-repos
|
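A small check of the rotation helper above, assuming the surrounding module's rot90 and its TensorFlow backend are importable as shown; a 2x3 tensor rotated once becomes 3x2, matching np.rot90.
import numpy as np
import tensorflow as tf
x = tf.constant(np.arange(6).reshape(2, 3))
y = rot90(x, k=1, axes=(0, 1))
print(y.shape)  # (3, 2): height and width swap for an odd number of rotations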
def update(self, id, newObj):
newObj = self.validation(newObj)
for obj in self.model.db:
if obj["id"] != id:
continue
newObj.pop("id", None)
obj.update(newObj)
obj = self._cast_model(obj)
if not self._batch.enable.is_set():
self.model.save_db()
return obj
return None
|
Update an object
Args:
id (int): Target object ID
newObj (object): New object whose fields will be merged into the original object
Returns:
Object: Updated object
None: If the specified object id is not found
Raises:
MultipleInvalid: If the input object is invalid
|
juraj-google-style
|
def grappler_optimize(graph, fetches=None, config_proto=None):
if config_proto is None:
config_proto = config_pb2.ConfigProto()
config_proto.graph_options.rewrite_options.min_graph_nodes = -1
if fetches is not None:
for fetch in fetches:
graph.add_to_collection('train_op', fetch)
metagraph = saver.export_meta_graph(graph_def=graph.as_graph_def())
return tf_optimizer.OptimizeGraph(config_proto, metagraph)
|
Tries to optimize the provided graph using grappler.
Args:
graph: A `tf.Graph` instance containing the graph to optimize.
fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
Grappler uses the 'train_op' collection to look for fetches, so if not
provided this collection should be non-empty.
config_proto: An optional `tf.compat.v1.ConfigProto` to use when rewriting
the graph.
Returns:
A `tf.compat.v1.GraphDef` containing the rewritten graph.
|
github-repos
|
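A compact sketch of driving the grappler helper above on a tiny TF1-style graph; treat it as illustrative rather than canonical.
import tensorflow.compat.v1 as tf
graph = tf.Graph()
with graph.as_default():
    a = tf.constant(3.0)
    b = a * 2.0 + 1.0
optimized_graph_def = grappler_optimize(graph, fetches=[b])
print(len(optimized_graph_def.node), "nodes after rewriting")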
def apply_inverse(self, y, in_place=False):
return cho_solve(self._factor, y, overwrite_b=in_place)
|
r"""
Apply the inverse of the covariance matrix to the input by solving
.. math::
K\,x = y
Args:
y (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or
matrix :math:`y`.
in_place (Optional[bool]): Should the data in ``y`` be overwritten
with the result :math:`x`? (default: ``False``)
|
juraj-google-style
|
def to_csv(self, filename: str, latexify_names: bool=False):
elements = set()
for entry in self.entries:
elements.update(entry.composition.elements)
elements = sorted(list(elements), key=(lambda a: a.X))
writer = csv.writer(open(filename, 'w'), delimiter=unicode2str(','), quotechar=unicode2str('"'), quoting=csv.QUOTE_MINIMAL)
writer.writerow(((['Name'] + elements) + ['Energy']))
for entry in self.entries:
row = [(entry.name if (not latexify_names) else re.sub('([0-9]+)', '_{\\1}', entry.name))]
row.extend([entry.composition[el] for el in elements])
row.append(entry.energy)
writer.writerow(row)
|
Exports PDEntries to a csv
Args:
filename: Filename to write to.
latexify_names: Format entry names to be LaTeX compatible,
e.g., Li_{2}O
|
codesearchnet
|
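How the exporter above might be called, assuming pd_entries is an instance of the containing class holding PDEntry-like entries; the object and file names are hypothetical.
pd_entries.to_csv("phase_diagram_entries.csv", latexify_names=True)
# writes rows of: Name, <per-element amounts>, Energy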
def default(self, obj):
if isinstance(obj, decimal.Decimal):
obj = format(obj, 'f')
str_digit = text_type(obj)
return (str_digit.rstrip('0').rstrip('.') if ('.' in str_digit) else str_digit)
elif isinstance(obj, phonenumbers.PhoneNumber):
return phonenumbers.format_number(obj, phonenumbers.PhoneNumberFormat.E164)
elif isinstance(obj, pendulum.Pendulum):
return text_type(obj)
elif isinstance(obj, arrow.Arrow):
return text_type(obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
try:
return list(iter(obj))
except TypeError:
pass
return super(FleakerJSONEncoder, self).default(obj)
|
Encode individual objects into their JSON representation.
This method is used by :class:`flask.json.JSONEncoder` to encode
individual items in the JSON object.
Args:
obj (object): Any Python object we wish to convert to JSON.
Returns:
str: The stringified, valid JSON representation of our provided
object.
|
codesearchnet
|
def index(self, entries):
if (not self.is_empty()):
raise ValueError('Cannot call index again on a non-empty index')
if (not isinstance(entries, list)):
queue = deque([])
for (key, minhash, size) in entries:
if (size <= 0):
raise ValueError('Set size must be positive')
queue.append((key, minhash, size))
entries = list(queue)
if (len(entries) == 0):
raise ValueError('entries is empty')
(sizes, counts) = np.array(sorted(Counter((e[2] for e in entries)).most_common())).T
partitions = optimal_partitions(sizes, counts, len(self.indexes))
for (i, (lower, upper)) in enumerate(partitions):
(self.lowers[i], self.uppers[i]) = (lower, upper)
entries.sort(key=(lambda e: e[2]))
curr_part = 0
for (key, minhash, size) in entries:
if (size > self.uppers[curr_part]):
curr_part += 1
for r in self.indexes[curr_part]:
self.indexes[curr_part][r].insert(key, minhash)
|
Index all sets given their keys, MinHashes, and sizes.
It can be called only once after the index is created.
Args:
entries (`iterable` of `tuple`): An iterable of tuples, each must be
in the form of `(key, minhash, size)`, where `key` is the unique
identifier of a set, `minhash` is the MinHash of the set,
and `size` is the size or number of unique items in the set.
Note:
`size` must be positive.
|
codesearchnet
|
def _parse_block_ref(cls, block_ref, deprecated=False):
if (deprecated and (block_ref is None)):
return None
if isinstance(block_ref, LocalId):
return block_ref
is_valid_deprecated = (deprecated and cls.DEPRECATED_ALLOWED_ID_RE.match(block_ref))
is_valid = cls.ALLOWED_ID_RE.match(block_ref)
if (is_valid or is_valid_deprecated):
return block_ref
else:
raise InvalidKeyError(cls, block_ref)
|
Given `block_ref`, tries to parse it into a valid block reference.
Returns `block_ref` if it is valid.
Raises:
InvalidKeyError: if `block_ref` is invalid.
|
codesearchnet
|
def set_card_simple(self, title, content):
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content
|
Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
|
juraj-google-style
|
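A minimal illustration of the card setter above, assuming handler is an instance of the response-building class it belongs to.
handler.set_card_simple(
    title="Daily Briefing",
    content="You have 3 meetings today; the first starts at 9:00.")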
def control(controllee: Union[('cirq.Gate', op_tree.OP_TREE)], control_qubits: Sequence['cirq.Qid']=None, default: Any=RaiseTypeErrorIfNotProvided) -> Any:
if (control_qubits is None):
control_qubits = []
controller = getattr(controllee, 'controlled_by', None)
result = (NotImplemented if (controller is None) else controller(*control_qubits))
if (result is not NotImplemented):
return result
if isinstance(controllee, collections.Iterable):
return op_tree.transform_op_tree(controllee, op_transformation=(lambda op: control(op, control_qubits)))
if (default is not RaiseTypeErrorIfNotProvided):
return default
if (controller is None):
raise TypeError("object of type '{}' has no controlled_by method.".format(type(controllee)))
raise TypeError("object of type '{}' does have a controlled_by method, but it returned NotImplemented.".format(type(controllee)))
|
Returns a Controlled version of the given value, if defined.
Controllees define how to be controlled by defining a method
controlled_by(self, control_qubits). Note that the method may return
NotImplemented to indicate a particular controlling can't be done.
Args:
controllee: The gate, operation or iterable of operations to control.
control_qubits: A list of Qids that would control this controllee.
default: Determines the fallback behavior when `controllee` doesn't
have a controlling defined. If `default` is not set and the
fallback occurs, a TypeError is raised instead.
Returns:
If `controllee` has a controlled_by method that returns something
besides NotImplemented, that result is returned. For an OP_TREE,
transformation is applied at the leaf. Otherwise, if a default value
was specified, the default value is returned.
Raises:
TypeError: `controllee` doesn't have a controlled_by method (or that
method returned NotImplemented) and no `default` was specified.
|
codesearchnet
|
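A quick sketch of the module-level control helper above using Cirq primitives; the qubit names are illustrative.
import cirq
control_qubit, target_qubit = cirq.LineQubit.range(2)
controlled_x = control(cirq.X(target_qubit), [control_qubit])
print(controlled_x)  # same result as cirq.X(target_qubit).controlled_by(control_qubit)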
def validate_composite_type_param(type_param, error_msg_prefix):
possible_classes = [type, TypeConstraint]
is_not_type_constraint = not is_typing_generic(type_param) and (not isinstance(type_param, tuple(possible_classes))) and (type_param is not None) and (getattr(type_param, '__module__', None) != 'typing')
if sys.version_info.major == 3 and sys.version_info.minor >= 10:
if isinstance(type_param, types.UnionType):
is_not_type_constraint = False
if is_not_type_constraint:
raise TypeError('%s must be a non-sequence, a type, or a TypeConstraint. %s is an instance of %s.' % (error_msg_prefix, type_param, type_param.__class__.__name__))
|
Determines if an object is a valid type parameter to a
:class:`CompositeTypeHint`.
Implements sanity checking to disallow things like::
List[1, 2, 3] or Dict[5].
Args:
type_param: An object instance.
error_msg_prefix (:class:`str`): A string prefix used to format an error
message in the case of an exception.
Raises:
TypeError: If the passed **type_param** is not a valid type
parameter for a :class:`CompositeTypeHint`.
|
github-repos
|
def Get(self, name, default=utils.NotAValue, context=None):
if (not self.initialized):
if (name not in self.constants):
raise RuntimeError(("Error while retrieving %s: Configuration hasn't been initialized yet." % name))
if context:
if (isinstance(context, string_types) or (not isinstance(context, collections.Iterable))):
raise ValueError(('context should be a list, got %r' % context))
calc_context = context
cache_key = (name, tuple((context or ())))
if ((default is utils.NotAValue) and (cache_key in self.cache)):
return self.cache[cache_key]
if (context is None):
calc_context = self.context
type_info_obj = self.FindTypeInfo(name)
(_, return_value) = self._GetValue(name, context=calc_context, default=default)
if (return_value is default):
return default
try:
return_value = self.InterpolateValue(return_value, default_section=name.split('.')[0], type_info_obj=type_info_obj, context=calc_context)
except (lexer.ParseError, ValueError) as e:
if (default is not utils.NotAValue):
return default
raise ConfigFormatError(('While parsing %s: %s' % (name, e)))
try:
new_value = type_info_obj.Validate(return_value)
if (new_value is not None):
return_value = new_value
except ValueError:
if (default is not utils.NotAValue):
return default
raise
if (default is utils.NotAValue):
self.cache[cache_key] = return_value
return return_value
|
Get the value contained by the named parameter.
This method applies interpolation/escaping of the named parameter and
retrieves the interpolated value.
Args:
name: The name of the parameter to retrieve. This should be in the format
of "Section.name"
default: If retrieving the value results in an error, return this default.
context: A list of context strings to resolve the configuration. This is a
set of roles the caller is currently executing with. For example (client,
windows). If not specified we take the context from the current thread's
TLS stack.
Returns:
The value of the parameter.
Raises:
ConfigFormatError: if verify=True and the config doesn't validate.
RuntimeError: if a value is retrieved before the config is initialized.
ValueError: if a bad context is passed.
|
codesearchnet
|
def utterances_from_dir(eaf_dir: Path, tier_prefixes: Tuple[(str, ...)]) -> List[Utterance]:
logger.info('EAF from directory: {}, searching with tier_prefixes {}'.format(eaf_dir, tier_prefixes))
utterances = []
for eaf_path in eaf_dir.glob('**/*.eaf'):
eaf_utterances = utterances_from_eaf(eaf_path, tier_prefixes)
utterances.extend(eaf_utterances)
return utterances
|
Returns the utterances found in ELAN files in a directory.
Recursively explores the directory, gathering ELAN files and extracting
utterances from them for tiers that start with the specified prefixes.
Args:
eaf_dir: A path to the directory to be searched
tier_prefixes: Strings matching the start of ELAN tier names that are to
be extracted. For example, if you want to extract from tiers "xv-Jane"
and "xv-Mark", then tier_prefixes = ["xv"] would do the job.
Returns:
A list of Utterance objects.
|
codesearchnet
|
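A short usage sketch for the ELAN directory scanner above; the corpus path and tier prefixes are placeholders.
from pathlib import Path
utterances = utterances_from_dir(Path("corpus/elan"), tier_prefixes=("xv",))
print(len(utterances), "utterances extracted")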
def loads(s, single=False, version=_default_version,
strict=False, errors='warn'):
ms = deserialize(s, version=version, strict=strict, errors=errors)
if single:
return next(ms)
else:
return ms
|
Deserialize SimpleMRS string representations
Args:
s (str): a SimpleMRS string
single (bool): if `True`, only return the first Xmrs object
Returns:
a generator of Xmrs objects (unless *single* is `True`)
|
juraj-google-style
|
def select_with_index(self, selector=IndexedElement, transform=identity):
if self.closed():
raise ValueError('Attempt to call select_with_index() on a closed Queryable.')
if (not is_callable(selector)):
raise TypeError('select_with_index() parameter selector={0} is not callable'.format(repr(selector)))
if (not is_callable(transform)):
raise TypeError('select_with_index() parameter transform={0} is not callable'.format(repr(transform)))
return self._create(itertools.starmap(selector, enumerate(imap(transform, iter(self)))))
|
Transforms each element of a sequence into a new form, incorporating
the index of the element.
Each element is transformed through a selector function which accepts
the element value and its zero-based index in the source sequence. The
generated sequence is lazily evaluated.
Note: This method uses deferred execution.
Args:
selector: A binary function mapping the index of a value in
the source sequence and the element value itself to the
corresponding value in the generated sequence. The two
positional arguments of the selector function are the zero-
based index of the current element and the value of the current
element. The return value should be the corresponding value in
the result sequence. The default selector produces an IndexedElement
containing the index and the element giving this function
similar behaviour to the built-in enumerate().
Returns:
A Queryable whose elements are the result of invoking the selector
function on each element of the source sequence
Raises:
ValueError: If this Queryable has been closed.
TypeError: If selector is not callable.
|
codesearchnet
|
def _ParseMFTAttribute(self, parser_mediator, mft_entry, mft_attribute):
if mft_entry.is_empty() or mft_entry.base_record_file_reference != 0:
return
if mft_attribute.attribute_type in [
self._MFT_ATTRIBUTE_STANDARD_INFORMATION,
self._MFT_ATTRIBUTE_FILE_NAME]:
file_attribute_flags = getattr(
mft_attribute, 'file_attribute_flags', None)
name = getattr(mft_attribute, 'name', None)
parent_file_reference = getattr(
mft_attribute, 'parent_file_reference', None)
event_data = NTFSFileStatEventData()
event_data.attribute_type = mft_attribute.attribute_type
event_data.file_attribute_flags = file_attribute_flags
event_data.file_reference = mft_entry.file_reference
event_data.is_allocated = mft_entry.is_allocated()
event_data.name = name
event_data.parent_file_reference = parent_file_reference
try:
creation_time = mft_attribute.get_creation_time_as_integer()
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the creation timestamp from MFT attribute: '
'0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
creation_time = None
if creation_time is not None:
date_time = self._GetDateTime(creation_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
modification_time = mft_attribute.get_modification_time_as_integer()
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the modification timestamp from MFT attribute: '
'0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
modification_time = None
if modification_time is not None:
date_time = self._GetDateTime(modification_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
access_time = mft_attribute.get_access_time_as_integer()
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the access timestamp from MFT attribute: '
'0x{0:08x} with error: {1!s}').format(
            mft_attribute.attribute_type, exception))
access_time = None
if access_time is not None:
date_time = self._GetDateTime(access_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
entry_modification_time = (
mft_attribute.get_entry_modification_time_as_integer())
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read the entry modification timestamp from MFT '
'attribute: 0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
entry_modification_time = None
if entry_modification_time is not None:
date_time = self._GetDateTime(entry_modification_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
elif mft_attribute.attribute_type == self._MFT_ATTRIBUTE_OBJECT_ID:
display_name = '$MFT: {0:d}-{1:d}'.format(
mft_entry.file_reference & 0xffffffffffff,
mft_entry.file_reference >> 48)
if mft_attribute.droid_file_identifier:
try:
self._ParseDistributedTrackingIdentifier(
parser_mediator, mft_attribute.droid_file_identifier,
display_name)
except (TypeError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read droid file identifier from attribute: 0x{0:08x} '
'with error: {1!s}').format(
mft_attribute.attribute_type, exception))
if mft_attribute.birth_droid_file_identifier:
try:
self._ParseDistributedTrackingIdentifier(
              parser_mediator, mft_attribute.birth_droid_file_identifier,
display_name)
except (TypeError, ValueError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read birth droid file identifier from attribute: '
'0x{0:08x} with error: {1!s}').format(
mft_attribute.attribute_type, exception))
|
Extract data from a NFTS $MFT attribute.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
mft_entry (pyfsntfs.file_entry): MFT entry.
mft_attribute (pyfsntfs.attribute): MFT attribute.
|
juraj-google-style
|
def _ParseSourcePathOption(self, options):
self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION)
if not self._source_path:
raise errors.BadConfigOption('Missing source path.')
self._source_path = os.path.abspath(self._source_path)
|
Parses the source path option.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
juraj-google-style
|
def add_outbound_connection(self, uri):
LOGGER.debug("Adding connection to %s", uri)
conn = OutboundConnection(
connections=self._connections,
endpoint=uri,
dispatcher=self._dispatcher,
zmq_identity=self._zmq_identity,
secured=self._secured,
server_public_key=self._server_public_key,
server_private_key=self._server_private_key,
future_callback_threadpool=self._future_callback_threadpool,
heartbeat=True,
connection_timeout=self._connection_timeout)
self.outbound_connections[uri] = conn
conn.start()
self._add_connection(conn, uri)
connect_message = ConnectionRequest(endpoint=self._public_endpoint)
conn.send(
validator_pb2.Message.NETWORK_CONNECT,
connect_message.SerializeToString(),
callback=partial(
self._connect_callback,
connection=conn,
))
return conn
|
Adds an outbound connection to the network.
Args:
uri (str): The zmq-style (e.g. tcp://hostname:port) uri
to attempt to connect to.
|
juraj-google-style
|
def __eq__(self, other):
res = False
if len(self) == len(other):
if np.all(self._z == other.z) and np.all(self._x == other.x):
res = True
return res
|
Return True if all Pauli terms are equal.
Args:
other (Pauli): other pauli
Returns:
bool: are self and other equal.
|
juraj-google-style
|
def imresize(img, size, return_scale=False, interpolation='bilinear'):
(h, w) = img.shape[:2]
resized_img = cv2.resize(img, size, interpolation=interp_codes[interpolation])
if (not return_scale):
return resized_img
else:
w_scale = (size[0] / w)
h_scale = (size[1] / h)
return (resized_img, w_scale, h_scale)
|
Resize image to a given size.
Args:
img (ndarray): The input image.
size (tuple): Target (w, h).
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos".
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
|
codesearchnet
|
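A small sketch of the resize helper above on a synthetic image; it assumes OpenCV and the module's interp_codes table are available as in the snippet.
import numpy as np
img = np.zeros((480, 640, 3), dtype=np.uint8)            # synthetic H x W x C image
resized, w_scale, h_scale = imresize(img, (320, 240), return_scale=True)
print(resized.shape, w_scale, h_scale)                    # (240, 320, 3) 0.5 0.5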
def scan_meta_graph_def(meta_graph_def, op_denylist):
ops_in_metagraph = set(meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))
denylisted_ops = op_denylist & ops_in_metagraph
if denylisted_ops:
print('MetaGraph with tag set %s contains the following denylisted ops:' % meta_graph_def.meta_info_def.tags, denylisted_ops)
else:
print('MetaGraph with tag set %s does not contain the default denylisted ops:' % meta_graph_def.meta_info_def.tags, op_denylist)
|
Scans meta_graph_def and reports if there are ops on denylist.
Prints the denylisted ops if any are present, or a success message if none are found.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
op_denylist: set of ops to scan for.
|
github-repos
|
def report_error(self, read_tuple_name, error_name, wrong='', message='', warning=False):
if ((not self.report_only_first) or (error_name not in self.reported_errors)):
print('\t'.join([('error' if (warning == False) else 'warning'), read_tuple_name, error_name, wrong, message]))
self.reported_errors.add(error_name)
if warning:
self.warning_has_been_reported = True
else:
self.error_has_been_reported = True
|
Report an error.
Args:
read_tuple_name (str): Name of the read tuple.
error_name (str): Name of the error.
wrong (str): What is wrong.
message (str): Additional message to be printed.
warning (bool): Warning (not an error).
|
codesearchnet
|
def assign_device(cls, core):
return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MAXIMAL, tile_assignment_dimensions=[1], tile_assignment_devices=[core]))
|
Returns an AssignDevice sharding attribute.
This causes an op to be computed in its entirety only on one core in
the XLA device.
Args:
core: The core to assign this Op to.
|
github-repos
|
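A one-line sketch of requesting maximal sharding onto a single core with the classmethod above; the core index is arbitrary.
sharding = Sharding.assign_device(0)  # pin the annotated op entirely to core 0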
def get_url_distribution(self, params=None):
params = params or {}
all_responses = {}
api_name = 'virustotal-url-distribution'
response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution')
self._extract_response_chunks(all_responses, response_chunks, api_name)
return all_responses
|
Retrieves a live feed with the latest URLs submitted to VT.
Args:
params: a dictionary with names and values for optional arguments
Returns:
A dict with the VT report.
|
juraj-google-style
|
def func_load(code, defaults=None, closure=None, globs=None):
if isinstance(code, (tuple, list)):
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
def dummy_fn():
value
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple((ensure_value_to_cell(_) for _ in closure))
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure)
|
Deserializes a user-defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
|
github-repos
|
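A round-trip sketch for the deserializer above; it assumes a matching func_dump counterpart (as in Keras' generic utilities) produced the (code, defaults, closure) triple.
def add(a, b=1):
    return a + b
payload = func_dump(add)    # assumed counterpart serializer
restored = func_load(payload)
print(restored(2))          # 3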
def restore(self, state):
self._clear()
self._parseUserInfo({'labels': state['labels']})
self._parseNodes(state['nodes'])
self._keep_version = state['keep_version']
|
Deserialize saved note data.
Args:
state (dict): Serialized state to load.
|
juraj-google-style
|