code (string, 20 – 4.93k chars) | docstring (string, 33 – 1.27k chars) | source (3 classes)
---|---|---|
def range(self, start_date=None, stop_date=None, field=lambda x: x.xfer):
if start_date is not None and stop_date is not None:
    assert start_date <= stop_date, \
        "Start date must not be later than stop date."
out = Transactions()
for t in self.trans:
date = field(t)
if (start_date is not None) and not (date >= start_date):
continue
if (stop_date is not None) and not (date <= stop_date):
continue
out.append(t)
return out
|
Return a ``Transactions`` object in an inclusive date range.
Args:
start_date: A ``datetime.Date`` object that marks the inclusive
start date for the range.
stop_date: A ``datetime.Date`` object that marks the inclusive end
date for the range.
field: The field to compare start and end dates to. Default is the
``xfer`` field.
Returns:
A ``Transactions`` object.
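A minimal usage sketch of the same inclusive-range filter, with a hypothetical Txn dataclass standing in for the transaction records:
import datetime
from dataclasses import dataclass

@dataclass
class Txn:  # hypothetical stand-in for a transaction record
    xfer: datetime.date

txns = [Txn(datetime.date(2023, 1, d)) for d in (1, 5, 9, 20)]
start, stop = datetime.date(2023, 1, 2), datetime.date(2023, 1, 10)
selected = [t for t in txns
            if (start is None or t.xfer >= start)
            and (stop is None or t.xfer <= stop)]
print([t.xfer.day for t in selected])  # [5, 9]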
|
juraj-google-style
|
def set_room_alias(self, room_id, room_alias):
data = {
"room_id": room_id
}
return self._send("PUT", "/directory/room/{}".format(quote(room_alias)),
content=data)
|
Set an alias for a room id.
Args:
room_id (str): The room id.
room_alias (str): The desired alias name for the room.
|
juraj-google-style
|
def func2md(self, func, clsname=None, names=None, depth=3):
section = "
if names is None:
names = [func.__name__]
funcname = ", ".join(names)
escfuncname = ", ".join(["`%s`" % funcname if funcname.startswith("_") else funcname for funcname in names])
header = "%s%s" % ("%s." % clsname if clsname else "", escfuncname)
path = self.get_src_path(func)
doc = self.doc2md(func)
args, kwargs = [], []
spec = getargspec(func)
vargsname, kwargsname = spec.varargs, spec.keywords
vargs = list(make_iter(spec.args)) if spec.args else []
defaults = list(make_iter(spec.defaults)) if spec.defaults else []
while vargs:
if vargs and vargs[0] == "self":
args.append(vargs.pop(0))
elif len(vargs) > len(defaults):
args.append(vargs.pop(0))
else:
default = defaults.pop(0)
if isinstance(default, str):
default = "\"%s\"" % default
else:
default = "%s" % str(default)
kwargs.append((vargs.pop(0), default))
if args:
args = ", ".join("%s" % arg for arg in args)
if kwargs:
kwargs = ", ".join("%s=%s" % kwarg for kwarg in kwargs)
if args:
kwargs = ", " + kwargs
if vargsname:
vargsname = "*%s" % vargsname
if args or kwargs:
vargsname = ", " + vargsname
if kwargsname:
kwargsname = "**%s" % kwargsname
if args or kwargs or vargsname:
kwargsname = ", " + kwargsname
_FUNCDEF = "{funcname}({args}{kwargs}{vargs}{vkwargs})"
funcdef = _FUNCDEF.format(funcname=funcname,
args=args or "",
kwargs=kwargs or "",
vargs=vargsname or "",
vkwargs=kwargsname or "")
lmax = 90
if len(funcdef) > lmax:
split = funcdef.split("(", 1)
rest = split[1]
args = rest.split(", ")
funcname = "(".join(split[:1]) + "("
lline = len(funcname)
parts = []
for arg in args:
larg = len(arg)
if larg > lmax - 5:
parts.append(arg)
elif lline + larg > lmax:
parts.append("\\\n " + arg)
lline = 0
else:
parts.append(arg)
lline += len(parts[-1])
funcdef = funcname + ", ".join(parts)
string = FUNC_TEMPLATE.format(section=section,
header=header,
funcdef=funcdef,
path=path,
doc=doc if doc else "*No documentation found.*")
return string
|
Takes a function (or method) and documents it.
Args:
clsname (str, optional): class name to prepend to funcname.
depth (int, optional): number of '#' characters used for the section heading
|
juraj-google-style
|
def _ParseFileData(self, knowledge_base, file_object):
text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')
system_product = text_file_object.readline()
system_product = system_product.strip()
if not knowledge_base.GetValue('operating_system_product'):
if system_product:
knowledge_base.SetValue('operating_system_product', system_product)
|
Parses file content (data) for system product preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_object (dfvfs.FileIO): file-like object that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
juraj-google-style
|
def __init__(self, replica_id_in_sync_group=0, num_replicas_in_sync=1):
self._replica_id_in_sync_group = replica_id_in_sync_group
self._num_replicas_in_sync = num_replicas_in_sync
|
Initializes a ValueContext object.
Args:
replica_id_in_sync_group: the current replica_id, should be an int in
[0,`num_replicas_in_sync`).
num_replicas_in_sync: the number of replicas that are in sync.
|
github-repos
|
def __init__(self, resolver_context):
super(SQLiteBlobFile, self).__init__(resolver_context)
self._blob = None
self._current_offset = 0
self._database_object = None
self._number_of_rows = None
self._size = 0
self._table_name = None
|
Initializes the file-like object.
Args:
resolver_context (Context): resolver context.
|
juraj-google-style
|
def merge_results(inputs, arguments=None):
if arguments is None:
arguments = Arguments()
args = arguments.args
kwargs = arguments.kwargs
for i in inputs:
if isinstance(i.result, dict):
kwargs.update({k: v for k, v in i.result.items() if k not in kwargs})
elif isinstance(i.result, list):
args.extend(i.result)
elif isinstance(i.result, Arguments):
args.extend(i.result.args)
kwargs.update({k: v for k, v in i.result.kwargs.items() if k not in kwargs})
else:
args.append(i.result)
return arguments
|
Merges results to form arguments to run(). Each result is handled in one of four ways:
- dictionary: dictionaries get merged and passed as keyword arguments
- list: lists get concatenated to positional arguments
- Arguments: kwargs gets merged and args gets extended
- else: appended and passed as positional arguments
Args:
inputs: the inputs whose results to merge
arguments: an optional existing Arguments object to merge into
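A minimal sketch of the merge rules, using hypothetical Result/Arguments stand-ins rather than the real classes:
class Arguments:  # hypothetical minimal stand-in
    def __init__(self):
        self.args, self.kwargs = [], {}

class Result:  # hypothetical wrapper exposing a .result attribute
    def __init__(self, result):
        self.result = result

inputs = [Result([1, 2]), Result({'a': 3}), Result('tail')]
merged = Arguments()
for i in inputs:
    if isinstance(i.result, dict):
        merged.kwargs.update({k: v for k, v in i.result.items() if k not in merged.kwargs})
    elif isinstance(i.result, list):
        merged.args.extend(i.result)
    else:
        merged.args.append(i.result)
print(merged.args, merged.kwargs)  # [1, 2, 'tail'] {'a': 3}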
|
juraj-google-style
|
def _dispatch_coroutine(self, event, listener, *args, **kwargs):
try:
coro = listener(*args, **kwargs)
except Exception as exc:
if (event == self.LISTENER_ERROR_EVENT):
raise
return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)
asyncio.ensure_future(_try_catch_coro(self, event, listener, coro), loop=self._loop)
|
Schedule a coroutine for execution.
Args:
event (str): The name of the event that triggered this call.
listener (async def): The async def that needs to be executed.
*args: Any number of positional arguments.
**kwargs: Any number of keyword arguments.
The values of *args and **kwargs are passed, unaltered, to the async
def when generating the coro. If there is an exception generating the
coro, such as the wrong number of arguments, the emitter's error event
is triggered. If the triggering event _is_ the emitter's error event
then the exception is reraised. The reraised exception may show in
debug mode for the event loop but is otherwise silently dropped.
|
codesearchnet
|
def diff_linesToChars(self, text1, text2):
lineArray = []
lineHash = {}
lineArray.append('')
def diff_linesToCharsMunge(text):
chars = []
lineStart = 0
lineEnd = -1
while lineEnd < len(text) - 1:
lineEnd = text.find('\n', lineStart)
if lineEnd == -1:
lineEnd = len(text) - 1
line = text[lineStart:lineEnd + 1]
if line in lineHash:
chars.append(chr(lineHash[line]))
else:
if len(lineArray) == maxLines:
line = text[lineStart:]
lineEnd = len(text)
lineArray.append(line)
lineHash[line] = len(lineArray) - 1
chars.append(chr(len(lineArray) - 1))
lineStart = lineEnd + 1
return "".join(chars)
maxLines = 666666
chars1 = diff_linesToCharsMunge(text1)
maxLines = 1114111
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
|
Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
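A small standalone illustration of the line-to-character encoding idea (not the library function itself): every unique line is assigned one character, so a character-level diff can operate on whole lines:
def lines_to_chars(text, line_array, line_hash):
    # Map each line to a single character keyed by first occurrence.
    chars = []
    for line in text.splitlines(keepends=True):
        if line not in line_hash:
            line_array.append(line)
            line_hash[line] = len(line_array) - 1
        chars.append(chr(line_hash[line]))
    return ''.join(chars)

line_array, line_hash = [''], {}  # index 0 intentionally blank
chars1 = lines_to_chars('a\nb\na\n', line_array, line_hash)
chars2 = lines_to_chars('b\na\nc\n', line_array, line_hash)
print(len(chars1), len(chars2), line_array)  # 3 3 ['', 'a\n', 'b\n', 'c\n']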
|
juraj-google-style
|
def get_certificate(self, id):
return Certificate.get_object(api_token=self.token, cert_id=id)
|
Returns a Certificate object by its ID.
Args:
id (str): Certificate ID
|
codesearchnet
|
def decode(self, codes):
return self.pq.decode(codes) @ self.R.T
|
Given PQ-codes, reconstruct original D-dimensional vectors via :func:`PQ.decode`,
and applying an inverse-rotation.
Args:
codes (np.ndarray): PQ-codes with shape=(N, M) and dtype=self.code_dtype.
Each row is a PQ-code
Returns:
np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32
|
juraj-google-style
|
def create_binary_descriptor(descriptor):
func_names = {0: 'copy_latest_a', 1: 'average_a', 2: 'copy_all_a', 3: 'sum_a', 4: 'copy_count_a', 5: 'trigger_streamer', 6: 'call_rpc', 7: 'subtract_afromb'}
func_codes = {y: x for (x, y) in func_names.items()}
(node, inputs, processing) = parse_node_descriptor(descriptor, DeviceModel())
func_code = func_codes.get(processing)
if (func_code is None):
raise ArgumentError('Unknown processing function', function=processing)
(stream_a, trigger_a) = inputs[0]
stream_a = stream_a.encode()
if (len(inputs) == 2):
(stream_b, trigger_b) = inputs[1]
stream_b = stream_b.encode()
else:
(stream_b, trigger_b) = (65535, None)
if (trigger_a is None):
trigger_a = TrueTrigger()
if (trigger_b is None):
trigger_b = TrueTrigger()
ref_a = 0
if isinstance(trigger_a, InputTrigger):
ref_a = trigger_a.reference
ref_b = 0
if isinstance(trigger_b, InputTrigger):
ref_b = trigger_b.reference
trigger_a = _create_binary_trigger(trigger_a)
trigger_b = _create_binary_trigger(trigger_b)
combiner = node.trigger_combiner
bin_desc = struct.pack('<LLHHHBBBB2x', ref_a, ref_b, node.stream.encode(), stream_a, stream_b, func_code, trigger_a, trigger_b, combiner)
return bin_desc
|
Convert a string node descriptor into a 20-byte binary descriptor.
This is the inverse operation of parse_binary_descriptor and composing
the two operations is a noop.
Args:
descriptor (str): A string node descriptor
Returns:
bytes: A 20-byte binary node descriptor.
|
codesearchnet
|
def transform_ast(self, node, ctx):
raise NotImplementedError('subclasses must override this')
|
Performs an actual transformation of a function's AST.
Subclasses must implement this method, and do not usually call it.
Args:
node: One or more ast.AST nodes representing the AST to be transformed.
ctx: transformer.Context.
|
github-repos
|
def loss(probs, labels):
diff = -labels * tf.math.log(probs)
loss = tf.reduce_mean(diff)
return loss
|
Calculates cross entropy loss.
Args:
probs: Class probabilities predicted by the model. The shape is expected
to be (?, 10).
labels: Truth labels for the classes, as one-hot encoded vectors. The
shape is expected to be the same as `probs`.
Returns:
A scalar loss tensor.
|
github-repos
|
def put(self, block_id, priority, pb_type='offline'):
if pb_type not in ('offline', 'realtime'):
raise ValueError('Invalid PB type.')
with self._mutex:
added_time = datetime.datetime.utcnow().isoformat()
entry = (priority, sys.maxsize-self._index, block_id, pb_type,
added_time)
self._index += 1
if self._block_map.get(block_id) is not None:
raise KeyError('ERROR: Block id "{}" already exists in '
'PC PB queue!'.
format(block_id))
self._block_map[block_id] = entry
LOG.debug("Adding PB %s to queue", block_id)
self._queue.append(entry)
self._queue.sort()
self._queue.reverse()
|
Add a Processing Block to the queue.
When a new entry it added, the queue is (re-)sorted by priority
followed by insertion order (older blocks with equal priority are
first).
Args:
block_id (str): Processing Block Identifier
priority (int): Processing Block scheduling priority
(higher values = higher priority)
pb_type (str): Processing Block type (offline, realtime)
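A quick sketch of the ordering produced by the entry tuples above: sorting (priority, sys.maxsize - insertion_index, ...) in reverse puts higher priority first and, within equal priority, older entries first:
import sys

entries = []
for index, (priority, block_id) in enumerate([(1, 'pb-a'), (5, 'pb-b'), (1, 'pb-c')]):
    entries.append((priority, sys.maxsize - index, block_id))
entries.sort(reverse=True)
print([e[2] for e in entries])  # ['pb-b', 'pb-a', 'pb-c']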
|
juraj-google-style
|
def create(options, timer=None, use_deque=True):
if (options is None):
return None
if (not isinstance(options, (CheckOptions, QuotaOptions, ReportOptions))):
_logger.error(u'make_cache(): bad options %s', options)
raise ValueError(u'Invalid options')
if (options.num_entries <= 0):
_logger.debug(u'did not create cache, options was %s', options)
return None
_logger.debug(u'creating a cache from %s', options)
if (options.flush_interval > ZERO_INTERVAL):
ttl = getattr(options, u'expiration', options.flush_interval)
cache_cls = (DequeOutTTLCache if use_deque else cachetools.TTLCache)
return LockedObject(cache_cls(options.num_entries, ttl=ttl.total_seconds(), timer=to_cache_timer(timer)))
cache_cls = (DequeOutLRUCache if use_deque else cachetools.LRUCache)
return LockedObject(cache_cls(options.num_entries))
|
Create a cache specified by ``options``
``options`` is an instance of either
:class:`endpoints_management.control.caches.CheckOptions` or
:class:`endpoints_management.control.caches.ReportOptions`
The returned cache is wrapped in a :class:`LockedObject`, requiring it to
be accessed in a with statement that gives synchronized access
Example:
>>> options = CheckOptions()
>>> synced_cache = make_cache(options)
>>> with synced_cache as cache: # acquire the lock
... cache['a_key'] = 'a_value'
Args:
options (object): an instance of either of the options classes
Returns:
:class:`cachetools.Cache`: the cache implementation specified by options
or None: if options is ``None`` or if options.num_entries < 0
Raises:
ValueError: if options is not a supported type
|
codesearchnet
|
def last_updated(self, path):
try:
return s3io.S3IO(options=self._options).last_updated(path)
except Exception as e:
raise BeamIOError('last_updated operation failed', {path: e})
|
Get UNIX Epoch time in seconds on the FileSystem.
Args:
path: string path of file.
Returns: float UNIX Epoch time
Raises:
``BeamIOError``: if path doesn't exist.
|
github-repos
|
def resize(self, video: 'torch.Tensor', size: SizeDict, interpolation: 'F.InterpolationMode'=None, antialias: bool=True, **kwargs) -> 'torch.Tensor':
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if interpolation == F.InterpolationMode.LANCZOS:
logger.warning_once('You have used fast image processor with LANCZOS resample which not yet supported for torch.Tensor. BICUBIC resample will be used as an alternative. Please fall back to image processor if you want full consistency with the original model.')
interpolation = F.InterpolationMode.BICUBIC
if size.longest_edge:
new_size = get_resize_output_image_size(video, resolution_max_side=size.longest_edge)
elif size.height and size.width:
new_size = (size.height, size.width)
else:
raise ValueError(f"Size must contain 'height' and 'width' keys, or 'longest_edge' key. Got {size}.")
video = F.resize(video, new_size, interpolation=interpolation, antialias=antialias)
max_size = (self.max_image_size['longest_edge'], self.max_image_size['longest_edge'])
video = F.resize(video, max_size, interpolation=interpolation, antialias=antialias)
return video
|
Resize a video to `(size["height"], size["width"])`.
Args:
video (`torch.Tensor`):
Video to resize.
size (`SizeDict`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output video.
resample (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the video e.g. `InterpolationMode.BICUBIC`.
Returns:
`torch.Tensor`: The resized video.
|
github-repos
|
def get_content_metadata(self, enterprise_customer):
content_metadata = OrderedDict()
if enterprise_customer.catalog:
response = self._load_data(self.ENTERPRISE_CUSTOMER_ENDPOINT, detail_resource='courses', resource_id=str(enterprise_customer.uuid), traverse_pagination=True)
for course in response['results']:
for course_run in course['course_runs']:
course_run['content_type'] = 'courserun'
content_metadata[course_run['key']] = course_run
for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():
response = self._load_data(self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT, resource_id=str(enterprise_customer_catalog.uuid), traverse_pagination=True, querystring={'page_size': 1000})
for item in response['results']:
content_id = utils.get_content_metadata_item_id(item)
content_metadata[content_id] = item
return content_metadata.values()
|
Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.
Arguments:
enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.
Returns:
list: List of dicts containing content metadata.
|
codesearchnet
|
def exists(self, path):
try:
return s3io.S3IO(options=self._options).exists(path)
except Exception as e:
raise BeamIOError('exists() operation failed', {path: e})
|
Check if the provided path exists on the FileSystem.
Args:
path: string path that needs to be checked.
Returns: boolean flag indicating if path exists
|
github-repos
|
def get_cmd_handler(self, cmd):
cmd = cmd.replace('-', '_')
handler = getattr(self, cmd, None)
if not handler:
raise BuildException(
'Command {} is not supported as a '
'build command'.format(cmd)
)
return handler
|
Return a handler for cmd.
The handler and the command should have the same name.
See class description for more info about handlers.
Args:
cmd (str): The name of the command
Returns:
callable: which handles cmd
Raises:
lago.build.BuildException: If a handler for cmd doesn't exist
|
juraj-google-style
|
def __getitem__(self, key):
if not isinstance(key, tuple) or len(key) != 2:
raise IndexError('Invalid index: {0}'.format(key))
return self._items.get(key, self._default_value)
|
Returns element of the matrix indexed by given key.
Args:
key: tuple of (row_idx, column_idx)
Returns:
Element of the matrix
Raises:
IndexError: if key is invalid.
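A self-contained sketch of the same dict-backed matrix idea, with a hypothetical SparseMatrix class used purely for illustration:
class SparseMatrix:  # hypothetical minimal stand-in
    def __init__(self, default_value=0):
        self._items, self._default_value = {}, default_value
    def __setitem__(self, key, value):
        self._items[key] = value
    def __getitem__(self, key):
        if not isinstance(key, tuple) or len(key) != 2:
            raise IndexError('Invalid index: {0}'.format(key))
        return self._items.get(key, self._default_value)

m = SparseMatrix()
m[0, 1] = 7
print(m[0, 1], m[3, 3])  # 7 0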
|
juraj-google-style
|
def ResolvePrefix(self, subject, attribute_prefix, timestamp=None, limit=None):
for (_, values) in self.MultiResolvePrefix([subject], attribute_prefix, timestamp=timestamp, limit=limit):
values.sort(key=(lambda a: a[0]))
return values
return []
|
Retrieve the set of values matching this subject's attribute prefix.
Args:
subject: The subject that we will search.
attribute_prefix: The attribute prefix.
timestamp: A range of times for consideration (In microseconds). Can be a
constant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints
(start, end).
limit: The number of results to fetch.
Returns:
A list of (attribute, value string, timestamp).
Values with the same attribute (happens when timestamp is not
NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed
to be ordered in the decreasing timestamp order.
Raises:
AccessError: if anything goes wrong.
|
codesearchnet
|
def delete_ldap_group_link(self, cn, provider=None, **kwargs):
path = ('/groups/%s/ldap_group_links' % self.get_id())
if (provider is not None):
path += ('/%s' % provider)
path += ('/%s' % cn)
self.manager.gitlab.http_delete(path)
|
Delete an LDAP group link.
Args:
cn (str): CN of the LDAP group
provider (str): LDAP provider for the LDAP group
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
|
codesearchnet
|
def write(self, *pb2_obj):
base = len(self._write_buff)
for (idx, obj) in enumerate(pb2_obj):
if ((self._buffer_size > 0) and ((idx + base) != 0) and (((idx + base) % self._buffer_size) == 0)):
self.flush()
self._write_buff.append(obj)
if (self._buffer_size == 0):
self.flush()
|
Write a group of one or more protobuf objects to the file. Multiple
object groups can be written by calling this method several times
before closing stream or exiting the runtime context.
The input protobuf objects get buffered and will be written down when
the number of buffered objects exceeds `self._buffer_size`.
Args:
pb2_obj (*protobuf.message.Message): list of protobuf messages.
|
codesearchnet
|
def freeze_to_tar(script_path, freeze_fn, extra_files=None):
if (not extra_files):
extra_files = []
freeze_dir = tempfile.mkdtemp()
try:
cmds = freeze(script_path, target_dir=freeze_dir)
if freeze_fn.endswith('.tar.gz'):
mode = 'w|gz'
elif freeze_fn.endswith('.tar'):
mode = 'w'
else:
raise NameError(('[%s] must end in .tar or .tar.gz' % freeze_fn))
fp = tarfile.open(freeze_fn, mode)
proj_name = os.path.basename(script_path)
proj_name = proj_name[:proj_name.rfind('.')]
for x in (glob.glob(('%s/dist/%s/*' % (freeze_dir, proj_name))) + extra_files):
fp.add(x, arcname=os.path.basename(x))
fp.close()
finally:
shutil.rmtree(freeze_dir)
return cmds
|
Freezes a script to a .tar or .tar.gz file
The script contains all of the files at the root of the tar
Args:
script_path: Path to python script to be frozen.
freeze_fn: Tar filename (must end in .tar or .tar.gz)
extra_files: List of paths to add to the tar (default is None)
Returns:
List of freeze commands ran
Raises:
subprocess.CalledProcessError: freeze error.
OSError: freeze not found.
NameError: Tar must end in .tar or .tar.gz
|
codesearchnet
|
def _get_ip_unnumbered(self, unnumbered_type, unnumbered_name):
unnumbered_type = self._callback(unnumbered_type, handler='get_config')
unnumbered_name = self._callback(unnumbered_name, handler='get_config')
unnumbered_type = pynos.utilities.return_xml(str(unnumbered_type))
unnumbered_name = pynos.utilities.return_xml(str(unnumbered_name))
return pynos.utilities.merge_xml(unnumbered_type, unnumbered_name)
|
Get and merge the `ip unnumbered` config from an interface.
You should not use this method.
You probably want `Interface.ip_unnumbered`.
Args:
unnumbered_type: XML document with the XML to get the donor type.
unnumbered_name: XML document with the XML to get the donor name.
Returns:
Merged XML document.
Raises:
None
|
juraj-google-style
|
def is_finite_number(value):
if not isinstance(value, (numbers.Integral, float)):
return False
if isinstance(value, bool):
return False
if isinstance(value, float):
if math.isnan(value) or math.isinf(value):
return False
if abs(value) > (2**53):
return False
return True
|
Validates if the given value is a number, enforces
absolute limit of 2^53 and restricts NAN, INF, -INF.
Args:
value: Value to be validated.
Returns:
Boolean: True if value is a number and not NAN, INF, -INF or
greater than absolute limit of 2^53 else False.
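A short usage sketch of the boundary cases, assuming is_finite_number above is in scope:
import math

for value in (10, 2 ** 53, 2 ** 53 + 1, math.nan, math.inf, True, '5'):
    print(repr(value), is_finite_number(value))
# 10 and 2**53 pass; 2**53 + 1, nan, inf, booleans and strings are rejected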
|
juraj-google-style
|
def random_init_mapping(candidate_mapping):
random.seed()
matched_dict = {}
result = []
for c in candidate_mapping:
candidates = list(c)
if not candidates:
result.append(-1)
continue
found = False
while candidates:
rid = random.randint(0, len(candidates) - 1)
candidate = candidates[rid]
if candidate in matched_dict:
candidates.pop(rid)
else:
matched_dict[candidate] = 1
result.append(candidate)
found = True
break
if not found:
result.append(-1)
return result
|
Generate a random node mapping.
Args:
candidate_mapping: candidate node match list
Returns:
randomly-generated node mapping between two AMRs
|
juraj-google-style
|
def init_from_acceptor_bycopying(self, acceptor):
for state in acceptor.states:
for arc in state.arcs:
self.add_arc(state.stateid, arc.nextstate, acceptor.isyms.find(arc.ilabel))
if state.final:
print(state.stateid, 'is final')
self[state.stateid].final = True
|
Initialize this automaton by copying the arcs and final states of the given acceptor.
Args:
acceptor: The acceptor to copy arcs and final states from.
Returns:
None
|
juraj-google-style
|
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(GetRequestPayload, self).read(input_stream, kmip_version=kmip_version)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
self._unique_identifier.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.KEY_FORMAT_TYPE, local_stream):
self._key_format_type = primitives.Enumeration(enum=enums.KeyFormatType, tag=enums.Tags.KEY_FORMAT_TYPE)
self._key_format_type.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.KEY_COMPRESSION_TYPE, local_stream):
self._key_compression_type = primitives.Enumeration(enum=enums.KeyCompressionType, tag=enums.Tags.KEY_COMPRESSION_TYPE)
self._key_compression_type.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.KEY_WRAPPING_SPECIFICATION, local_stream):
self._key_wrapping_specification = objects.KeyWrappingSpecification()
self._key_wrapping_specification.read(local_stream, kmip_version=kmip_version)
self.is_oversized(local_stream)
|
Read the data encoding the Get request payload and decode it into its
constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def list_instances(i_info, param_str, numbered=False):
print(param_str)
for i in i_info:
if numbered:
print('Instance {}'.format(i))
print(' {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}'.format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2))
print(' AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}'.format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame']))
list_tags(i_info[i]['tag'])
debg.dprintx('All Data')
debg.dprintx(i_info, True)
|
Display a list of all instances and their details.
Iterates through all the instances in the dict, and displays
information for each instance.
Args:
i_info (dict): information on instances and details.
param_str (str): the title to display before the list.
numbered (bool): optional - indicates whether the list should be
displayed with numbers before each instance.
This is used when called from user_picklist.
|
codesearchnet
|
def bool(name, default=None, allow_none=False, fallback=None):
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.bool):
return value
elif isinstance(value, builtins.int):
return True if value > 0 else False
elif value is None and allow_none:
return None
else:
value_str = builtins.str(value).lower().strip()
return _strtobool(value_str)
|
Get a boolean based environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
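A simplified standalone sketch of the same idea, with os.environ and a small truthy-string set standing in for the read/_strtobool helpers used above:
import os

def env_bool(name, default=None):
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'y', 'on')

os.environ['FEATURE_ENABLED'] = 'Yes'
print(env_bool('FEATURE_ENABLED'))  # True
print(env_bool('MISSING_FLAG', default=False))  # False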
|
juraj-google-style
|
def calc_intent(self, query):
matches = self.calc_intents(query)
if len(matches) == 0:
return MatchData('', '')
best_match = max(matches, key=lambda x: x.conf)
best_matches = (match for match in matches if match.conf == best_match.conf)
return min(best_matches, key=lambda x: sum(map(len, x.matches.values())))
|
Tests all the intents against the query and returns
match data of the best intent
Args:
query (str): Input sentence to test against intents
Returns:
MatchData: Best intent match
|
juraj-google-style
|
def set_permitted_ip(address=None, deploy=False):
if (not address):
raise CommandExecutionError('Address option must not be empty.')
ret = {}
query = {'type': 'config', 'action': 'set', 'xpath': "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip", 'element': "<entry name='{0}'></entry>".format(address)}
ret.update(__proxy__['panos.call'](query))
if (deploy is True):
ret.update(commit())
return ret
|
Add an IPv4 address or network to the permitted IP list.
CLI Example:
Args:
address (str): The IPv4 address or network to allow access to add to the Palo Alto device.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_permitted_ip 10.0.0.1
salt '*' panos.set_permitted_ip 10.0.0.0/24
salt '*' panos.set_permitted_ip 10.0.0.1 deploy=True
|
codesearchnet
|
def unpack_binary(self, offset, length=False):
if not length:
return bytes("".encode("ascii"))
o = self._offset + offset
try:
return bytes(struct.unpack_from("<{}s".format(length), self._buf, o)[0])
except struct.error:
raise OverrunBufferException(o, len(self._buf))
|
Returns raw binary data from the relative offset with the given length.
Arguments:
- `offset`: The relative offset from the start of the block.
- `length`: The length of the binary blob. If zero, an empty
bytes object is returned.
Throws:
- `OverrunBufferException`
|
juraj-google-style
|
def _get_what_to_read_next(fp, previously_read_position, chunk_size):
seek_position = max(previously_read_position - chunk_size, 0)
read_size = chunk_size
while seek_position > 0:
fp.seek(seek_position)
if _is_partially_read_new_line(fp.read(1)):
seek_position -= 1
read_size += 1
else:
break
read_size = min(previously_read_position - seek_position, read_size)
return seek_position, read_size
|
Return information on which file pointer position to read from and how many bytes.
Args:
fp: The file object to read from.
previously_read_position (int): The file pointer position that was read previously.
chunk_size(int): ideal io chunk_size
Returns:
(int, int): The next seek position, how many bytes to read next
|
juraj-google-style
|
def AssertDictType(dct, expected_key_type, expected_value_type):
AssertType(dct, dict)
for (key, value) in iteritems(dct):
AssertType(key, expected_key_type)
AssertType(value, expected_value_type)
|
Ensures that given dictionary is actually a dictionary of specified type.
Args:
dct: A dictionary to assert the type for.
expected_key_type: An expected type for dictionary keys.
expected_value_type: An expected type for dictionary values.
Raises:
TypeError: If given dictionary is not really a dictionary or not all its
keys and values have the expected type.
|
codesearchnet
|
def is40(msg):
if allzeros(msg):
return False
d = hex2bin(data(msg))
if wrongstatus(d, 1, 2, 13):
return False
if wrongstatus(d, 14, 15, 26):
return False
if wrongstatus(d, 27, 28, 39):
return False
if wrongstatus(d, 48, 49, 51):
return False
if wrongstatus(d, 54, 55, 56):
return False
if (bin2int(d[39:47]) != 0):
return False
if (bin2int(d[51:53]) != 0):
return False
return True
|
Check if a message is likely to be BDS code 4,0
Args:
msg (String): 28-character hexadecimal message string
Returns:
bool: True or False
|
codesearchnet
|
def join(self, basepath, *paths):
if not basepath.startswith(GCSFileSystem.GCS_PREFIX):
raise ValueError('Basepath %r must be GCS path.' % basepath)
path = basepath
for p in paths:
path = path.rstrip('/') + '/' + p.lstrip('/')
return path
|
Join two or more pathname components for the filesystem
Args:
basepath: string path of the first component of the path
paths: path components to be added
Returns: full path after combining all the passed components
|
github-repos
|
def to_microseconds(value):
if not value.tzinfo:
value = value.replace(tzinfo=pytz.utc)
value = value.astimezone(pytz.utc)
return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond
|
Convert a datetime to microseconds since the unix epoch.
Args:
value (datetime.datetime): The datetime to convert.
Returns:
int: Microseconds since the unix epoch.
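An equivalent sanity check using only the standard library (datetime.timezone.utc instead of pytz); this is an illustration, not the library's implementation:
import datetime

def to_us(value):
    if value.tzinfo is None:
        value = value.replace(tzinfo=datetime.timezone.utc)
    epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
    return int((value - epoch) / datetime.timedelta(microseconds=1))

print(to_us(datetime.datetime(1970, 1, 1, 0, 0, 1, 500000)))  # 1500000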
|
juraj-google-style
|
def edge_length_sum(self, terminal=True, internal=True):
if (not isinstance(terminal, bool)):
raise TypeError('terminal must be a bool')
if (not isinstance(internal, bool)):
raise TypeError('internal must be a bool')
return sum((node.edge_length for node in self.traverse_preorder() if ((node.edge_length is not None) and ((terminal and node.is_leaf()) or (internal and (not node.is_leaf()))))))
|
Compute the sum of all selected edge lengths in this ``Tree``
Args:
``terminal`` (``bool``): ``True`` to include terminal branches, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal branches, otherwise ``False``
Returns:
``float``: Sum of all selected edge lengths in this ``Tree``
|
codesearchnet
|
def maybe_download_from_drive(directory, filename, url):
if (not tf.gfile.Exists(directory)):
tf.logging.info(('Creating directory %s' % directory))
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
confirm_token = None
if tf.gfile.Exists(filepath):
tf.logging.info(('Not downloading, file already found: %s' % filepath))
return filepath
confirm_token = None
session = requests.Session()
response = session.get(url, stream=True)
for (k, v) in response.cookies.items():
if k.startswith('download_warning'):
confirm_token = v
if confirm_token:
url = ((url + '&confirm=') + confirm_token)
tf.logging.info(('Downloading %s to %s' % (url, filepath)))
response = session.get(url, stream=True)
chunk_size = (16 * 1024)
with open(filepath, 'wb') as f:
for chunk in response.iter_content(chunk_size):
if chunk:
f.write(chunk)
print()
statinfo = os.stat(filepath)
tf.logging.info(('Successfully downloaded %s, %s bytes.' % (filename, statinfo.st_size)))
return filepath
|
Download filename from Google drive unless it's already in directory.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
url: URL to download from.
Returns:
The path to the downloaded file.
|
codesearchnet
|
def check_python_requirements(path_or_repo_id, requirements_file='requirements.txt', **kwargs):
failed = []
try:
requirements = cached_file(path_or_repo_id=path_or_repo_id, filename=requirements_file, **kwargs)
with open(requirements, 'r') as f:
requirements = f.readlines()
for requirement in requirements:
requirement = requirement.strip()
if not requirement or requirement.startswith('#'):
continue
try:
package_name, delimiter, version_number = split_package_version(requirement)
except ValueError:
package_name = requirement
delimiter, version_number = (None, None)
try:
local_package_version = importlib.metadata.version(package_name)
except importlib.metadata.PackageNotFoundError:
failed.append(f'{requirement} (installed: None)')
continue
if delimiter is not None and version_number is not None:
is_satisfied = VersionComparison.from_string(delimiter)(version.parse(local_package_version), version.parse(version_number))
else:
is_satisfied = True
if not is_satisfied:
failed.append(f'{requirement} (installed: {local_package_version})')
except OSError:
pass
if failed:
raise ImportError(f'Missing requirements in your local environment for `{path_or_repo_id}`:\n' + '\n'.join(failed))
|
Tries to locate `requirements_file` in a local folder or repo, and confirms that the environment has all the
python dependencies installed.
Args:
path_or_repo_id (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a model repo on huggingface.co.
- a path to a *directory* potentially containing the file.
kwargs (`Dict[str, Any]`, *optional*):
Additional arguments to pass to `cached_file`.
|
github-repos
|
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
logging.info(u'Downloading: %s to: %s', file_obj.urn, target_path)
target_file = open(target_path, 'wb')
file_obj.Seek(0)
count = 0
data_buffer = file_obj.Read(buffer_size)
while data_buffer:
target_file.write(data_buffer)
data_buffer = file_obj.Read(buffer_size)
count += 1
if (not (count % 3)):
logging.debug(u'Downloading: %s: %s done', file_obj.urn, utils.FormatNumberAsString((count * buffer_size)))
target_file.close()
|
Download an aff4 file to the local filesystem overwriting it if it exists.
Args:
file_obj: An aff4 object that supports the file interface (Read, Seek)
target_path: Full path of file to write to.
buffer_size: Read in chunks of this size.
|
codesearchnet
|
def VerifyRow(self, parser_mediator, row):
if len(row) != 8:
return False
row_bytes = codecs.encode(row['date'], parser_mediator.codepage)
if row_bytes.startswith(b'\xef\xbb\xbf'):
row['date'] = row['date'][3:]
self._encoding = 'utf-8'
try:
timestamp = self._ConvertToTimestamp(
row['date'], row['time'], parser_mediator.timezone)
except errors.TimestampError:
return False
if timestamp is None:
return False
if (not 'Access Protection' in row['status'] and
not 'Would be blocked' in row['status']):
return False
return True
|
Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
|
juraj-google-style
|
def __parameter_default(self, field):
if field.default:
if isinstance(field, messages.EnumField):
return field.default.name
elif isinstance(field, messages.BooleanField):
return 'true' if field.default else 'false'
else:
return str(field.default)
|
Returns default value of field if it has one.
Args:
field: A simple field.
Returns:
The default value of the field, if any exists, with the exception of an
enum field, which will have its value cast to a string.
|
juraj-google-style
|
def snakecase(string):
string = re.sub('[\\-\\.\\s]', '_', str(string))
if (not string):
return string
return (lowercase(string[0]) + re.sub('[A-Z]', (lambda matched: ('_' + lowercase(matched.group(0)))), string[1:]))
|
Convert string into snake case.
Join punctuation with underscore
Args:
string: String to convert.
Returns:
string: Snake cased string.
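A self-contained sketch of the same conversion, with str.lower standing in for the lowercase helper used above:
import re

def to_snake(string):
    string = re.sub(r'[\-\.\s]', '_', str(string))
    if not string:
        return string
    return string[0].lower() + re.sub(r'[A-Z]', lambda m: '_' + m.group(0).lower(), string[1:])

print(to_snake('FooBar-baz.qux'))  # foo_bar_baz_qux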
|
codesearchnet
|
def merge_strings_files(old_strings_file, new_strings_file):
old_localizable_dict = generate_localization_key_to_entry_dictionary_from_file(old_strings_file)
output_file_elements = []
f = open_strings_file(new_strings_file, 'r+')
for (header_comment, comments, key, value) in extract_header_comment_key_value_tuples_from_file(f):
if (len(header_comment) > 0):
output_file_elements.append(Comment(header_comment))
localize_value = value
if (key in old_localizable_dict):
localize_value = old_localizable_dict[key].value
output_file_elements.append(LocalizationEntry(comments, key, localize_value))
f.close()
write_file_elements_to_strings_file(old_strings_file, output_file_elements)
|
Merges the old strings file with the new one.
Args:
old_strings_file (str): The path to the old strings file (previously produced, and possibly altered)
new_strings_file (str): The path to the new strings file (newly produced).
|
codesearchnet
|
def contains(self, key):
path = self.object_path(key)
return (os.path.exists(path) and os.path.isfile(path))
|
Returns whether the object named by `key` exists.
Optimized to only check whether the file object exists.
Args:
key: Key naming the object to check.
Returns:
boolean: whether the object exists
|
codesearchnet
|
def scalarize(function):
def decorated(self, X, *args, **kwargs):
scalar = (not isinstance(X, np.ndarray))
if scalar:
X = np.array([X])
result = function(self, X, *args, **kwargs)
if scalar:
result = result[0]
return result
decorated.__doc__ = function.__doc__
return decorated
|
Allow methods that only accepts 1-d vectors to work with scalars.
Args:
function(callable): Function that accepts and returns vectors.
Returns:
callable: Decorated function that accepts and returns scalars.
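A usage sketch of the decorator, assuming scalarize above is importable, with a toy method that only handles 1-d arrays:
import numpy as np

class Doubler:
    @scalarize
    def double(self, X):
        return X * 2  # written for 1-d np.ndarray input only

d = Doubler()
print(d.double(np.array([1, 2, 3])))  # [2 4 6]
print(d.double(4))                    # 8 (scalar in, scalar out)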
|
codesearchnet
|
def __init__(self, start, stop, value):
self.start = start
self.stop = stop
self.value = value
|
Create a mapped range.
Args:
start: The start of the range, inclusive.
stop: The end of the range, exclusive.
value: The mapped value.
|
juraj-google-style
|
def learn_transportation_mode(track, clf):
for segment in track.segments:
tmodes = segment.transportation_modes
points = segment.points
features = []
labels = []
for tmode in tmodes:
points_part = points[tmode['from']:tmode['to']]
if (len(points_part) > 0):
features.append(extract_features_2(points_part))
labels.append(tmode['label'])
clf.learn(features, labels)
|
Inserts transportation modes of a track into a classifier
Args:
track (:obj:`Track`)
clf (:obj:`Classifier`)
|
codesearchnet
|
def cyclegan_upsample(net, num_outputs, stride, method='conv2d_transpose'):
with tf.variable_scope('upconv'):
net_shape = tf.shape(net)
height = net_shape[1]
width = net_shape[2]
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if (method == 'nn_upsample_conv'):
net = tf.image.resize_nearest_neighbor(net, [(stride[0] * height), (stride[1] * width)])
net = tf.pad(net, spatial_pad_1, 'REFLECT')
net = layers().Conv2D(num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif (method == 'bilinear_upsample_conv'):
net = tf.image.resize_bilinear(net, [(stride[0] * height), (stride[1] * width)])
net = tf.pad(net, spatial_pad_1, 'REFLECT')
net = layers().Conv2D(num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif (method == 'conv2d_transpose'):
net = layers().Conv2DTranspose(num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net)
net = net[:, 1:, 1:, :]
else:
raise ValueError(('Unknown method: [%s]' % method))
return net
|
Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method: 'nn_upsample_conv',
'bilinear_upsample_conv', or 'conv2d_transpose'.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
|
codesearchnet
|
def register(self, name):
def register_func(func):
self.store[name] = func
return func
return register_func
|
Decorator for registering a function with PyPhi.
Args:
name (string): The name of the function
|
juraj-google-style
|
def set_approvers(self, approver_ids=[], approver_group_ids=[], **kwargs):
path = '%s/%s/approvers' % (self._parent.manager.path,
self._parent.get_id())
data = {'approver_ids': approver_ids,
'approver_group_ids': approver_group_ids}
self.gitlab.http_put(path, post_data=data, **kwargs)
|
Change MR-level allowed approvers and approver groups.
Args:
approver_ids (list): User IDs that can approve MRs
approver_group_ids (list): Group IDs whose members can approve MRs
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server failed to perform the request
|
juraj-google-style
|
def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=None):
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
ProcessGlobalSuppresions(lines)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if (file_extension in GetHeaderExtensions()):
CheckForHeaderGuard(filename, clean_lines, error)
for line in range(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
if _IsSourceExtension(file_extension):
CheckHeaderFileIncluded(filename, include_state, error)
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
|
Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
|
codesearchnet
|
def ask_to_proceed_with_overwrite(filepath):
overwrite = input(f'[WARNING] {filepath} already exists - overwrite? [y/n]').strip().lower()
while overwrite not in ('y', 'n'):
overwrite = input('Enter "y" (overwrite) or "n" (cancel).').strip().lower()
if overwrite == 'n':
return False
print_msg('[TIP] Next time specify overwrite=True!')
return True
|
Produces a prompt asking about overwriting a file.
Args:
filepath: the path to the file to be overwritten.
Returns:
True if we can proceed with overwrite, False otherwise.
|
github-repos
|
def _get_bit(self, n, hash_bytes):
if (((hash_bytes[(n // 8)] >> (7 - (n % 8))) & 1) == 1):
return True
return False
|
Determines if the n-th bit of passed bytes is 1 or 0.
Arguments:
n - Zero-based index of the bit to check.
hash_bytes - List of hash byte values for which the n-th bit value
should be checked. Each element of the list should be an integer from
0 to 255.
Returns:
True if the bit is 1. False if the bit is 0.
|
codesearchnet
|
def dumps(messages):
serialized_messages = []
try:
for message in messages:
message_dict = message._dump()
serialized_messages.append(message_dict)
except AttributeError:
_log.error('Improper object for messages serialization.')
raise TypeError('Message have to be instance of Message class or subclass.')
return json.dumps(serialized_messages, sort_keys=True)
|
Serialize messages to a JSON formatted str
Args:
messages (list): The list of messages to serialize. Each message in
the list must be an instance of Message or a subclass.
Returns:
str: Serialized messages.
Raises:
TypeError: If at least one message is not instance of Message class or subclass.
|
codesearchnet
|
def gradient(poly):
return differential(poly, chaospy.poly.collection.basis(1, 1, poly.dim))
|
Gradient of a polynomial.
Args:
poly (Poly) : polynomial to take gradient of.
Returns:
(Poly) : The resulting gradient.
Examples:
>>> q0, q1, q2 = chaospy.variable(3)
>>> poly = 2*q0 + q1*q2
>>> print(chaospy.gradient(poly))
[2, q2, q1]
|
juraj-google-style
|
def map_seqprop_resnums_to_seqprop_resnums(self, resnums, seqprop1, seqprop2):
resnums = ssbio.utils.force_list(resnums)
alignment = self._get_seqprop_to_seqprop_alignment(seqprop1=seqprop1, seqprop2=seqprop2)
mapped = ssbio.protein.sequence.utils.alignment.map_resnum_a_to_resnum_b(resnums=resnums, a_aln=alignment[0], b_aln=alignment[1])
return mapped
|
Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information.
Args:
resnums (int, list): Residue numbers in seqprop1
seqprop1 (SeqProp): SeqProp object the resnums match to
seqprop2 (SeqProp): SeqProp object you want to map the resnums to
Returns:
dict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this
dictionary, that means the residue number cannot be mapped according to alignment!
|
codesearchnet
|
def add_import(self, from_package, import_list):
if from_package:
for item in import_list:
t = self.module_info.process_from_import(from_package, item)
self.type_map[t.new_name] = t.pytd_node
if isinstance(item, tuple) or from_package != 'typing' or self.module_info.module_name == 'protocols':
self.aliases[t.new_name] = t.pytd_alias()
if t.new_name != 'typing':
self.module_path_map[t.new_name] = t.qualified_name
else:
for item in import_list:
t = self.module_info.process_import(item)
if t:
self.aliases[t.new_name] = t.pytd_alias()
|
Add an import.
Args:
from_package: A dotted package name if this is a "from" statement, or None
if it is an "import" statement.
import_list: A list of imported items, which are either strings or pairs
of strings. Pairs are used when items are renamed during import using
"as".
|
github-repos
|
def configure(access_key=None, secret_key=None, logger=None):
if not logger:
logger = log.get_logger('s3')
if not all([access_key, secret_key]):
logger.info('')
access_key = input('AWS Access Key: ')
secret_key = input('AWS Secret Key: ')
_write_config(access_key, secret_key)
logger.info('')
logger.info('Completed writing S3 config file.')
logger.info('')
|
Configures s3cmd prior to first use.
If no arguments are provided, you will be prompted to enter
the access key and secret key interactively.
Args:
access_key (str): AWS access key
secret_key (str): AWS secret key
|
juraj-google-style
|
def _debug_run_and_get_dump(self, sess, fetches, feed_dict=None, debug_ops='DebugIdentity', tolerate_debug_op_creation_failures=False, global_step=-1, validate=True, expected_partition_graph_count=None):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(run_options, sess.graph, debug_ops=debug_ops, debug_urls=self._debug_urls(), tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures, global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count, len(run_metadata.partition_graphs))
return (run_output, debug_data.DebugDumpDir(self._dump_root, partition_graphs=run_metadata.partition_graphs, validate=validate))
|
Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.compat.v1.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
|
github-repos
|
def verify(self, verify_locations: str) -> None:
with open(verify_locations):
pass
try:
self._ocsp_response.basic_verify(verify_locations)
except _nassl.OpenSSLError as e:
if ('certificate verify error' in str(e)):
raise OcspResponseNotTrustedError(verify_locations)
raise
|
Verify that the OCSP response is trusted.
Args:
verify_locations: The file path to a trust store containing pem-formatted certificates, to be used for
validating the OCSP response.
Raises OcspResponseNotTrustedError if the validation failed, i.e. the OCSP response is not trusted.
|
codesearchnet
|
def _generate_multielement_entries(self, entries, forced_include=None, nproc=None):
N = len(self._elt_comp)
total_comp = Composition(self._elt_comp)
forced_include = (forced_include or [])
entry_combos = [itertools.combinations(entries, ((j + 1) - len(forced_include))) for j in range(N)]
entry_combos = itertools.chain.from_iterable(entry_combos)
if forced_include:
entry_combos = [(forced_include + list(ec)) for ec in entry_combos]
entry_combos = filter((lambda x: (total_comp < MultiEntry(x).composition)), entry_combos)
processed_entries = []
total = sum([comb(len(entries), ((j + 1) - len(forced_include))) for j in range(N)])
if (total > 1000000.0):
warnings.warn('Your pourbaix diagram includes {} entries and may take a long time to generate.'.format(total))
if (nproc is not None):
f = partial(self.process_multientry, prod_comp=total_comp)
with Pool(nproc) as p:
processed_entries = list(tqdm(p.imap(f, entry_combos), total=total))
processed_entries = list(filter(bool, processed_entries))
else:
for entry_combo in entry_combos:
processed_entry = self.process_multientry(entry_combo, total_comp)
if (processed_entry is not None):
processed_entries.append(processed_entry)
return processed_entries
|
Create entries for multi-element Pourbaix construction.
This works by finding all possible linear combinations
of entries that can result in the specified composition
from the initialized comp_dict.
Args:
entries ([PourbaixEntries]): list of pourbaix entries
to process into MultiEntries
forced_include ([PourbaixEntries]) list of pourbaix entries
that must be included in multielement entries
nproc (int): number of processes to be used in parallel
treatment of entry combos
|
codesearchnet
|
def _compute_elemwise_op_output_shape(self, shape1, shape2):
if None in [shape1, shape2]:
return None
elif len(shape1) < len(shape2):
return self._compute_elemwise_op_output_shape(shape2, shape1)
elif not shape2:
return shape1
output_shape = list(shape1[:-len(shape2)])
for i, j in zip(shape1[-len(shape2):], shape2):
if i is None or j is None:
output_shape.append(None)
elif i == 1:
output_shape.append(j)
elif j == 1:
output_shape.append(i)
else:
if i != j:
raise ValueError(f'Inputs have incompatible shapes. Received shapes {shape1} and {shape2}')
output_shape.append(i)
return tuple(output_shape)
|
Computes the shape of the resultant of an elementwise operation.
Args:
shape1: Tuple or None. Shape of the first tensor
shape2: Tuple or None. Shape of the second tensor
Returns:
Expected output shape when an element-wise operation is
carried out on 2 tensors with shapes shape1 and shape2.
tuple or None.
Raises:
ValueError: If shape1 and shape2 are not compatible for
element-wise operations.
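As an illustration, the rule mirrors NumPy broadcasting on the trailing dimensions, with None standing in for an unknown (batch) dimension; e.g. shape1=(None, 4, 1) and shape2=(4, 3) give (None, 4, 3):
import numpy as np

print(np.broadcast_shapes((4, 1), (4, 3)))  # (4, 3)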
|
github-repos
|
def set_shard_dimension(self, shard_dimension):
if self._frozen:
if self._shard_dimension != shard_dimension:
raise ValueError("Can't set shard dimension to %d since it has been frozen to use %d." % (shard_dimension, self._shard_dimension))
else:
self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
|
Sets the shard dimension for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
shard_dimension: The shard dimension to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value, or shard_dimension can't be
interpreted as a Dimension.
|
github-repos
|
def _parse_expiry(response_data):
expires_in = response_data.get('expires_in', None)
if (expires_in is not None):
return (_helpers.utcnow() + datetime.timedelta(seconds=expires_in))
else:
return None
|
Parses the expiry field from a response into a datetime.
Args:
response_data (Mapping): The JSON-parsed response data.
Returns:
Optional[datetime]: The expiration or ``None`` if no expiration was
specified.
|
codesearchnet
|
def fit(self, documents, labels, weights=None):
block_groups = np.array([self.blockifier.blockify(doc) for doc in documents])
mask = [self._has_enough_blocks(blocks) for blocks in block_groups]
block_groups = block_groups[mask]
labels = np.concatenate(np.array(labels)[mask])
features_mat = np.concatenate([self.features.fit_transform(blocks)
for blocks in block_groups])
if weights is None:
self.model.fit(features_mat, labels)
else:
weights = np.concatenate(np.array(weights)[mask])
self.model.fit(features_mat, labels, sample_weight=weights)
return self
|
Fit :class`Extractor` features and model to a training dataset.
Args:
documents (List[str])
labels (``np.ndarray``)
weights (``np.ndarray``)
Returns:
:class`Extractor`
|
juraj-google-style
|
def address(self, num):
url_root = "company/{}/registered-office-address"
baseuri = self._BASE_URI + url_root.format(num)
res = self.session.get(baseuri)
self.handle_http_error(res)
return res
|
Search for company addresses by company number.
Args:
num (str): Company number to search on.
|
juraj-google-style
|
def _SymbolStackEndsWith(self, parser_symbol_stack, stack_pattern):
parser_symbol_stack_str = ' '.join((s.type for s in parser_symbol_stack))
stack_pattern_str = ' '.join(stack_pattern)
return parser_symbol_stack_str.endswith(stack_pattern_str)
|
Determines if |stack_pattern| matches the end of |parser_symbol_stack|.
Args:
parser_symbol_stack: The symbol stack from parser.symstack left on the
parser when an error was generated.
stack_pattern: A list of strings to match against the token 'type' in
|parser_symbol_stack|. (e.g. ['TRANSITION', 'NAME', 'params', '='])
|
github-repos
|
def _as_row_partitions(self):
rank = self.rank
if rank is None:
raise ValueError('rank must be known for _as_row_partitions')
elif rank < 1:
raise ValueError('rank must be >= 1 for _as_row_partitions')
fully_ragged = self._with_num_row_partitions(rank - 1)
return fully_ragged.row_partitions
|
Returns row partitions representing this shape.
In order to represent a shape as row partitions, the rank of the shape
must be known, and the shape must have rank at least one.
Returns:
A list of RowPartition objects.
Raises:
ValueError, if the shape cannot be represented by RowPartitions.
|
github-repos
|
def AddMapping(self, filename, new_mapping):
for field in self._REQUIRED_MAPPING_FIELDS:
if (field not in new_mapping):
raise problems.InvalidMapping(field)
if (filename in self.GetKnownFilenames()):
raise problems.DuplicateMapping(filename)
self._file_mapping[filename] = new_mapping
|
Adds an entry to the list of known filenames.
Args:
filename: The filename whose mapping is being added.
new_mapping: A dictionary with the mapping to add. Must contain all
fields in _REQUIRED_MAPPING_FIELDS.
Raises:
DuplicateMapping if the filename already exists in the mapping
InvalidMapping if not all required fields are present
|
codesearchnet
|
def _compute_edge_nodes(nodes, degree):
dimension, _ = np.shape(nodes)
nodes1 = np.empty((dimension, degree + 1), order="F")
nodes2 = np.empty((dimension, degree + 1), order="F")
nodes3 = np.empty((dimension, degree + 1), order="F")
curr2 = degree
curr3 = -1
for i in six.moves.xrange(degree + 1):
nodes1[:, i] = nodes[:, i]
nodes2[:, i] = nodes[:, curr2]
nodes3[:, i] = nodes[:, curr3]
curr2 += degree - i
curr3 -= i + 2
return nodes1, nodes2, nodes3
|
Compute the nodes of each edge of a surface.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Control point nodes that define the surface.
degree (int): The degree of the surface defined by ``nodes``.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]: The nodes in
the edges of the surface.
|
juraj-google-style
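A small sanity check of the indexing for a degree-1 (linear) triangle, assuming ``_compute_edge_nodes`` from above is in scope; each edge of a linear triangle is just a pair of corner nodes.
import numpy as np

# Corners p0=(0,0), p1=(1,0), p2=(0,1); one column per node.
nodes = np.asfortranarray([[0.0, 1.0, 0.0],
                           [0.0, 0.0, 1.0]])
e1, e2, e3 = _compute_edge_nodes(nodes, 1)  # assumes the function above is importable
print(e1)  # [[0. 1.] [0. 0.]] -> edge p0-p1
print(e2)  # [[1. 0.] [0. 1.]] -> edge p1-p2
print(e3)  # [[0. 0.] [1. 0.]] -> edge p2-p0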
|
def read_tree_nexus(nexus):
if (not isinstance(nexus, str)):
raise TypeError('nexus must be a str')
if nexus.lower().endswith('.gz'):
f = gopen(expanduser(nexus))
elif isfile(expanduser(nexus)):
f = open(expanduser(nexus))
else:
f = nexus.splitlines()
trees = dict()
for line in f:
if isinstance(line, bytes):
l = line.decode().strip()
else:
l = line.strip()
if l.lower().startswith('tree '):
i = l.index('=')
left = l[:i].strip()
right = l[(i + 1):].strip()
name = ' '.join(left.split(' ')[1:])
trees[name] = read_tree_newick(right)
if hasattr(f, 'close'):
f.close()
return trees
|
Read a tree from a Nexus string or file
Args:
``nexus`` (``str``): Either a Nexus string or the path to a Nexus file (plain-text or gzipped)
Returns:
``dict`` of ``Tree``: A dictionary of the trees represented by ``nexus``, where keys are tree names (``str``) and values are ``Tree`` objects
|
codesearchnet
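A hedged usage sketch, assuming the treeswift-style helpers above are importable under these names.
from treeswift import read_tree_nexus  # assumed import path

nexus_text = (
    "#NEXUS\n"
    "BEGIN TREES;\n"
    "    TREE tree1 = ((A:1,B:1):1,C:2);\n"
    "END;\n"
)
trees = read_tree_nexus(nexus_text)      # accepts a Nexus string or a file path
for name, tree in trees.items():
    print(name, tree.newick())           # .newick() assumed from the Tree API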
|
def chempot_plot_addons(self, plt, xrange, ref_el, axes, pad=2.4,
rect=[-0.047, 0, 0.84, 1], ylim=[]):
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
axes.set_xlabel(r"Chemical potential $\Delta\mu_{%s}$ (eV)" % (ref_el))
ylim = ylim if ylim else axes.get_ylim()
plt.xticks(rotation=60)
plt.ylim(ylim)
xlim = axes.get_xlim()
plt.xlim(xlim)
plt.tight_layout(pad=pad, rect=rect)
plt.plot([xrange[0], xrange[0]], ylim, '--k')
plt.plot([xrange[1], xrange[1]], ylim, '--k')
xy = [np.mean([xrange[1]]), np.mean(ylim)]
plt.annotate("%s-rich" % (ref_el), xy=xy,
xytext=xy, rotation=90, fontsize=17)
xy = [np.mean([xlim[0]]), np.mean(ylim)]
plt.annotate("%s-poor" % (ref_el), xy=xy,
xytext=xy, rotation=90, fontsize=17)
return plt
|
Helper function to make a chempot plot look nicer.
Args:
plt (Plot): Plot to add things to.
xrange (list): xlim parameter.
ref_el (str): Element of the referenced chempot.
axes (axes): Axes object from matplotlib.
pad (float): For tight layout.
rect (list): For tight layout.
ylim (list): ylim parameter.
Returns:
Plot: Modified plot with addons.
|
juraj-google-style
|
def update_configuration(self, configuration):
return self._client.update(configuration, uri=self.URI + "/configuration")
|
Updates the metrics configuration with the new values. Overwrites the existing configuration.
Args:
configuration (dict):
Dictionary with a list of objects which contain frequency, sample interval, and source type for each
resource-type.
Returns:
dict: The current configuration for which metrics are being relayed.
|
juraj-google-style
|
def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
invalid_txn_info = {'id': txn_id}
if message is not None:
invalid_txn_info['message'] = message
if extended_data is not None:
invalid_txn_info['extended_data'] = extended_data
with self._lock:
for batch_id, txn_ids in self._batch_info.items():
if txn_id in txn_ids:
if batch_id not in self._invalid:
self._invalid[batch_id] = [invalid_txn_info]
else:
self._invalid[batch_id].append(invalid_txn_info)
self._pending.discard(batch_id)
self._update_observers(batch_id, ClientBatchStatus.INVALID)
return
|
Adds a batch id to the invalid cache along with the id of the
transaction that was rejected and any error message or extended data.
Removes that batch id from the pending set. The cache is only
temporary, and the batch info will be purged after one hour.
Args:
txn_id (str): The id of the invalid transaction
message (str, optional): Message explaining why the transaction is invalid
extended_data (bytes, optional): Additional error data
|
juraj-google-style
|
def _group_centroid(mol, ilabels, group_atoms):
(c1x, c1y, c1z) = (0.0, 0.0, 0.0)
for i in group_atoms:
orig_idx = ilabels[(i - 1)]
oa1 = mol.GetAtom(orig_idx)
c1x += float(oa1.x())
c1y += float(oa1.y())
c1z += float(oa1.z())
num_atoms = len(group_atoms)
c1x /= num_atoms
c1y /= num_atoms
c1z /= num_atoms
return (c1x, c1y, c1z)
|
Calculate the centroids of a group atoms indexed by the labels of inchi
Args:
mol: The molecule. OpenBabel OBMol object
ilabels: inchi label map
group_atoms: 1-based indices (into ``ilabels``) of the atoms in the group
Returns:
Centroid. Tuple (x, y, z)
|
codesearchnet
|
def read(self, bands=None, **kwargs):
arr = self
if (bands is not None):
arr = self[(bands, ...)]
return arr.compute(scheduler=threaded_get)
|
Reads data from a dask array and returns the computed ndarray matching the given bands
Args:
bands (list): band indices to read from the image. Returns bands in the order specified in the list of bands.
Returns:
ndarray: a numpy array of image data
|
codesearchnet
|
def slice_constant(data, batch_size=32, name='constant_data', global_step=None):
with tf.name_scope(name):
all_data = tf.convert_to_tensor(data)
global_step = global_step or bookkeeper.global_step()
    count = len(data) // batch_size
extra = len(data) - count * batch_size
if extra:
offset = tf.mod(global_step, count)
return tf.slice(all_data, offset * batch_size, batch_size)
else:
offset = tf.mod(global_step, count + 1)
return tf.slice(all_data, offset * batch_size,
tf.where(tf.equal(offset, count), extra, batch_size))
|
Provide a slice based on the global_step.
This is useful when the entire data array can be stored in memory because it
allows you to feed the data very efficiently.
Args:
data: A numpy array or tensor.
batch_size: The batch size for the produced data.
name: An optional name for this data.
global_step: A global step variable that is used to read the data. If None
then the default prettytensor global_step is used.
Returns:
A tensor that produces the given data.
|
juraj-google-style
|
def get(self, identifier, default=None):
if isinstance(identifier, int):
values = list(self.data.values())
if (0 <= identifier < len(values)):
return values[identifier]
else:
return default
return super(Overlay, self).get(identifier, default)
|
Get a layer in the Overlay.
Get a particular layer in the Overlay using its path string
or an integer index.
Args:
identifier: Index or path string of the item to return
default: Value to return if no item is found
Returns:
The indexed layer of the Overlay
|
codesearchnet
|
def parallel(devices, fn, *args, **kwargs):
if not isinstance(devices, list):
raise ValueError("devices must be a list")
for x in list(args) + list(six.itervalues(kwargs)):
if not isinstance(x, list) or len(x) != len(devices):
raise ValueError(
"Argument not a list with same length as devices "
"arg=%s devices=%s" % (x, devices))
ret = []
for i, device in enumerate(devices):
with tf.device(device):
with tf.variable_scope("parallel_%d" % i):
my_args = [x[i] for x in args]
my_kwargs = {k: v[i] for k, v in six.iteritems(kwargs)}
ret.append(fn(*my_args, **my_kwargs))
return ret
|
Call a function once on each device.
Args:
devices: a list of n devices
fn: a function
*args: arguments, each of which is a list of length n
**kwargs: keyword-args, each of which is a list of length n
Returns:
a list of length n
Raises:
ValueError: if the arguments are not all lists of length n
|
juraj-google-style
|
def _ConvertToTimestamp(self, date, time):
if (len(date) != 8):
raise ValueError('Unsupported length of date string: {0!s}'.format(repr(date)))
if ((len(time) < 3) or (len(time) > 4)):
raise ValueError('Unsupported length of time string: {0!s}'.format(repr(time)))
try:
year = int(date[:4], 10)
month = int(date[4:6], 10)
day = int(date[6:8], 10)
except (TypeError, ValueError):
raise ValueError('Unable to parse date string: {0!s}'.format(repr(date)))
try:
hour = int(time[:(- 2)], 10)
minutes = int(time[(- 2):], 10)
except (TypeError, ValueError):
      raise ValueError('Unable to parse time string: {0!s}'.format(repr(time)))
time_elements_tuple = (year, month, day, hour, minutes, 0)
date_time = dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE
return date_time
|
Converts date and time strings into a timestamp.
Recent versions of Office Scan write a log field with a Unix timestamp.
Older versions may not write this field; their logs only provide a date and
a time expressed in the local time zone. This functions handles the latter
case.
Args:
date (str): date as an 8-character string in the YYYYMMDD format.
time (str): time as a 3 or 4-character string in the [H]HMM format or a
6-character string in the HHMMSS format.
Returns:
dfdatetime_time_elements.TimeElements: the parsed timestamp.
Raises:
ValueError: if the date and time values cannot be parsed.
|
codesearchnet
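A self-contained illustration of the same string slicing, using plain ``datetime`` instead of dfdatetime (so the local-time flag and minute precision are not modeled).
import datetime

def parse_officescan_date_time(date, time):
    # date: 'YYYYMMDD'; time: '[H]HMM' (3 or 4 characters)
    year, month, day = int(date[:4]), int(date[4:6]), int(date[6:8])
    hour, minutes = int(time[:-2]), int(time[-2:])
    return datetime.datetime(year, month, day, hour, minutes)

print(parse_officescan_date_time("20190315", "0930"))  # 2019-03-15 09:30:00
print(parse_officescan_date_time("20190315", "930"))   # 2019-03-15 09:30:00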
|
def from_yang(self, text: str) -> ScalarValue:
res = self.parse_value(text)
if res is None:
raise InvalidArgument(text)
return res
|
Parse value specified in a YANG module.
Args:
text: String representation of the value.
Returns:
The parsed value.
Raises:
InvalidArgument: If the receiver type cannot parse the text.
|
juraj-google-style
|
def set_name(self, vid, name=None, default=False, disable=False):
cmds = self.command_builder('name', value=name, default=default, disable=disable)
return self.configure_vlan(vid, cmds)
|
Configures the VLAN name
EosVersion:
4.13.7M
Args:
vid (str): The VLAN ID to configure
name (str): The value to configure the vlan name
default (bool): Defaults the VLAN ID name
disable (bool): Negates the VLAN ID name
Returns:
True if the operation was successful otherwise False
|
codesearchnet
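A hedged usage sketch assuming a pyeapi-style node object; the connection name is a placeholder.
import pyeapi

node = pyeapi.connect_to("veos01")          # name assumed to exist in eapi.conf
vlans = node.api("vlans")
vlans.set_name("100", name="engineering")   # configure the VLAN name
vlans.set_name("100", default=True)         # or reset it to the default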
|
def join(self, delimiter=' ', overlap_threshold=0.1):
sorted_by_start = sorted(self.labels)
concat_values = []
last_label_end = None
for label in sorted_by_start:
if ((last_label_end is None) or (((last_label_end - label.start) < overlap_threshold) and (last_label_end > 0))):
concat_values.append(label.value)
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return delimiter.join(concat_values)
|
Return a string with all labels concatenated together.
The order of the labels is defined by the start of the label.
If the overlap between two labels is greater than ``overlap_threshold``,
a ``ValueError`` is raised.
Args:
delimiter (str): A string to join two consecutive labels.
overlap_threshold (float): Maximum overlap between two consecutive labels.
Returns:
str: A string with all labels concatenated together.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c', start=7.0, end=10.2),
>>> Label('d', start=10.3, end=14.0)
>>> ])
>>> ll.join(' - ')
'a - b - c - d'
|
codesearchnet
|
def get_tick(self, index):
name = self.tick_name(index)
if (name is None):
return [pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY), 0]
return [Error.NO_ERROR, self.ticks[name]]
|
Get a tick's interval.
Args:
index (int): The index of the tick that you want to fetch.
Returns:
int, int: Error code and the tick's interval in seconds.
A value of 0 means that the tick is disabled.
|
codesearchnet
|
def _Enum(docstring, *names):
enums = dict(zip(names, range(len(names))))
    reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
enums['__doc__'] = docstring
return type('Enum', (object,), enums)
|
Utility to generate enum classes used by annotations.
Args:
docstring: Docstring for the generated enum class.
*names: Enum names.
Returns:
A class that contains enum names as attributes.
|
juraj-google-style
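A self-contained demo of the generated enum class, assuming ``_Enum`` as defined above is in scope.
Color = _Enum("Primary display colors.", "RED", "GREEN", "BLUE")
print(Color.RED, Color.GREEN, Color.BLUE)   # 0 1 2
print(Color.reverse_mapping[2])             # 'BLUE'
print(Color.__doc__)                        # 'Primary display colors.'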
|
def adjust_saturation(img, saturation_factor):
if (not _is_pil_image(img)):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
|
Adjust color saturation of an image.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
|
codesearchnet
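A self-contained check using Pillow only; the helper above is a thin wrapper around ``ImageEnhance.Color``.
from PIL import Image, ImageEnhance

img = Image.new("RGB", (4, 4), color=(200, 80, 80))
gray = ImageEnhance.Color(img).enhance(0.0)      # factor 0 -> grayscale
boosted = ImageEnhance.Color(img).enhance(2.0)   # factor 2 -> more saturated
print(gray.getpixel((0, 0)), boosted.getpixel((0, 0)))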
|
def parse_config(file_path):
if not os.path.isfile(file_path):
return {}
parser = ConfigParser()
parser.read(file_path)
for s in parser._sections:
for v in six.iterkeys(parser._sections[s]):
            # NOTE: the split argument was garbled in extraction; stripping inline "#" comments is assumed here
            parser._sections[s][v] = parser._sections[s][v].split("#")[0].strip()
return parser._sections
|
Convert the CISM configuration file to a python dictionary
Args:
file_path: absolute path to the configuration file
Returns:
A dictionary representation of the given file
|
juraj-google-style
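A small round-trip sketch with ``configparser`` showing why the values need inline-comment stripping; the '#' delimiter assumed in the function above is also assumed here.
import os
import tempfile
from configparser import ConfigParser

cfg_text = "[grid]\nupn = 30   # number of vertical levels\newn = 31\n"
with tempfile.NamedTemporaryFile("w", suffix=".config", delete=False) as tmp:
    tmp.write(cfg_text)
    path = tmp.name

parser = ConfigParser()
parser.read(path)
sections = {s: {k: v.split("#")[0].strip() for k, v in parser[s].items()}
            for s in parser.sections()}
print(sections)   # {'grid': {'upn': '30', 'ewn': '31'}}
os.remove(path)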
|
def managed(name, table, data, record=None):
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if record is None:
record = name
current_data = {
column: __salt__['openvswitch.db_get'](table, record, column)
for column in data
}
comment_changes = 'Columns have been updated.'
comment_no_changes = 'All columns are already up to date.'
comment_error = 'Error while updating column {0}: {1}'
if __opts__['test']:
for column in data:
if data[column] != current_data[column]:
ret['changes'][column] = {'old': current_data[column],
'new': data[column]}
if ret['changes']:
ret['result'] = None
ret['comment'] = comment_changes
else:
ret['result'] = True
ret['comment'] = comment_no_changes
return ret
for column in data:
if data[column] != current_data[column]:
result = __salt__['openvswitch.db_set'](table, record, column,
data[column])
if result is not None:
ret['comment'] = comment_error.format(column, result)
ret['result'] = False
return ret
ret['changes'][column] = {'old': current_data[column],
'new': data[column]}
    ret['result'] = True
    ret['comment'] = comment_changes if ret['changes'] else comment_no_changes
return ret
|
Ensures that the specified columns of the named record have the specified
values.
Args:
name: The name of the record.
table: The name of the table to which the record belongs.
data: Dictionary containing a mapping from column names to the desired
values. Columns that exist, but are not specified in this
dictionary are not touched.
record: The name of the record (optional). Replaces name if specified.
|
juraj-google-style
|
def device(self, idx):
class GpuDevice(Structure):
pass
c_nvmlDevice_t = POINTER(GpuDevice)
c_index = c_uint(idx)
device = c_nvmlDevice_t()
_check_return(_NVML.get_function(
"nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device)))
return NvidiaDevice(device)
|
Get a specific GPU device
Args:
idx: index of device
Returns:
NvidiaDevice: single GPU device
|
juraj-google-style
|
def get_backdoor(self, name, version=''):
params = {}
params['or'] = 1
params['c-name'] = name
params['c-aliases__in'] = name
r = requests.get('{0}/backdoors/'.format(self.url),
params=params,
verify=self.verify,
proxies=self.proxies)
if r.status_code == 200:
result_data = json.loads(r.text)
if 'meta' not in result_data:
return None
if 'total_count' not in result_data['meta']:
return None
if result_data['meta']['total_count'] <= 0:
return None
if 'objects' not in result_data:
return None
for backdoor in result_data['objects']:
if 'version' in backdoor:
if backdoor['version'] == version:
return backdoor
else:
log.error('Non-200 status code: {}'.format(r.status_code))
return None
|
Searches for the backdoor based on name and version.
Args:
name: The name of the backdoor. This can be an alias.
version: The version.
Returns:
Returns a JSON object containing one or more backdoor results, or
None if not found.
|
juraj-google-style
|
def _RecAnnotate(tree, annotate_name, annotate_value):
for child in tree.children:
_RecAnnotate(child, annotate_name, annotate_value)
if isinstance(tree, pytree.Leaf):
cur_annotate = pytree_utils.GetNodeAnnotation(tree, annotate_name, default=0)
if cur_annotate < annotate_value:
pytree_utils.SetNodeAnnotation(tree, annotate_name, annotate_value)
|
Recursively set the given annotation on all leaves of the subtree.
Takes care to only increase the penalty. If the node already has a higher
or equal penalty associated with it, this is a no-op.
Args:
tree: subtree to annotate
annotate_name: name of the annotation to set
annotate_value: value of the annotation to set
|
github-repos
|
def __init__(self, fn, buffer_size=_DEFAULT_BUFFER_SIZE):
if not callable(fn):
raise TypeError('Expected a callable object instead of: %r' % fn)
super().__init__()
self._fn = fn
self._buffer_size = buffer_size
|
Initializes a CallableFn object wrapping a callable.
Args:
fn: A callable object that reduces elements of an iterable to a single
value (like the builtins sum and max). This callable must be capable of
receiving the kind of values it generates as output in its input, and
for best results, its operation must be commutative and associative.
buffer_size: The number of input elements to buffer before partially
reducing them with ``fn``.
Raises:
TypeError: if fn parameter is not a callable type.
|
github-repos
|
def debug_string(self, indent: int=0) -> str:
        self_repr = f"{'| ' * indent}{self.__class__.__name__}<{repr(self)}>"
if self._children:
child_repr = '\n'.join((child.debug_string(indent=indent + 1) for child in self._children))
self_repr = f'{self_repr}\n{child_repr}'
return self_repr
|
Returns a debug string for the tree rooted at this node.
Args:
indent: The level of indentation to begin printing the tree.
Returns:
A string representing this node and its descendant nodes.
|
github-repos
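A self-contained sketch of the same indentation scheme with a toy node class; the real class's ``_children`` wiring is assumed.
class Node:
    def __init__(self, name, children=()):
        self.name = name
        self._children = list(children)

    def __repr__(self):
        return self.name

    def debug_string(self, indent=0):
        # One '| ' prefix per level, then recurse into children.
        self_repr = f"{'| ' * indent}{self.__class__.__name__}<{repr(self)}>"
        if self._children:
            child_repr = "\n".join(c.debug_string(indent=indent + 1) for c in self._children)
            self_repr = f"{self_repr}\n{child_repr}"
        return self_repr

tree = Node("root", [Node("left"), Node("right", [Node("leaf")])])
print(tree.debug_string())
# Node<root>
# | Node<left>
# | Node<right>
# | | Node<leaf>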
|
def logical_not(x):
if any_symbolic_tensors((x,)):
return LogicalNot().symbolic_call(x)
return backend.numpy.logical_not(x)
|
Computes the element-wise NOT of the given input tensor.
Zeros are treated as `False` and non-zeros are treated as `True`.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise logical NOT of the input.
|
github-repos
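A hedged usage sketch assuming the Keras 3 ``keras.ops`` namespace, where this op is exposed.
import numpy as np
from keras import ops

x = np.array([0, 1, 2, 0])
print(ops.logical_not(x))               # -> [True, False, False, True] as a backend tensor
print(ops.logical_not([True, False]))   # -> [False, True]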
|