code (string, lengths 20-4.93k) | docstring (string, lengths 33-1.27k) | source (3 classes) |
---|---|---|
def GetVolumeIdentifiers(self, volume_system):
volume_identifiers = []
for volume in volume_system.volumes:
volume_identifier = getattr(volume, 'identifier', None)
if volume_identifier:
volume_identifiers.append(volume_identifier)
return sorted(volume_identifiers)
|
Retrieves the volume identifiers.
Args:
volume_system (VolumeSystem): volume system.
Returns:
list[str]: sorted volume identifiers.
|
codesearchnet
|
def BuildAdGroupCriterionOperations(adgroup_id):
criterion_operations = [
{
'xsi_type': 'AdGroupCriterionOperation',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': adgroup_id,
'criterion': {
'xsi_type': 'Keyword',
'text': 'mars%s%s' % (uuid.uuid4(),
'!!!' if i % 10 == 0 else ''),
'matchType': 'BROAD'
}
},
'operator': 'ADD'
}
for i in range(KEYWORD_COUNT)]
return criterion_operations
|
Builds the operations adding a Keyword Criterion to each AdGroup.
Args:
adgroup_id: an integer identifying an AdGroup to associate the keywords
with.
Returns:
a list containing the operations that will create a new Keyword Criterion
associated with each provided AdGroup.
|
juraj-google-style
|
def delete(self, filething=None, delete_v1=True, delete_v2=True):
delete(filething, delete_v1, delete_v2)
self.clear()
|
delete(filething=None, delete_v1=True, delete_v2=True)
Remove tags from a file.
Args:
filething (filething): A filename or `None` to use the one used
when loading.
delete_v1 (bool): delete any ID3v1 tag
delete_v2 (bool): delete any ID3v2 tag
If no filename is given, the one most recently loaded is used.
|
juraj-google-style
|
def get_config(self):
return {}
|
Returns a Python dict of the object config.
A constraint config is a Python dictionary (JSON-serializable) that can
be used to reinstantiate the same object.
Returns:
Python dict containing the configuration of the constraint object.
|
github-repos
|
def _sample_field(self, sample):
tag_values = self.sample_tag_values[sample].values()
if tag_values:
return ':'.join(tag_values)
else:
return '.'
|
Returns string representation of sample-format values.
Raises:
KeyError: if requested sample is not defined.
|
codesearchnet
|
def processPhoneList(platformNames=[], numbers=[], excludePlatformNames=[]):
platforms = platform_selection.getPlatformsByName(platformNames, mode='phonefy', excludePlatformNames=excludePlatformNames)
results = []
for num in numbers:
for pla in platforms:
entities = pla.getInfo(query=num, process=True, mode='phonefy')
if (entities != {}):
results += json.loads(entities)
return results
|
Method to perform searches on a series of numbers.
Args:
-----
platformNames: List of names of the platforms.
numbers: List of numbers to be queried.
excludePlatformNames: A list of platforms not to be searched.
Return:
-------
A list of the results found for the queried numbers.
|
codesearchnet
|
def AddMemberDefinition(self, member_definition):
self.members.append(member_definition)
member_definition.family_definition = self
|
Adds a member definition.
Args:
member_definition (DataTypeDefinition): member data type definition.
|
juraj-google-style
|
def save_to_well_known_file(credentials, well_known_file=None):
if (well_known_file is None):
well_known_file = _get_well_known_file()
config_dir = os.path.dirname(well_known_file)
if (not os.path.isdir(config_dir)):
raise OSError('Config directory does not exist: {0}'.format(config_dir))
credentials_data = credentials.serialization_data
_save_private_file(well_known_file, credentials_data)
|
Save the provided GoogleCredentials to the well known file.
Args:
credentials: the credentials to be saved to the well known file;
it should be an instance of GoogleCredentials
well_known_file: the name of the file where the credentials are to be
saved; this parameter is supposed to be used for
testing only
|
codesearchnet
|
def __getIp6Address(self, addressType):
addrType = ['link local', 'global', 'rloc', 'mesh EID']
addrs = []
globalAddr = []
linkLocal64Addr = ''
rlocAddr = ''
meshEIDAddr = ''
addrs = self.__sendCommand('ipaddr')
for ip6Addr in addrs:
if ip6Addr == 'Done':
break
ip6AddrPrefix = ip6Addr.split(':')[0]
if ip6AddrPrefix == 'fe80':
if ip6Addr.split(':')[4] != '0':
linkLocal64Addr = ip6Addr
elif ip6Addr.startswith(self.meshLocalPrefix):
if ip6Addr.split(':')[4] == '0':
rlocAddr = ip6Addr
else:
meshEIDAddr = ip6Addr
else:
if ip6Addr != None:
globalAddr.append(ip6Addr)
else:
pass
if addressType == addrType[0]:
return linkLocal64Addr
elif addressType == addrType[1]:
return globalAddr
elif addressType == addrType[2]:
return rlocAddr
elif addressType == addrType[3]:
return meshEIDAddr
else:
pass
|
Get a specific type of IPv6 address configured on the Thread device.
Args:
addressType: the specific type of IPv6 address
link local: link local unicast IPv6 address that's within one-hop scope
global: global unicast IPv6 address
rloc: mesh local unicast IPv6 address for routing in thread network
mesh EID: mesh Endpoint Identifier
Returns:
IPv6 address string
|
juraj-google-style
|
def get_config_value(self, section_name, option, default_option='default'):
if (self.config is None):
self.config = configparser.ConfigParser()
self.config.read(self.ini_file_name)
if option:
try:
return self.config.get(section_name, option)
except configparser.NoOptionError:
log.debug("Didn't find a configuration option for '%s' section and '%s' option", section_name, option)
return self.config.get(section_name, default_option)
|
Read a value from the configuration, with a default.
Args:
section_name (str): name of the section in the configuration from which
the option should be found.
option (str): name of the configuration option.
default_option (str): name of the default configuration option whose
value should be returned if the requested option is not found.
Returns:
str: the value from the ini file.
|
codesearchnet
|
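A minimal standalone sketch of the same default-option fallback pattern, using only the standard-library `configparser` (the section and option names below are made up for illustration and are not part of the class above):
```python
import configparser

config = configparser.ConfigParser()
config.read_string("""
[server]
default = localhost
host_option = example-host
""")

def read_with_fallback(section, option, default_option="default"):
    # Try the requested option first; fall back to the default option.
    try:
        return config.get(section, option)
    except configparser.NoOptionError:
        return config.get(section, default_option)

print(read_with_fallback("server", "host_option"))  # "example-host"
print(read_with_fallback("server", "missing"))      # falls back to "localhost"
```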
def __init__(self, ctx, config):
super(AnfTransformer, self).__init__(ctx)
if config is None:
if gast_util.GAST2:
literal_node_types = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant, gast.Name)
elif gast_util.GAST3:
literal_node_types = (gast.Constant, gast.Name)
else:
assert False
self._overrides = [(ASTEdgePattern(ANY, ANY, literal_node_types), LEAVE), (ASTEdgePattern(ANY, ANY, gast.expr), REPLACE)]
else:
self._overrides = config
self._gensym = DummyGensym()
self._pending_statements = []
|
Creates an ANF transformer.
Args:
ctx: transformer.Context
config: Configuration
|
github-repos
|
def _somethingFound(self, data, mode="phonefy"):
if data:
try:
for text in self.notFoundText[mode]:
if text in data:
return False
return True
except AttributeError as e:
verifier = self.modes.get(mode)
if verifier:
if verifier.get("not_found_text", "") in data:
return False
else:
return True
return False
|
Verifies whether something was found.
Args:
-----
data: Data where the self.notFoundText will be searched.
mode: Mode to be executed.
Return:
-------
True if something was found, i.e. the not-found text is absent from the data.
|
juraj-google-style
|
def perfcounters(infile):
measurements = []
with open(infile, 'r') as in_file:
read_struct(in_file)
for region_struct in read_structs(in_file):
region = region_struct['1'][1]
core_info = region_struct['Region Info']
measurements += get_measurements(region, core_info, region_struct)
for table_struct in read_tables(in_file):
core_info = None
if ('Event' in table_struct):
offset = 1
core_info = table_struct['Event'][offset:]
measurements += get_measurements(region, core_info, table_struct, offset)
elif ('Metric' in table_struct):
core_info = table_struct['Metric']
measurements += get_measurements(region, core_info, table_struct)
return measurements
|
Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream.
|
codesearchnet
|
def server(self, value):
self._server = value
self._connectionXML.set('server', value)
|
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
|
codesearchnet
|
def add_note(path, filename="note.txt"):
path = os.path.expanduser(path)
assert os.path.isdir(path), "{} is not a valid directory.".format(path)
filepath = os.path.join(path, filename)
exists = os.path.isfile(filepath)
try:
subprocess.call([EDITOR, filepath])
except Exception as exc:
logger.error("Editing note failed!")
raise exc
if exists:
print("Note updated at:", filepath)
else:
print("Note created at:", filepath)
|
Opens a txt file at the given path where user can add and save notes.
Args:
path (str): Directory where note will be saved.
filename (str): Name of note. Defaults to "note.txt"
|
juraj-google-style
|
def __init__(self, key='', *value):
if key == '':
self.key = self.__class__.__name__
else:
self.key = key
if len(value) != 0:
self.value = list(flatten(value))
|
init
Args:
key (str): the key
*value: the value to be stored
|
juraj-google-style
|
def _parse_string_to_list_of_pairs(s, seconds_to_int=False):
ret = []
for p in [s.split(':') for s in re.sub('[,.;]', ' ', s).split()]:
if (len(p) != 2):
raise ValueError(('bad input to _parse_string_to_list_of_pairs %s' % s))
if seconds_to_int:
ret.append((p[0], int(p[1])))
else:
ret.append(tuple(p))
return ret
|
r"""Parses a string into a list of pairs.
In the input string, each pair is separated by a colon, and the delimiters
between pairs are any of " ,.;".
e.g. "rows:32,cols:32"
Args:
s: str to parse.
seconds_to_int: Boolean. If True, then the second elements are returned
as integers; otherwise they are strings.
Returns:
List of tuple pairs.
Raises:
ValueError: Badly formatted string.
|
codesearchnet
|
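A quick usage sketch, assuming the function above is in scope; the input strings follow the "key:value" format described in the docstring:
```python
pairs = _parse_string_to_list_of_pairs("rows:32,cols:32", seconds_to_int=True)
print(pairs)  # [('rows', 32), ('cols', 32)]

# Without seconds_to_int the second elements stay as strings:
print(_parse_string_to_list_of_pairs("ensemble:4;mesh:8"))  # [('ensemble', '4'), ('mesh', '8')]
```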
def tags(pode, leaf=False):
fulltags = [tag for tag in pode[1]['tags']]
if (not leaf):
return fulltags
retn = []
for (size, tag) in sorted([(len(t), t) for t in fulltags], reverse=True):
look = (tag + '.')
if any([r.startswith(look) for r in retn]):
continue
retn.append(tag)
return retn
|
Get all the tags for a given node.
Args:
pode (tuple): A packed node.
leaf (bool): If True, only return the leaf tags (redundant parent tags are removed).
Returns:
list: A list of tag strings.
|
codesearchnet
|
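An illustration of the leaf pruning, assuming `tags` above is in scope; the packed node below is a hypothetical stand-in with the minimal `(iden, info)` shape the function reads:
```python
pode = ('a1b2c3', {'tags': {'foo': (None, None), 'foo.bar': (None, None), 'baz': (None, None)}})
print(tags(pode))             # ['foo', 'foo.bar', 'baz']
print(tags(pode, leaf=True))  # ['foo.bar', 'baz'], since 'foo' is implied by 'foo.bar'
```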
def _usage(shorthelp):
doc = _sys.modules['__main__'].__doc__
if not doc:
doc = '\nUSAGE: %s [flags]\n' % _sys.argv[0]
doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
else:
num_specifiers = doc.count('%') - 2 * doc.count('%%')
try:
doc %= (_sys.argv[0],) * num_specifiers
except (OverflowError, TypeError, ValueError):
pass
if shorthelp:
flag_str = flags.FLAGS.main_module_help()
else:
flag_str = str(flags.FLAGS)
try:
_sys.stdout.write(doc)
if flag_str:
_sys.stdout.write('\nflags:\n')
_sys.stdout.write(flag_str)
_sys.stdout.write('\n')
except IOError as e:
if e.errno != _errno.EPIPE:
raise
|
Writes __main__'s docstring to stdout with some help text.
Args:
shorthelp: bool, if True, prints only flags from the main module,
rather than all flags.
|
juraj-google-style
|
def decrypt(self, ciphertext):
plaintext = self._rx_tinh.dec(ciphertext)
if (plaintext is None):
logger.error('Message decryption failure')
raise s_exc.CryptoErr(mesg='Message decryption failure')
seqn = next(self._rx_sn)
(sn, mesg) = s_msgpack.un(plaintext)
if (sn != seqn):
logger.error('Message out of sequence: got %d expected %d', sn, seqn)
raise s_exc.CryptoErr(mesg='Message out of sequence', expected=seqn, got=sn)
return mesg
|
Decrypt a message, validating its sequence number is as we expect.
Args:
ciphertext (bytes): The message to decrypt and verify.
Returns:
mesg: A mesg.
Raises:
s_exc.CryptoErr: If the message decryption fails or the sequence number was unexpected.
|
codesearchnet
|
def prepare_or_wait_for_session(self, master='', config=None, wait_for_checkpoint=False, max_wait_secs=7200, start_standard_services=True):
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(master, init_op=self.init_op, saver=self.saver, checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config, init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
logging.info('Starting standard services.')
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master, config=config, max_wait_secs=max_wait_secs)
if start_standard_services:
logging.info('Starting queue runners.')
self.start_queue_runners(sess)
return sess
|
Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the
`tf.compat.v1.Session` constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session, which is
passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
|
github-repos
|
def vlog_is_on(level):
if level > converter.ABSL_DEBUG:
standard_level = converter.STANDARD_DEBUG - (level - 1)
else:
if level < converter.ABSL_FATAL:
level = converter.ABSL_FATAL
standard_level = converter.absl_to_standard(level)
return _absl_logger.isEnabledFor(standard_level)
|
Checks if vlog is enabled for the given level in caller's source file.
Args:
level: int, the C++ verbose logging level at which to log the message,
e.g. 1, 2, 3, 4... While absl level constants are also supported,
callers should prefer level_debug|level_info|... calls for
checking those.
Returns:
True if logging is turned on for that level.
|
juraj-google-style
|
def create_profile(profile_name):
try:
profile = Profile(profile_name=profile_name)
profile.full_clean()
profile.save()
except ValidationError as err:
raise ValCannotCreateError(err.message_dict)
|
Used to create Profile objects in the database
A profile needs to exist before an EncodedVideo object can be created.
Args:
profile_name (str): ID of the profile
Raises:
ValCannotCreateError: Raised if the profile name is invalid or already exists
|
codesearchnet
|
def handle_duplications(file_path):
logging.info('Handling duplications for "%s"', file_path)
f = open_strings_file(file_path, 'r+')
header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)
file_elements = []
section_file_elements = []
keys_to_objects = {}
duplicates_found = []
for (header_comment, comments, key, value) in header_comment_key_value_tuples:
if (len(header_comment) > 0):
for elem in sorted(section_file_elements, key=(lambda x: x.comments[0])):
file_elements.append(elem)
section_file_elements = []
file_elements.append(Comment(header_comment))
if (key in keys_to_objects):
keys_to_objects[key].add_comments(comments)
duplicates_found.append(key)
else:
loc_obj = LocalizationEntry(comments, key, value)
keys_to_objects[key] = loc_obj
section_file_elements.append(loc_obj)
for elem in sorted(section_file_elements, key=(lambda x: x.comments[0])):
file_elements.append(elem)
f.seek(0)
for element in file_elements:
f.write(unicode(element))
f.write(u'\n')
f.truncate()
f.close()
logging.info(('Omitted %d duplicates (%s)' % (len(duplicates_found), ','.join(duplicates_found))))
logging.info('Finished handling duplications for "%s"', file_path)
|
Omits the duplications in the strings files.
Keys that appear more than once are merged into a single entry, and the omission is documented.
Args:
file_path (str): The path to the strings file.
|
codesearchnet
|
def _aggregation_op(cls,
op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
x: 'TensorFluent',
vars_list: List[str]) -> 'TensorFluent':
axis = cls._varslist2axis(x, vars_list)
t = op(x.tensor, axis)
scope = []
for var in x.scope.as_list():
if var not in vars_list:
scope.append(var)
batch = x.batch
return TensorFluent(t, scope, batch=batch)
|
Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output.
|
juraj-google-style
|
def parallel(devices, fn, *args, **kwargs):
if (not isinstance(devices, list)):
raise ValueError('devices must be a list')
for x in (list(args) + list(six.itervalues(kwargs))):
if ((not isinstance(x, list)) or (len(x) != len(devices))):
raise ValueError(('Argument not a list with same length as devices arg=%s devices=%s' % (x, devices)))
ret = []
for (i, device) in enumerate(devices):
with tf.device(device):
with tf.variable_scope(('parallel_%d' % i)):
my_args = [x[i] for x in args]
my_kwargs = {k: v[i] for (k, v) in six.iteritems(kwargs)}
ret.append(fn(*my_args, **my_kwargs))
return ret
|
Call a function once on each device.
Args:
devices: a list of n devices
fn: a function
*args: arguments, each of which is a list of length n
**kwargs: keyword-args, each of which is a list of length n
Returns:
a list of length n
Raises:
ValueError: if the arguments are not all lists of length n
|
codesearchnet
|
def create_public_ip(access_token, subscription_id, resource_group, public_ip_name, dns_label,
location):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/publicIPAddresses/', public_ip_name,
'?api-version=', NETWORK_API])
ip_body = {'location': location}
properties = {'publicIPAllocationMethod': 'Dynamic'}
properties['dnsSettings'] = {'domainNameLabel': dns_label}
ip_body['properties'] = properties
body = json.dumps(ip_body)
return do_put(endpoint, body, access_token)
|
Create a public ip address.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
public_ip_name (str): Name of the new public ip address resource.
dns_label (str): DNS label to apply to the IP address.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. Public IP address JSON body.
|
juraj-google-style
|
def uniquelines(q):
setoflines = set()
for facets in q:
for line in itertools.combinations(facets, 2):
setoflines.add(tuple(sorted(line)))
return setoflines
|
Given all the facets, convert it into a set of unique lines. Specifically
used for converting convex hull facets into line pairs of coordinates.
Args:
q: A 2-dim sequence, where each row represents a facet. E.g.,
[[1,2,3],[3,6,7],...]
Returns:
setoflines:
A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
|
juraj-google-style
|
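A short usage sketch, assuming `uniquelines` above is in scope; two triangular facets sharing vertex 3 yield six unique edges:
```python
facets = [[1, 2, 3], [3, 6, 7]]
print(sorted(uniquelines(facets)))
# [(1, 2), (1, 3), (2, 3), (3, 6), (3, 7), (6, 7)]
```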
def fit_gaussian(samples, ddof=0):
if len(samples.shape) == 1:
return np.mean(samples), np.std(samples, ddof=ddof)
return np.mean(samples, axis=1), np.std(samples, axis=1, ddof=ddof)
|
Calculates the mean and the standard deviation of the given samples.
Args:
samples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all
values. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.
ddof (int): the difference degrees of freedom in the std calculation. See numpy.
|
juraj-google-style
|
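A small usage sketch with NumPy, assuming `fit_gaussian` above is in scope; the 2-D case fits one Gaussian per row (statistics taken over axis 1):
```python
import numpy as np

samples = np.array([1.0, 2.0, 3.0, 4.0])
print(fit_gaussian(samples))  # (2.5, ~1.118) with the default ddof=0

samples_2d = np.array([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
means, stds = fit_gaussian(samples_2d, ddof=1)
print(means)  # [ 2. 20.]
print(stds)   # [ 1. 10.]
```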
def log_deferred(op, log_id, every_n=1, first_n=None):
prefix = ':::MLPv0.5.0 [{}]'.format(log_id)
if first_n is not None and first_n == 1:
return tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=1)
counter = tf.Variable((tf.zeros(shape=(), dtype=tf.int32) - 1), aggregation=tf.VariableAggregation.MEAN)
increment = tf.assign_add(counter, 1, use_locking=True)
return tf.cond(tf.equal(tf.mod(increment, every_n), 0), (lambda : tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=first_n)), (lambda : op))
|
Helper method inserting compliance logging ops.
Note: This helper is not guaranteed to be efficient, as it will insert ops
and control dependencies. If this proves to be a bottleneck, submitters
may wish to consider other methods such as extracting values from an
.events file.
Args:
op: A tf op to be printed.
log_id: a uuid provided by the logger in mlperf_log.py
every_n: If repeat is True, with what frequency should the input op be
logged. If repeat is False, this argument is ignored.
first_n: Only log this many values. This arg does not interact with every_n.
The first_n refers to the first n that would have been logged.
|
codesearchnet
|
def g_square_bin(dm, x, y, s):
def _calculate_tlog(x, y, s, dof, dm):
nijk = np.zeros((2, 2, dof))
s_size = len(s)
z = []
for z_index in range(s_size):
z.append(s.pop())
pass
for row_index in range(0, dm.shape[0]):
i = dm[row_index, x]
j = dm[row_index, y]
k = []
k_index = 0
for z_index in range(s_size):
k_index += dm[row_index, z[z_index]] * int(pow(2, z_index))
pass
nijk[i, j, k_index] += 1
pass
nik = np.ndarray((2, dof))
njk = np.ndarray((2, dof))
for k_index in range(dof):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((2, 2 , dof))
tlog.fill(np.nan)
for k in range(dof):
tx = np.array([nik[:,k]]).T
ty = np.array([njk[:,k]])
tdijk = tx.dot(ty)
tlog[:,:,k] = nijk[:,:,k] * nk[k] / tdijk
pass
return (nijk, tlog)
_logger.debug('Edge %d -- %d with subset: %s' % (x, y, s))
row_size = dm.shape[0]
s_size = len(s)
dof = int(pow(2, s_size))
row_size_required = 10 * dof
if row_size < row_size_required:
_logger.warning('Not enough samples. %s is too small. Need %s.'
% (str(row_size), str(row_size_required)))
return 1
nijk = None
if s_size < 6:
if s_size == 0:
nijk = np.zeros((2, 2))
for row_index in range(0, dm.shape[0]):
i = dm[row_index, x]
j = dm[row_index, y]
nijk[i, j] += 1
pass
tx = np.array([nijk.sum(axis = 1)]).T
ty = np.array([nijk.sum(axis = 0)])
tdij = tx.dot(ty)
tlog = nijk * row_size / tdij
pass
if s_size > 0:
nijk, tlog = _calculate_tlog(x, y, s, dof, dm)
pass
pass
else:
nijk = np.zeros((2, 2, 1))
i = dm[0, x]
j = dm[0, y]
k = []
for z in s:
k.append(dm[:,z])
pass
k = np.array(k).T
parents_count = 1
parents_val = np.array([k[0,:]])
nijk[i, j, parents_count - 1] = 1
for it_sample in range(1, row_size):
is_new = True
i = dm[it_sample, x]
j = dm[it_sample, y]
tcomp = parents_val[:parents_count,:] == k[it_sample,:]
for it_parents in range(parents_count):
if np.all(tcomp[it_parents,:]):
nijk[i, j, it_parents] += 1
is_new = False
break
pass
if is_new is True:
parents_count += 1
parents_val = np.r_[parents_val, [k[it_sample,:]]]
nnijk = np.zeros((2,2,parents_count))
for p in range(parents_count - 1):
nnijk[:,:,p] = nijk[:,:,p]
nnijk[i, j, parents_count - 1] = 1
nijk = nnijk
pass
pass
nik = np.ndarray((2, parents_count))
njk = np.ndarray((2, parents_count))
for k_index in range(parents_count):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((2, 2 , parents_count))
tlog.fill(np.nan)
for k in range(parents_count):
tX = np.array([nik[:,k]]).T
tY = np.array([njk[:,k]])
tdijk = tX.dot(tY)
tlog[:,:,k] = nijk[:,:,k] * nk[k] / tdijk
pass
pass
log_tlog = np.log(tlog)
G2 = np.nansum(2 * nijk * log_tlog)
_logger.debug('G2 = %f' % G2)
p_val = chi2.sf(G2, dof)
_logger.info('p_val = %s' % str(p_val))
return p_val
|
G square test for binary data.
Args:
dm: the data matrix to be used (as a numpy.ndarray).
x: the first node (as an integer).
y: the second node (as an integer).
s: the set of neighbouring nodes of x and y (as a set()).
Returns:
p_val: the p-value of conditional independence.
|
juraj-google-style
|
def check(self, url: str) -> Optional[dict]:
data = self.data.get(url)
if data:
data = self._check_expiration(url, data)
return (data.data if data else None)
|
Check if data for a url has expired.
Data is not fetched again if it has expired.
Args:
url: url to check expiration on
Returns:
value of the data, possibly None
|
codesearchnet
|
def _get_arguments_for_execution(self, function_name, serialized_args):
arguments = []
for (i, arg) in enumerate(serialized_args):
if isinstance(arg, ObjectID):
argument = self.get_object([arg])[0]
if isinstance(argument, RayError):
raise argument
else:
argument = arg
arguments.append(argument)
return arguments
|
Retrieve the arguments for the remote function.
This retrieves the values for the arguments to the remote function that
were passed in as object IDs. Arguments that were passed by value are
not changed. This is called by the worker that is executing the remote
function.
Args:
function_name (str): The name of the remote function whose
arguments are being retrieved.
serialized_args (List): The arguments to the function. These are
either strings representing serialized objects passed by value
or they are ray.ObjectIDs.
Returns:
The retrieved arguments in addition to the arguments that were
passed by value.
Raises:
RayError: This exception is raised if a task that
created one of the arguments failed.
|
codesearchnet
|
def _call_and_serialize(cls, method, data, refresh=False):
method(data)
if refresh:
return cls.read(method.__self__, data[cls.__uid_field__])
else:
return cls.deserialize(cls._get_non_empty_dict(data))
|
Call the remote method with data, and optionally refresh.
Args:
method (callable): The method on the Authenticated Five9 object
that should be called.
data (dict): A data dictionary that will be passed as the first
and only position argument to ``method``.
refresh (bool, optional): Set to ``True`` to get the record data
from Five9 before returning the record.
Returns:
BaseModel: The newly created record. If ``refresh`` is ``True``,
this will be fetched from Five9. Otherwise, it's the data
record that was sent to the server.
|
codesearchnet
|
def isnan(x):
if any_symbolic_tensors((x,)):
return Isnan().symbolic_call(x)
return backend.numpy.isnan(x)
|
Test element-wise for NaN and return result as a boolean tensor.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
|
github-repos
|
def merge(self, other):
if other.seed != self.seed:
raise ValueError("Cannot merge MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot merge MinHash with\
different numbers of permutation functions")
self.hashvalues = np.minimum(other.hashvalues, self.hashvalues)
|
Merge the other MinHash with this one, making this one the union
of both.
Args:
other (datasketch.MinHash): The other MinHash.
|
juraj-google-style
|
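A usage sketch assuming the datasketch package is installed; after merging, `m1` approximates the MinHash of the union of both token sets (the seed and number of permutations must match, as the checks above enforce):
```python
from datasketch import MinHash

m1, m2 = MinHash(num_perm=128), MinHash(num_perm=128)
for token in "the quick brown fox".split():
    m1.update(token.encode("utf8"))
for token in "the quick brown dog".split():
    m2.update(token.encode("utf8"))
m1.merge(m2)  # m1 now covers both token sets
```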
def get_duration(self, matrix_name):
duration = 0.0
if matrix_name in self.data:
duration = sum([stage.duration() for stage in self.data[matrix_name]])
return duration
|
Get duration for a concrete matrix.
Args:
matrix_name (str): name of the Matrix.
Returns:
float: duration of concrete matrix in seconds.
|
juraj-google-style
|
def preprocess_input(x, data_format=None):
return x
|
A placeholder method for backward compatibility.
The preprocessing logic has been included in the mobilenet_v3 model
implementation. Users are no longer required to call this method to
normalize the input data. This method does nothing and is only kept as a
placeholder to align the API surface between the old and new versions of the model.
Args:
x: A floating point `numpy.array` or a tensor.
data_format: Optional data format of the image tensor/array.
`None` means the global setting
`keras.config.image_data_format()` is used
(unless you changed it, it uses `"channels_last"`).
Defaults to `None`.
Returns:
Unchanged `numpy.array` or tensor.
|
github-repos
|
def Convert(self, values, start_index=0, end_index=None):
if (not values):
return
try:
total_batch_count = len(values) // self.batch_size
except TypeError:
total_batch_count = -1
pool = ThreadPool.Factory(self.threadpool_prefix, self.threadpool_size)
val_iterator = itertools.islice(values, start_index, end_index)
pool.Start()
try:
for (batch_index, batch) in enumerate(collection.Batch(val_iterator, self.batch_size)):
logging.debug('Processing batch %d out of %d', batch_index, total_batch_count)
pool.AddTask(target=self.ConvertBatch, args=(batch,), name=('batch_%d' % batch_index), inline=False)
finally:
pool.Stop(join_timeout=3600)
|
Converts given collection to exported values.
This method uses a threadpool to do the conversion in parallel. It
blocks for up to one hour until everything is converted.
Args:
values: Iterable object with values to convert.
start_index: Start from this index in the collection.
end_index: Finish processing on the (index - 1) element of the collection.
If None, work till the end of the collection.
Returns:
Nothing. ConvertedBatch() should handle the results.
|
codesearchnet
|
def setlogging(mlogger, defval=None):
log_level = os.getenv('SYN_LOG_LEVEL', defval)
if log_level:
log_level = log_level.upper()
if (log_level not in s_const.LOG_LEVEL_CHOICES):
raise ValueError('Invalid log level provided: {}'.format(log_level))
logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)
mlogger.info('log level set to %s', log_level)
|
Configure synapse logging.
Args:
mlogger (logging.Logger): Reference to a logging.Logger()
defval (str): Default log level
Notes:
This calls logging.basicConfig and should only be called once per process.
Returns:
None
|
codesearchnet
|
def _Open(self, path_spec, mode='rb'):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
try:
vslvm_handle = pyvslvm.handle()
vslvm_handle.open_file_object(file_object)
vslvm_handle.open_physical_volume_files_as_file_objects([
file_object])
vslvm_volume_group = vslvm_handle.get_volume_group()
except:
file_object.close()
raise
self._file_object = file_object
self._vslvm_handle = vslvm_handle
self._vslvm_volume_group = vslvm_volume_group
|
Opens the file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
|
juraj-google-style
|
def max_validator(max_value):
def validator(value):
if value > max_value:
raise ValidationError("{} is not <= {}".format(value, max_value))
return validator
|
Return validator function that ensures upper bound of a number.
Result validation function will validate the internal value of resource
instance field with the ``value <= max_value`` check.
Args:
max_value: maximum value for new validator
|
juraj-google-style
|
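A minimal sketch of how the returned validator behaves; `ValidationError` here is a hypothetical stand-in, since the real one comes from the surrounding resource framework:
```python
class ValidationError(Exception):
    """Hypothetical stand-in for the framework's ValidationError."""

def max_validator(max_value):
    def validator(value):
        if value > max_value:
            raise ValidationError("{} is not <= {}".format(value, max_value))
    return validator

check = max_validator(10)
check(7)    # passes silently
check(11)   # raises ValidationError: "11 is not <= 10"
```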
def generate(self, model_len=None, model_width=None):
if model_len is None:
model_len = Constant.MODEL_LEN
if model_width is None:
model_width = Constant.MODEL_WIDTH
pooling_len = int(model_len / 4)
graph = Graph(self.input_shape, False)
temp_input_channel = self.input_shape[-1]
output_node_id = 0
stride = 1
for i in range(model_len):
output_node_id = graph.add_layer(StubReLU(), output_node_id)
output_node_id = graph.add_layer(
self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id
)
output_node_id = graph.add_layer(
self.conv(temp_input_channel, model_width, kernel_size=3, stride=stride),
output_node_id,
)
temp_input_channel = model_width
if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):
output_node_id = graph.add_layer(self.pooling(), output_node_id)
output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id)
output_node_id = graph.add_layer(
self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id
)
output_node_id = graph.add_layer(
StubDense(graph.node_list[output_node_id].shape[0], model_width),
output_node_id,
)
output_node_id = graph.add_layer(StubReLU(), output_node_id)
graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
return graph
|
Generates a CNN.
Args:
model_len: An integer. Number of convolutional layers.
model_width: An integer. Number of filters for the convolutional layers.
Returns:
An instance of the class Graph. Represents the neural architecture graph of the generated model.
|
juraj-google-style
|
def as_dict(self):
ret = {}
for job in self.jobs:
task_indices = self.task_indices(job)
if len(task_indices) == 0:
ret[job] = {}
continue
if max(task_indices) + 1 == len(task_indices):
ret[job] = self.job_tasks(job)
else:
ret[job] = {i: self.task_address(job, i) for i in task_indices}
return ret
|
Returns a dictionary from job names to their tasks.
For each job, if the task index space is dense, the corresponding
value will be a list of network addresses; otherwise it will be a
dictionary mapping (sparse) task indices to the corresponding
addresses.
Returns:
A dictionary mapping job names to lists or dictionaries
describing the tasks in those jobs.
|
github-repos
|
def ParseLastVisitedRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
query_hash = hash(query)
hidden = self._GetRowValue(query_hash, row, 'hidden')
transition = self._GetRowValue(query_hash, row, 'transition')
visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')
from_visit = self._GetRowValue(query_hash, row, 'from_visit')
event_data = ChromeHistoryPageVisitedEventData()
event_data.from_visit = self._GetUrl(from_visit, cache, database)
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.page_transition_type = (
transition & self._PAGE_TRANSITION_CORE_MASK)
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.url_hidden = hidden == '1'
event_data.visit_source = self._GetVisitSource(
visit_identifier, cache, database)
timestamp = self._GetRowValue(query_hash, row, 'visit_time')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a last visited row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (SQLiteCache): cache which contains cached results from querying
the visits and urls tables.
database (Optional[SQLiteDatabase]): database.
|
juraj-google-style
|
def setScales(self, scales=None, term_num=None):
if (scales == None):
for term_i in range(self.n_terms):
n_scales = self.vd.getTerm(term_i).getNumberScales()
self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales)))
elif (term_num == None):
assert (scales.shape[0] == self.vd.getNumberScales()), 'incompatible shape'
index = 0
for term_i in range(self.n_terms):
index1 = (index + self.vd.getTerm(term_i).getNumberScales())
self.vd.getTerm(term_i).setScales(scales[index:index1])
index = index1
else:
assert (scales.shape[0] == self.vd.getTerm(term_num).getNumberScales()), 'incompatible shape'
self.vd.getTerm(term_num).setScales(scales)
|
Get random initialization of variances based on the empirical trait variance.
Args:
scales: if scales==None, set them randomly;
otherwise set the scales of term_num (if term_num==None, set the scales of all terms).
term_num: the term whose scales are set.
|
codesearchnet
|
def is_ref(x):
return isinstance(x, variables_module.Variable) or (isinstance(x, module.Module) and hasattr(x, 'dtype') and hasattr(x, 'shape'))
|
Evaluates if the object has reference semantics.
An object is deemed "reference" if it is a `tf.Variable` instance or is
derived from a `tf.Module` with `dtype` and `shape` properties.
Args:
x: Any object.
Returns:
is_ref: Python `bool` indicating whether the input has reference semantics, i.e.,
is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties.
|
github-repos
|
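A quick check of the two branches, assuming TensorFlow is installed and `is_ref` above is in scope:
```python
import tensorflow as tf

v = tf.Variable(3.0)
t = tf.constant(3.0)
print(is_ref(v))  # True  -- a tf.Variable has reference semantics
print(is_ref(t))  # False -- a plain tensor does not
```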
def forward(self, x):
head_outputs = ([None] * self.t)
if isinstance(self.input_layer, list):
input_outputs = [mod(x) for (mod, x) in zip(self.input_layer, x)]
x = torch.stack(input_outputs, dim=1)
for t in self.task_map[0]:
head = self.heads[t]
head_outputs[t] = head(input_outputs[t])
else:
x = self.input_layer(x)
for t in self.task_map[0]:
head = self.heads[t]
head_outputs[t] = head(x)
for (i, layer) in enumerate(self.middle_layers, start=1):
x = layer(x)
for t in self.task_map[i]:
head = self.heads[t]
if (self.config['pass_predictions'] and bool(self.task_graph.parents[t])):
task_input = [x]
for p in self.task_graph.parents[t]:
task_input.append(head_outputs[p])
task_input = torch.stack(task_input, dim=1)
else:
task_input = x
head_outputs[t] = head(task_input)
return head_outputs
|
Returns a list of outputs for tasks 0,...t-1
Args:
x: a [batch_size, ...] batch from X
|
codesearchnet
|
def _load_from_cache_if_available(self, key):
if (key in self._cache):
entity = self._cache[key]
if ((entity is None) or (entity._key == key)):
raise tasklets.Return(entity)
|
Returns a cached Model instance given the entity key if available.
Args:
key: Key instance.
Returns:
A Model instance if the key exists in the cache.
|
codesearchnet
|
def __init__(self, params_arr, cost_functionable):
self.__params_arr = params_arr
if isinstance(cost_functionable, CostFunctionable):
self.__cost_functionable = cost_functionable
else:
raise TypeError
|
Init.
Args:
params_arr: The parameters.
cost_functionable: is-a `CostFunctionable`.
|
juraj-google-style
|
def _Ifup(self, interfaces, logger):
ifup = ['/usr/sbin/wicked', 'ifup', '--timeout', '1']
try:
subprocess.check_call((ifup + interfaces))
except subprocess.CalledProcessError:
logger.warning('Could not activate interfaces %s.', interfaces)
|
Activate network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
|
codesearchnet
|
def ReleaseRecords(cls, ids, token):
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.QueueReleaseRecords(ids)
|
Release records identified by subjects.
Releases any claim on the records identified by ids.
Args:
ids: A list of ids provided by ClaimRecords.
token: The database access token to write with.
Raises:
LockError: If the queue is not locked.
|
codesearchnet
|
def put(self, entity):
self._cur_batch.put(entity)
self._num_mutations += 1
if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
self.commit()
self.begin()
|
Adds mutation of the entity to the mutation buffer.
If the mutation buffer reaches its capacity then this method commits all pending
mutations from the buffer and empties it.
Args:
entity: entity which should be put into the datastore
|
juraj-google-style
|
def _batch_prepare_for_model(self, batch_ids_pairs: List[Tuple[List[int], None]], batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]], batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
batch_outputs = {}
for input_ids, entity_ids, entity_token_span_pairs in zip(batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs):
first_ids, second_ids = input_ids
first_entity_ids, second_entity_ids = entity_ids
first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
outputs = self.prepare_for_model(first_ids, second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
|
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
batch_entity_ids_pairs: list of entity ids or entity ids pairs
batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
max_entity_length: The maximum length of the entity sequence.
|
github-repos
|
def make_slot_check(wanted):
if isinstance(wanted, types.FunctionType):
return wanted
if isinstance(wanted, int):
item, meta = wanted, None
elif isinstance(wanted, Slot):
item, meta = wanted.item_id, wanted.damage
elif isinstance(wanted, (Item, Block)):
item, meta = wanted.id, wanted.metadata
elif isinstance(wanted, str):
item_or_block = get_item_or_block(wanted, init=True)
item, meta = item_or_block.id, item_or_block.metadata
else:
try:
item, meta = wanted
except TypeError:
raise ValueError('Illegal args for make_slot_check(): %s' % wanted)
return lambda slot: item == slot.item_id and meta in (None, slot.damage)
|
Creates and returns a function that takes a slot
and checks if it matches the wanted item.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
|
juraj-google-style
|
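A behaviour sketch, assuming `make_slot_check` above is in scope; `FakeSlot` is a hypothetical stand-in exposing only the two attributes the returned checker reads:
```python
from collections import namedtuple

FakeSlot = namedtuple('FakeSlot', 'item_id damage')

check = make_slot_check(4)          # wanted given as a plain item id
print(check(FakeSlot(4, 0)))        # True  (metadata is ignored)
print(check(FakeSlot(5, 0)))        # False

check = make_slot_check((4, 2))     # wanted given as (itemID, metadata)
print(check(FakeSlot(4, 2)))        # True
print(check(FakeSlot(4, 0)))        # False (metadata mismatch)
```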
def addSources(self, *sources):
self._sources.extend(sources)
((debug.logger & debug.flagCompiler) and debug.logger(('current MIB source(s): %s' % ', '.join([str(x) for x in self._sources]))))
return self
|
Add more ASN.1 MIB source repositories.
MibCompiler.compile will invoke each of configured source objects
in order of their addition asking each to fetch MIB module specified
by name.
Args:
sources: reader object(s)
Returns:
reference to itself (can be used for call chaining)
|
codesearchnet
|
def days_in_leap_and_nonleap_years_between(start_date, end_date):
days_between = end_date.ordinal() - start_date.ordinal()
days_in_leap_years = days_in_leap_years_between(start_date, end_date)
return (days_in_leap_years, days_between - days_in_leap_years)
|
Calculates number of days that fall on leap and non-leap years.
Calculates a tuple '(days_in_leap_years, days_in_nonleap_years)'.
'start_date' is included and 'end_date' is excluded from the period.
For example, for dates `2019-12-24` and `2024-2-10` the result is
(406, 1103):
406 = 366 days in 2020 + 31 in Jan 2024 + 9 in Feb 2024,
1103 = 8 in 2019 + 365 in 2021 + 365 in 2022 + 365 in 2023.
If `end_date` is earlier than `start_date`, the result will be negative or
zero.
Args:
start_date: DateTensor.
end_date: DateTensor compatible with `start_date`.
Returns:
Tuple of two Tensors of type 'int32'.
|
github-repos
|
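A plain-Python cross-check of the worked example in the docstring; this is not the library implementation, just a brute-force count with `datetime`:
```python
import datetime

def is_leap(year):
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

start, end = datetime.date(2019, 12, 24), datetime.date(2024, 2, 10)
total = (end - start).days  # start included, end excluded
leap = sum(1 for n in range(start.toordinal(), end.toordinal())
           if is_leap(datetime.date.fromordinal(n).year))
print(leap, total - leap)   # 406 1103
```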
def plot_val_with_title(self, idxs, y):
if (len(idxs) > 0):
imgs = np.stack([self.ds[x][0] for x in idxs])
title_probs = [self.probs[(x, y)] for x in idxs]
return plots(self.ds.denorm(imgs), rows=1, titles=title_probs)
else:
return False
|
Displays the images and their probabilities of belonging to a certain class
Arguments:
idxs (numpy.ndarray): indexes of the image samples from the dataset
y (int): the selected class
Returns:
Plots the images in n rows [rows = n]
|
codesearchnet
|
def GetSubFileEntryByName(self, name, case_sensitive=True):
name_lower = name.lower()
matching_sub_file_entry = None
for sub_file_entry in self.sub_file_entries:
if sub_file_entry.name == name:
return sub_file_entry
if not case_sensitive and sub_file_entry.name.lower() == name_lower:
if not matching_sub_file_entry:
matching_sub_file_entry = sub_file_entry
return matching_sub_file_entry
|
Retrieves a sub file entry by name.
Args:
name (str): name of the file entry.
case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
FileEntry: a file entry or None if not available.
|
juraj-google-style
|
def grid(self, dimensions=None, **kwargs):
dimensions = self._valid_dimensions(dimensions)
if len(dimensions) == self.ndims:
with item_check(False):
return GridSpace(self, **kwargs).reindex(dimensions)
return self.groupby(dimensions, container_type=GridSpace, **kwargs)
|
Group by supplied dimension(s) and lay out groups in grid
Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a GridSpace.
Args:
dimensions: Dimension/str or list
Dimension or list of dimensions to group by
Returns:
GridSpace with supplied dimensions
|
juraj-google-style
|
def from_api_repr(cls, resource):
version = resource.get("version")
etag = resource.get("etag")
policy = cls(etag, version)
for binding in resource.get("bindings", ()):
role = binding["role"]
members = sorted(binding["members"])
policy[role] = members
return policy
|
Factory: create a policy from a JSON resource.
Args:
resource (dict): policy resource returned by ``getIamPolicy`` API.
Returns:
:class:`Policy`: the parsed policy
|
juraj-google-style
|
def validate_source_dir(script, directory):
if directory:
if (not os.path.isfile(os.path.join(directory, script))):
raise ValueError('No file named "{}" was found in directory "{}".'.format(script, directory))
return True
|
Validate that the source directory exists and it contains the user script
Args:
script (str): Script filename.
directory (str): Directory containing the source file.
Raises:
ValueError: If ``directory`` does not exist, is not a directory, or does not contain ``script``.
|
codesearchnet
|
def layer_normalization(x, gamma=None, beta=None, axis=-1, epsilon=None, **kwargs):
rms_scaling = kwargs.pop('rms_scaling', False)
if rms_scaling:
warnings.warn('You passed `rms_scaling=True`, which is deprecated. This argument incorrectly scales the input by the variance, not the root mean square. To correctly use RMS Normalization, please use `keras.ops.rms_normalization` / `keras.ops.nn.rms_normalization` instead.')
if any_symbolic_tensors((x,)):
return LayerNorm(gamma=gamma, beta=beta, axis=axis, epsilon=epsilon, rms_scaling=rms_scaling).symbolic_call(x)
return _layer_normalization(x, gamma=gamma, beta=beta, axis=axis, epsilon=epsilon, rms_scaling=rms_scaling)
|
Layer normalization layer (Ba et al., 2016).
Normalize the activations of the previous layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within each
example close to 0 and the activation standard deviation close to 1.
Args:
x: Input tensor.
axis: The axis or axes along which to perform normalization.
Default to -1.
gamma: Optional scaling factor for the normalization.
beta: Optional add offset for the normalized tensor.
epsilon: A lower bound value for the norm.
Defaults to `backend.epsilon()`.
Returns:
The normalized array.
>>> x = ops.arange(5,dtype = "float32")
>>> x_norm = ops.layer_normalization(x)
>>> print(x_norm)
array([-1.4142135 , -0.70710677, 0., 0.7071067 , 1.4142135 ])
|
github-repos
|
def add_periodic_callback(self, callback, period_milliseconds):
from ..server.callbacks import PeriodicCallback
cb = PeriodicCallback(self, None, period_milliseconds)
return self._add_session_callback(cb, callback, one_shot=False, originator=self.add_periodic_callback)
|
Add a callback to be invoked on a session periodically.
Args:
callback (callable) :
A callback function to execute periodically
period_milliseconds (int) :
Number of milliseconds between each callback execution.
Returns:
PeriodicCallback : can be used with ``remove_periodic_callback``
.. note::
Periodic callbacks only work within the context of a Bokeh server
session. This function will have no effect when Bokeh outputs to
standalone HTML or Jupyter notebook cells.
|
codesearchnet
|
def add_genstrings_comments_to_file(localization_file, genstrings_err):
errors_to_log = [line for line in genstrings_err.splitlines() if ('used with multiple comments' not in line)]
if (len(errors_to_log) > 0):
logging.warning('genstrings warnings:\n%s', '\n'.join(errors_to_log))
loc_file = open_strings_file(localization_file, 'a')
regex_matches = re.findall('Warning: Key "(.*?)" used with multiple comments ("[^"]*" (& "[^"]*")+)', genstrings_err)
logging.info('Adding multiple comments from genstrings output')
for regex_match in regex_matches:
if (len(regex_match) == 3):
key = regex_match[0]
comments = [comment.strip()[1:(- 1)] for comment in regex_match[1].split('&')]
logging.info('Found key with %d comments: %s', len(comments), key)
loc_key = LocalizationEntry(comments, key, key)
loc_file.write(unicode(loc_key))
loc_file.write(u'\n')
loc_file.close()
|
Adds the comments produced by the genstrings script for duplicate keys.
Args:
localization_file (str): The path to the strings file.
|
codesearchnet
|
def gene_panel(self, panel_id, version=None):
query = {'panel_name': panel_id}
if version:
LOG.info("Fetch gene panel {0}, version {1} from database".format(
panel_id, version
))
query['version'] = version
return self.panel_collection.find_one(query)
else:
LOG.info("Fetching gene panels %s from database", panel_id)
res = self.panel_collection.find(query).sort('version', -1)
if res.count() > 0:
return res[0]
else:
LOG.info("No gene panel found")
return None
|
Fetch a gene panel.
If no panel is sent return all panels
Args:
panel_id (str): unique id for the panel
version (str): version of the panel. If 'None' latest version will be returned
Returns:
gene_panel: gene panel object
|
juraj-google-style
|
def arctanh(x):
if any_symbolic_tensors((x,)):
return Arctanh().symbolic_call(x)
return backend.numpy.arctanh(x)
|
Inverse hyperbolic tangent, element-wise.
Arguments:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
|
github-repos
|
def get_ignition_type(root):
properties = {}
elem = root.find('ignitionType')
if (elem is None):
raise MissingElementError('ignitionType')
elem = elem.attrib
if ('target' in elem):
ign_target = elem['target'].rstrip(';').upper()
else:
raise MissingAttributeError('target', 'ignitionType')
if ('type' in elem):
ign_type = elem['type']
if (ign_type == 'baseline max intercept from d/dt'):
ign_type = 'd/dt max extrapolated'
else:
raise MissingAttributeError('type', 'ignitionType')
if (len(ign_target.split(';')) > 1):
raise NotImplementedError('Multiple ignition targets not supported.')
if (ign_target == 'OHEX'):
ign_target = 'OH*'
elif (ign_target == 'CHEX'):
ign_target = 'CH*'
elif (ign_target == 'P'):
ign_target = 'pressure'
elif (ign_target == 'T'):
ign_target = 'temperature'
if (ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']):
raise KeywordError((ign_target + ' not valid ignition target'))
if (ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']):
raise KeywordError((ign_type + ' not valid ignition type'))
properties['type'] = ign_type
properties['target'] = ign_target
return properties
|
Gets ignition type and target.
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with ignition type/target information
|
codesearchnet
|
def random_sample(list_, nSample, strict=False, rng=None, seed=None):
rng = ensure_rng((seed if (rng is None) else rng))
if isinstance(list_, list):
list2_ = list_[:]
else:
list2_ = np.copy(list_)
if ((len(list2_) == 0) and (not strict)):
return list2_
rng.shuffle(list2_)
if ((nSample is None) and (strict is False)):
return list2_
if (not strict):
nSample = min(max(0, nSample), len(list2_))
sample_list = list2_[:nSample]
return sample_list
|
Grabs data randomly
Args:
list_ (list):
nSample (?):
strict (bool): (default = False)
rng (module): random number generator(default = numpy.random)
seed (None): (default = None)
Returns:
list: sample_list
CommandLine:
python -m utool.util_numpy --exec-random_sample
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = np.arange(10)
>>> nSample = 4
>>> strict = False
>>> rng = np.random.RandomState(0)
>>> seed = None
>>> sample_list = random_sample(list_, nSample, strict, rng, seed)
>>> result = ('sample_list = %s' % (str(sample_list),))
>>> print(result)
|
codesearchnet
|
def _get_saver_or_default():
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if savers:
if len(savers) > 1:
raise RuntimeError('More than one item in collection {}. Please indicate which one to use by passing it to the constructor.'.format(collection_key))
return savers[0]
saver = Saver(sharded=True, allow_empty=True)
if saver is not None:
ops.add_to_collection(collection_key, saver)
return saver
|
Returns the saver from SAVERS collection, or creates a default one.
This method is used by other members of the training module, such as
`Scaffold`, or `CheckpointSaverHook`.
Returns:
`Saver`.
Raises:
RuntimeError: If the SAVERS collection already has more than one items.
|
github-repos
|
def _LineContainsI18n(line):
if style.Get('I18N_COMMENT'):
for tok in line.tokens:
if tok.is_comment and re.match(style.Get('I18N_COMMENT'), tok.value):
return True
if style.Get('I18N_FUNCTION_CALL'):
length = len(line.tokens)
for index in range(length - 1):
if line.tokens[index + 1].value == '(' and line.tokens[index].value in style.Get('I18N_FUNCTION_CALL'):
return True
return False
|
Return true if there are i18n comments or function calls in the line.
I18n comments and pseudo-function calls are closely related. They cannot
be moved apart without breaking i18n.
Arguments:
line: (logical_line.LogicalLine) The line currently being formatted.
Returns:
True if the line contains i18n comments or function calls. False otherwise.
|
github-repos
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
values_dict = {}
service_type_value = registry_key.GetValueByName('Type')
service_start_value = registry_key.GetValueByName('Start')
if service_type_value and service_start_value:
service_dll = self.GetServiceDll(registry_key)
if service_dll:
values_dict['ServiceDll'] = service_dll
for value in registry_key.GetValues():
if not value.name:
continue
if value.name not in values_dict:
if value.DataIsString() or value.DataIsInteger():
values_dict[value.name] = value.GetDataAsObject()
elif value.DataIsMultiString():
values_dict[value.name] = ', '.join(value.GetDataAsObject())
event_data = windows_events.WindowsRegistryServiceEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.urls = self.URLS
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
juraj-google-style
|
def _parameterize_string(raw):
parts = []
s_index = 0
for match in _PARAMETER_PATTERN.finditer(raw):
parts.append(raw[s_index:match.start()])
parts.append({u'Ref': match.group(1)})
s_index = match.end()
if (not parts):
return GenericHelperFn(raw)
parts.append(raw[s_index:])
return GenericHelperFn({u'Fn::Join': [u'', parts]})
|
Substitute placeholders in a string using CloudFormation references
Args:
raw (`str`): String to be processed. Byte strings are not
supported; decode them before passing them to this function.
Returns:
`str` | :class:`troposphere.GenericHelperFn`: An expression with
placeholders from the input replaced, suitable to be passed to
Troposphere to be included in CloudFormation template. This will
be the input string without modification if no substitutions are
found, and a composition of CloudFormation calls otherwise.
|
codesearchnet
|
def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = HangoutsMessageData()
event_data.sender = self._GetRowValue(query_hash, row, 'full_name')
event_data.body = self._GetRowValue(query_hash, row, 'text')
event_data.offset = self._GetRowValue(query_hash, row, '_id')
event_data.query = query
event_data.message_status = self._GetRowValue(query_hash, row, 'status')
event_data.message_type = self._GetRowValue(query_hash, row, 'type')
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a Messages row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
juraj-google-style
|
def create(self, *args, **kwargs):
rules_dict = [rule.__dict__ for rule in self.forwarding_rules]
params = {'name': self.name, 'region': self.region, 'forwarding_rules': rules_dict, 'redirect_http_to_https': self.redirect_http_to_https}
if (self.droplet_ids and self.tag):
raise ValueError('droplet_ids and tag are mutually exclusive args')
elif self.tag:
params['tag'] = self.tag
else:
params['droplet_ids'] = self.droplet_ids
if self.algorithm:
params['algorithm'] = self.algorithm
if self.health_check:
params['health_check'] = self.health_check.__dict__
if self.sticky_sessions:
params['sticky_sessions'] = self.sticky_sessions.__dict__
data = self.get_data('load_balancers/', type=POST, params=params)
if data:
self.id = data['load_balancer']['id']
self.ip = data['load_balancer']['ip']
self.algorithm = data['load_balancer']['algorithm']
self.health_check = HealthCheck(**data['load_balancer']['health_check'])
self.sticky_sessions = StickySesions(**data['load_balancer']['sticky_sessions'])
self.droplet_ids = data['load_balancer']['droplet_ids']
self.status = data['load_balancer']['status']
self.created_at = data['load_balancer']['created_at']
return self
|
Creates a new LoadBalancer.
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
name (str): The Load Balancer's name
region (str): The slug identifier for a DigitalOcean region
algorithm (str, optional): The load balancing algorithm to be
used. Currently, it must be either "round_robin" or
"least_connections"
forwarding_rules (obj:`list`): A list of `ForwardingRules` objects
health_check (obj, optional): A `HealthCheck` object
sticky_sessions (obj, optional): A `StickySessions` object
redirect_http_to_https (bool, optional): A boolean indicating
whether HTTP requests to the Load Balancer should be
redirected to HTTPS
droplet_ids (obj:`list` of `int`): A list of IDs representing
Droplets to be added to the Load Balancer (mutually
exclusive with 'tag')
tag (str): A string representing a DigitalOcean Droplet tag
(mutually exclusive with 'droplet_ids')
|
codesearchnet
|
def partial_declaration_path(decl):
if (not decl):
return []
if (not decl.cache.partial_declaration_path):
result = [decl.partial_name]
parent = decl.parent
while parent:
if parent.cache.partial_declaration_path:
result.reverse()
decl.cache.partial_declaration_path = (parent.cache.partial_declaration_path + result)
return decl.cache.partial_declaration_path
else:
result.append(parent.partial_name)
parent = parent.parent
result.reverse()
decl.cache.partial_declaration_path = result
return result
return decl.cache.partial_declaration_path
|
Returns a list of parent declaration names, omitting template arguments
that have default values.
Args:
decl (declaration_t): declaration for which the partial declaration
path should be calculated.
Returns:
list[(str | basestring)]: list of names, where the first item is the top
parent's name and the last item is the input declaration's name.
|
codesearchnet
|
class AriaTextDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.mlp = AriaTextMoELayer(config)
|
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
|
github-repos
|
def getCmdOpts(self, text):
off = 0
_, off = s_syntax.nom(text, off, s_syntax.whites)
name, off = s_syntax.meh(text, off, s_syntax.whites)
_, off = s_syntax.nom(text, off, s_syntax.whites)
opts = {}
args = collections.deque([synt for synt in self._cmd_syntax if not synt[0].startswith('-')])
switches = {synt[0]: synt for synt in self._cmd_syntax if synt[0].startswith('-')}
for synt in self._cmd_syntax:
snam = synt[0].strip('-')
defval = synt[1].get('defval')
if defval is not None:
opts[snam] = defval
if synt[1].get('type') in ('list', 'kwlist'):
opts[snam] = []
def atswitch(t, o):
if not text.startswith('-', o):
return None, o
name, x = s_syntax.meh(t, o, s_syntax.whites)
swit = switches.get(name)
if swit is None:
return None, o
return swit, x
while off < len(text):
_, off = s_syntax.nom(text, off, s_syntax.whites)
swit, off = atswitch(text, off)
if swit is not None:
styp = swit[1].get('type', 'flag')
snam = swit[0].strip('-')
if styp == 'valu':
valu, off = s_syntax.parse_cmd_string(text, off)
opts[snam] = valu
elif styp == 'list':
valu, off = s_syntax.parse_cmd_string(text, off)
if not isinstance(valu, list):
valu = valu.split(',')
opts[snam].extend(valu)
elif styp == 'enum':
vals = swit[1].get('enum:vals')
valu, off = s_syntax.parse_cmd_string(text, off)
if valu not in vals:
raise s_exc.BadSyntax(mesg='%s (%s)' % (swit[0], '|'.join(vals)),
text=text)
opts[snam] = valu
else:
opts[snam] = True
continue
if not args:
raise s_exc.BadSyntax(mesg='trailing text: [%s]' % (text[off:],),
text=text)
synt = args.popleft()
styp = synt[1].get('type', 'valu')
if styp == 'glob':
opts[synt[0]] = text[off:]
break
if styp == 'list':
valu = []
while off < len(text):
item, off = s_syntax.parse_cmd_string(text, off)
valu.append(item)
opts[synt[0]] = valu
break
if styp == 'kwlist':
kwlist, off = s_syntax.parse_cmd_kwlist(text, off)
opts[snam] = kwlist
break
valu, off = s_syntax.parse_cmd_string(text, off)
opts[synt[0]] = valu
return opts
|
Use the _cmd_syntax def to split/parse/normalize the cmd line.
Args:
text (str): Command to process.
Notes:
This is implemented independently of argparse (et al) due to the
need for syntax-aware argument splitting. It also allows a different
split per command type.
Returns:
dict: An opts dictionary.
|
juraj-google-style
|
def _dqdv_combinded_frame(cell, **kwargs):
cycles = cell.get_cap(
method="forth-and-forth",
categorical_column=True,
label_cycle_number=True,
)
ica_df = dqdv_cycles(cycles, **kwargs)
assert isinstance(ica_df, pd.DataFrame)
return ica_df
|
Returns full cycle dqdv data for all cycles as one pd.DataFrame.
Args:
cell: CellpyData-object
Returns:
pandas.DataFrame with the following columns:
cycle: cycle number
voltage: voltage
dq: the incremental capacity
|
juraj-google-style
|
def create_korobov_samples(order, dim, base=17797):
values = numpy.empty(dim)
values[0] = 1
for idx in range(1, dim):
values[idx] = base*values[idx-1] % (order+1)
grid = numpy.mgrid[:dim, :order+1]
out = values[grid[0]] * (grid[1]+1) / (order+1.) % 1.
return out[:, :order]
|
Create Korobov lattice samples.
Args:
order (int):
The order of the Korobov latice. Defines the number of
samples.
dim (int):
The number of dimensions in the output.
base (int):
The number base used to calculate the distribution of values.
Returns (numpy.ndarray):
Korobov lattice with ``shape == (dim, order)``
|
juraj-google-style
|
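A quick standalone check of the recurrence and output shape for a small case, re-deriving the lattice with NumPy only.
import numpy as np

order, dim, base = 4, 3, 17797
values = np.empty(dim)
values[0] = 1
for idx in range(1, dim):
    values[idx] = base * values[idx - 1] % (order + 1)

grid = np.mgrid[:dim, :order + 1]
out = values[grid[0]] * (grid[1] + 1) / (order + 1.0) % 1.0
samples = out[:, :order]
print(samples.shape)                                   # (3, 4) == (dim, order)
print(samples.min() >= 0.0 and samples.max() < 1.0)    # True: samples live in [0, 1)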
def __init__(self, filehandles):
self._filehandles = filehandles
self._pools = [None] * len(filehandles)
|
Constructor.
Args:
filehandles: list of file handles that this writer outputs to.
|
juraj-google-style
|
def get_details(app='groupproject', env='dev', region='us-east-1'):
url = '{host}/applications/{app}'.format(host=API_URL, app=app)
request = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if (not request.ok):
raise SpinnakerAppNotFound('"{0}" not found.'.format(app))
app_details = request.json()
LOG.debug('App details: %s', app_details)
group = app_details['attributes'].get('repoProjectKey')
project = app_details['attributes'].get('repoSlug')
generated = gogoutils.Generator(group, project, env=env, region=region, formats=APP_FORMATS)
LOG.debug('Application details: %s', generated)
return generated
|
Extract details for Application.
Args:
app (str): Application Name
env (str): Environment/account to get details from
Returns:
collections.namedtuple with _group_, _policy_, _profile_, _role_,
_user_.
|
codesearchnet
|
def _HashRow(cls, row):
values = []
for value in row:
try:
value = '{0!s}'.format(value)
except UnicodeDecodeError:
value = repr(value)
values.append(value)
return hash(' '.join(values))
|
Hashes the given row.
Args:
row (sqlite3.Row): row.
Returns:
int: hash value of the given row.
|
codesearchnet
|
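The same order-sensitive hashing works on any iterable of values, not just sqlite3.Row; a small standalone check of that behaviour.
def hash_row(row):
    values = []
    for value in row:
        try:
            value = '{0!s}'.format(value)
        except UnicodeDecodeError:
            value = repr(value)        # fall back for undecodable bytes
        values.append(value)
    return hash(' '.join(values))

row_a = (1, 'chrome.exe', None)
row_b = (1, 'chrome.exe', None)
row_c = ('chrome.exe', 1, None)        # same values, different order
print(hash_row(row_a) == hash_row(row_b))  # True
print(hash_row(row_a) == hash_row(row_c))  # almost certainly False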
def convert_to_jax_compatible(cls, x):
return x
|
Convert a tensor to something that the JAX backend can consume.
This can be a `JAX` array, `JAXSparse` or a NumPy array.
Only called after slicing using `__getitem__`.
Used to convert sparse tensors and densify ragged tensors.
Args:
x: the tensor to convert.
Returns: the converted tensor.
|
github-repos
|
def get_containers(self, container_class):
with self._store_lock:
return self.store.get(container_class.CONTAINER_TYPE, [])
|
Thread-safe method to retrieve data from the state's store.
Args:
container_class: AttributeContainer class used to filter data.
Returns:
A list of AttributeContainer objects of matching CONTAINER_TYPE.
|
juraj-google-style
|
def _get_sorted_inputs(filename):
with tf.gfile.Open(filename) as f:
records = f.read().split('\n')
inputs = [record.strip() for record in records]
if (not inputs[-1]):
inputs.pop()
input_lens = [(i, len(line.split())) for (i, line) in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=(lambda x: x[1]), reverse=True)
sorted_inputs = []
sorted_keys = {}
for (i, (index, _)) in enumerate(sorted_input_lens):
sorted_inputs.append(inputs[index])
sorted_keys[index] = i
return (sorted_inputs, sorted_keys)
|
Read lines from the file and sort them by decreasing length.
Args:
filename: String name of file to read inputs from.
Returns:
Sorted list of inputs, and dictionary mapping original index->sorted index
of each element.
|
codesearchnet
|
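A minimal version of the same sort-by-length bookkeeping on an in-memory list (no tf.gfile), showing how sorted_keys lets the original order be recovered, e.g. to re-align model outputs with inputs.
lines = ['a b c', 'one', 'longer input with five tokens', 'two words']

input_lens = [(i, len(line.split())) for i, line in enumerate(lines)]
sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True)

sorted_inputs = []
sorted_keys = {}
for i, (index, _) in enumerate(sorted_input_lens):
    sorted_inputs.append(lines[index])
    sorted_keys[index] = i             # original index -> position in sorted list

print(sorted_inputs[0])                # 'longer input with five tokens'
restored = [sorted_inputs[sorted_keys[i]] for i in range(len(lines))]
print(restored == lines)               # True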
def _compute_posterior(self, likelihoods_watermarked: torch.Tensor, likelihoods_unwatermarked: torch.Tensor, mask: torch.Tensor, prior: float) -> torch.Tensor:
mask = torch.unsqueeze(mask, dim=-1)
prior = torch.clamp(prior, min=1e-05, max=1 - 1e-05)
log_likelihoods_watermarked = torch.log(torch.clamp(likelihoods_watermarked, min=1e-30, max=float('inf')))
log_likelihoods_unwatermarked = torch.log(torch.clamp(likelihoods_unwatermarked, min=1e-30, max=float('inf')))
log_odds = log_likelihoods_watermarked - log_likelihoods_unwatermarked
relative_surprisal_likelihood = torch.einsum('i...->i', log_odds * mask)
relative_surprisal_prior = torch.log(prior) - torch.log(1 - prior)
relative_surprisal = relative_surprisal_prior + relative_surprisal_likelihood
return torch.sigmoid(relative_surprisal)
|
Compute posterior P(w|g) given likelihoods, mask and prior.
Args:
likelihoods_watermarked (`torch.Tensor` of shape `(batch, length, depth)`):
Likelihoods P(g_values|watermarked) of g-values under watermarked model.
likelihoods_unwatermarked (`torch.Tensor` of shape `(batch, length, depth)`):
Likelihoods P(g_values|unwatermarked) of g-values under unwatermarked model.
mask (`torch.Tensor` of shape `(batch, length)`):
A binary array indicating which g-values should be used. g-values with mask value 0 are discarded.
prior (`float`):
the prior probability P(w) that the text is watermarked.
Returns:
Posterior probability P(watermarked|g_values), shape [batch].
|
github-repos
|
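The posterior is Bayes' rule in log-odds form; a NumPy sketch of the same computation for a single sequence, with illustrative likelihood values.
import numpy as np

def posterior(lik_w, lik_uw, mask, prior):
    # Sum of per-(position, depth) log-odds; masked positions contribute nothing.
    log_odds = np.log(np.clip(lik_w, 1e-30, None)) - np.log(np.clip(lik_uw, 1e-30, None))
    relative_surprisal_likelihood = np.sum(log_odds * mask[..., None])
    prior = np.clip(prior, 1e-5, 1 - 1e-5)
    relative_surprisal_prior = np.log(prior) - np.log(1 - prior)
    s = relative_surprisal_prior + relative_surprisal_likelihood
    return 1.0 / (1.0 + np.exp(-s))    # sigmoid -> P(watermarked | g-values)

lik_w = np.full((4, 2), 0.6)           # g-values slightly more likely when watermarked
lik_uw = np.full((4, 2), 0.5)
mask = np.array([1, 1, 1, 0])          # last position discarded
print(posterior(lik_w, lik_uw, mask, prior=0.5))  # ~0.75, leans watermarked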
def switch_to_frame(self, frame_reference=None):
if ((frame_reference is not None) and (type(frame_reference) not in [int, WebElement])):
raise TypeError('Type of frame_reference must be None or int or WebElement')
self._execute(Command.SWITCH_TO_FRAME, {'id': frame_reference})
|
Switches focus to the specified frame, by index, name, or webelement.
Support:
Web(WebView)
Args:
frame_reference(None|int|WebElement):
The identifier of the frame to switch to.
None means to set to the default context.
An integer representing the index.
A webelement means that is an (i)frame to switch to.
Otherwise throw an error.
Returns:
WebDriver Object.
|
codesearchnet
|
def args(self, args):
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
|
Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
|
juraj-google-style
|
def __init__(self, nw_ttl=None):
super().__init__(action_type=ActionType.OFPAT_SET_NW_TTL, length=8)
self.nw_ttl = nw_ttl
|
Create an ActionSetNWTTL with the optional parameters below.
Args:
nw_ttl (int): the TTL value to set in the IP header.
|
juraj-google-style
|
def class_label_top(body_output, targets, model_hparams, vocab_size):
del targets
with tf.variable_scope("class_label_modality_%d_%d" % (
vocab_size, model_hparams.hidden_size)):
x = body_output
x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
res = tf.layers.dense(x, vocab_size)
return tf.expand_dims(res, 3)
|
Transform inputs from model space to target space.
Average over inner dims and a linear layer to logits.
Args:
body_output: A Tensor with shape [batch, ?, ?, body_output_size].
targets: Unused.
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
Returns:
A Tensor with shape [batch_size, 1, 1, 1, vocab_size].
|
juraj-google-style
|
def usb(self, state):
state_lookup = {'off': 0, 'on': 1, 'auto': 2}
state = state.lower()
if (state in state_lookup):
current_state = self.mon.GetUsbPassthrough()
while (current_state != state_lookup[state]):
self.mon.SetUsbPassthrough(state_lookup[state])
time.sleep(1)
current_state = self.mon.GetUsbPassthrough()
return True
return False
|
Sets the monsoon's USB passthrough mode. This is specific to the
USB port in front of the monsoon box which connects to the powered
device, NOT the USB that is used to talk to the monsoon itself.
"Off" means USB always off.
"On" means USB always on.
"Auto" means USB is automatically turned off when sampling is going on,
and turned back on when sampling finishes.
Args:
state: The state to set the USB passthrough to.
Returns:
True if the state is legal and set. False otherwise.
|
codesearchnet
|
def create_option(name, ty, docstring, default_factory=lambda: None):
def get_fn(option):
if name not in option._options:
option._options[name] = default_factory()
return option._options.get(name)
def set_fn(option, value):
if not isinstance(value, ty):
raise TypeError('Property "{}" must be of type {}, got: {} (type: {})'.format(name, ty, value, type(value)))
option._options[name] = value
return property(get_fn, set_fn, None, docstring)
|
Creates a type-checked property.
Args:
name: The name to use.
ty: The type to use. The type of the property will be validated when it
is set.
docstring: The docstring to use.
default_factory: A callable that takes no arguments and returns a default
value to use if not set.
Returns:
A type-checked property.
|
github-repos
|
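A hypothetical usage sketch: the factory is restated inline so the snippet runs standalone, and attached to a small options class that keeps its values in a per-instance _options dict (the ReadOptions name is made up for illustration).
def create_option(name, ty, docstring, default_factory=lambda: None):
    def get_fn(option):
        if name not in option._options:
            option._options[name] = default_factory()
        return option._options.get(name)

    def set_fn(option, value):
        if not isinstance(value, ty):
            raise TypeError('Property "{}" must be of type {}, got: {} (type: {})'.format(
                name, ty, value, type(value)))
        option._options[name] = value

    return property(get_fn, set_fn, None, docstring)

class ReadOptions:
    buffer_size = create_option('buffer_size', int, 'Read buffer size in bytes.',
                                default_factory=lambda: 8192)

    def __init__(self):
        self._options = {}

opts = ReadOptions()
print(opts.buffer_size)      # 8192, from default_factory
opts.buffer_size = 1 << 20
print(opts.buffer_size)      # 1048576
try:
    opts.buffer_size = 'big'
except TypeError as exc:
    print(exc)               # the type check rejects non-int values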
def scatter_add(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')
return self._lazy_read(gen_resource_variable_ops.resource_scatter_add(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
|
Adds `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be added to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
|
github-repos
|
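A small end-to-end example of the user-facing call in eager mode, with illustrative values.
import tensorflow as tf

v = tf.Variable([1.0, 2.0, 3.0])
delta = tf.IndexedSlices(values=tf.constant([10.0, 20.0]),
                         indices=tf.constant([0, 2]))
v.scatter_add(delta)         # adds 10 at index 0 and 20 at index 2
print(v.numpy())             # [11.  2. 23.]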
def _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents):
with _TRACE_COUNT_CONSISTENCY_LOCK:
_TRACE_COUNT[op_name] = _TRACE_COUNT.get(op_name, 0) + 1
special_case = _SPECIAL_CASES.get(op_name, None)
if special_case is not None:
return special_case(attr_tuple, inputs, outputs, tangents)
if not outputs:
return []
with forwardprop_util.push_forwardprop_state():
trainable_inputs = []
trainable_indices = []
nontrivial_tangents = []
for input_index, tensor in enumerate(inputs):
if backprop_util.IsTrainable(tensor):
trainable_inputs.append(tensor)
trainable_indices.append(input_index)
nontrivial_tangents.append(tangents[input_index])
with backprop.GradientTape() as transpose_tape:
with backprop.GradientTape() as backfunc_tape:
backfunc_tape.watch(trainable_inputs)
execute.record_gradient(op_name, inputs, attr_tuple, outputs)
forwardprop_aids = []
trainable_outputs = []
nontrivial_output_indices = []
for output_index, output in enumerate(outputs):
if backprop_util.IsTrainable(output):
forwardprop_aids.append(array_ops.ones_like(output, name='unused_forwardprop_aid'))
trainable_outputs.append(output)
nontrivial_output_indices.append(output_index)
transpose_tape.watch(forwardprop_aids)
grads = backfunc_tape.gradient(trainable_outputs, trainable_inputs, forwardprop_aids, unconnected_gradients=UnconnectedGradients.ZERO)
nontrivial_output_tangents = transpose_tape.gradient(grads, forwardprop_aids, output_gradients=nontrivial_tangents)
output_tangents = [None] * len(outputs)
for index, tangent in zip(nontrivial_output_indices, nontrivial_output_tangents):
output_tangents[index] = tangent
return output_tangents
|
Computes a Jacobian-vector product for an op.
Note that this function would be wasteful if executed eagerly. It runs the
backward gradient function and throws away the result just to record its
operations on a GradientTape. These unused ops are pruned away when this
function is traced.
Args:
op_name: A string, the type of operation being executed.
attr_tuple: Attributes of the operation.
inputs: A flat list of input Tensors to the operation.
outputs: A flat list of output Tensors from the operation.
tangents: A flat list of Tensors, same shape as `inputs`.
Returns:
A flat list of tangents corresponding to `outputs`.
|
github-repos
|
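This helper underpins TensorFlow's forward-mode autodiff; the public entry point is tf.autodiff.ForwardAccumulator, which supplies the tangents described above. A short illustrative example:
import tensorflow as tf

x = tf.constant(3.0)
# Push a tangent of 1.0 for x; the accumulator tracks Jacobian-vector products.
with tf.autodiff.ForwardAccumulator(primals=x, tangents=tf.constant(1.0)) as acc:
    y = x * x + 2.0 * x
print(acc.jvp(y).numpy())    # dy/dx at x=3 -> 8.0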
def MergeOrAddUser(self, kb_user):
user = self.GetUser(sid=kb_user.sid, uid=kb_user.uid, username=kb_user.username)
new_attrs = []
merge_conflicts = []
if (not user):
new_attrs = self._CreateNewUser(kb_user)
else:
for (key, val) in iteritems(kb_user.AsDict()):
if (user.Get(key) and (user.Get(key) != val)):
merge_conflicts.append((key, user.Get(key), val))
user.Set(key, val)
new_attrs.append(('users.%s' % key))
return (new_attrs, merge_conflicts)
|
Merge a user into existing users or add new if it doesn't exist.
Args:
kb_user: A User rdfvalue.
Returns:
A tuple of (new_attrs, merge_conflicts): the set attribute names, e.g. ["users.sid"],
and any (key, old_value, new_value) conflicts found while merging.
|
codesearchnet
|
def ToType(item, allow_constants=False, allow_functions=False, allow_singletons=False):
if isinstance(item, Type):
return item
elif isinstance(item, Module):
return item
elif isinstance(item, (ParamSpecArgs, ParamSpecKwargs)):
return item
elif isinstance(item, Class):
return ClassType(item.name, item)
elif isinstance(item, Function) and allow_functions:
return item
elif isinstance(item, Constant):
if allow_singletons and item.name in SINGLETON_TYPES:
return item.type
elif item.type.name == 'builtins.type':
if isinstance(item.type, GenericType):
return item.type.parameters[0]
else:
return AnythingType()
elif isinstance(item.type, AnythingType) or item.name == 'typing_extensions.TypedDict':
return AnythingType()
elif allow_constants:
return item
elif isinstance(item, Alias):
return item.type
raise NotImplementedError(f"Can't convert {type(item)}: {item}")
|
Convert a pytd AST item into a type.
Takes an AST item representing the definition of a type and returns an item
representing a reference to the type. For example, if the item is a
pytd.Class, this method will return a pytd.ClassType whose cls attribute
points to the class.
Args:
item: A pytd.Node item.
allow_constants: When True, constants that cannot be converted to types will
be passed through unchanged.
allow_functions: When True, functions that cannot be converted to types will
be passed through unchanged.
allow_singletons: When True, singletons that act as their types in
annotations will return that type.
Returns:
A pytd.Type object representing the type of an instance of `item`.
|
github-repos
|
def find_bucket(self, bucketing_id, parent_id, traffic_allocations):
bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)
bucketing_number = self._generate_bucket_value(bucketing_key)
self.config.logger.debug('Assigned bucket %s to user with bucketing ID "%s".' % (
bucketing_number,
bucketing_id
))
for traffic_allocation in traffic_allocations:
current_end_of_range = traffic_allocation.get('endOfRange')
if bucketing_number < current_end_of_range:
return traffic_allocation.get('entityId')
return None
|
Determine entity based on bucket value and traffic allocations.
Args:
bucketing_id: ID to be used for bucketing the user.
parent_id: ID representing group or experiment.
traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
Returns:
Entity ID which may represent experiment or variation.
|
juraj-google-style
|
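A self-contained sketch of the bucketing idea: hash the combined key into [0, 10000) (a SHA-256 stand-in for Optimizely's MurmurHash-based _generate_bucket_value) and walk the traffic allocations.
import hashlib

MAX_TRAFFIC_VALUE = 10000
BUCKETING_ID_TEMPLATE = '{bucketing_id}{parent_id}'

def generate_bucket_value(bucketing_key):
    # Stand-in hash; the real SDK maps a MurmurHash3 value into [0, 10000).
    digest = hashlib.sha256(bucketing_key.encode('utf-8')).hexdigest()
    return int(digest, 16) % MAX_TRAFFIC_VALUE

def find_bucket(bucketing_id, parent_id, traffic_allocations):
    bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)
    bucketing_number = generate_bucket_value(bucketing_key)
    for traffic_allocation in traffic_allocations:
        if bucketing_number < traffic_allocation['endOfRange']:
            return traffic_allocation['entityId']
    return None

allocations = [{'entityId': 'variation_a', 'endOfRange': 5000},
               {'entityId': 'variation_b', 'endOfRange': 10000}]
print(find_bucket('user-42', 'exp-1', allocations))  # one of the two variation IDs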
async def get_next_match(self):
if (self._final_rank is not None):
return None
matches = (await self.get_matches(MatchState.open_))
if (len(matches) == 0):
matches = (await self.get_matches(MatchState.pending))
if (len(matches) > 0):
return matches[0]
return None
|
Return the first open match found or, if there is none, the first pending match found.
|methcoro|
Raises:
APIException
|
codesearchnet
|
def pack(self, value=None):
if isinstance(value, type(self)):
return value.pack()
if (value is None):
value = self.value
elif ('value' in dir(value)):
value = value.value
try:
return struct.pack(self._fmt, value)
except struct.error:
expected_type = type(self).__name__
actual_type = type(value).__name__
msg_args = (expected_type, value, actual_type)
msg = 'Expected {}, found value "{}" of type {}'.format(*msg_args)
raise PackException(msg)
|
r"""Pack the value as a binary representation.
Considering an example with UBInt8 class, that inherits from
GenericType:
>>> from pyof.foundation.basic_types import UBInt8
>>> objectA = UBInt8(1)
>>> objectB = 5
>>> objectA.pack()
b'\x01'
>>> objectA.pack(objectB)
b'\x05'
Args:
value: If the value is None, then we will pack the value of the
current instance. Otherwise, if value is an instance of the
same type as the current instance, then we call the pack of the
value object. Otherwise, we will use the current instance pack
method on the passed value.
Returns:
bytes: The binary representation.
Raises:
:exc:`~.exceptions.BadValueException`: If the value does not
fit the binary format.
|
codesearchnet
|