code | docstring | source |
---|---|---|
def init(deb1, deb2=False):
global DEBUG
global DEBUGALL
DEBUG = deb1
DEBUGALL = deb2
|
Initialize DEBUG and DEBUGALL.
Allows other modules to set DEBUG and DEBUGALL, so their
calls to dprint or dprintx generate output.
Args:
deb1 (bool): value of DEBUG to set
deb2 (bool): optional - value of DEBUGALL to set,
defaults to False.
|
codesearchnet
|
def round(x):
return math_ops.round(x)
|
Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Args:
x: Tensor or variable.
Returns:
A tensor.
|
github-repos
|
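As an aside, the "half to even" tie-breaking described above is the same rule Python's built-in `round` uses, so it can be illustrated without TensorFlow:
```python
# Ties are broken toward the nearest even integer ("banker's rounding").
print(round(0.5))   # 0
print(round(1.5))   # 2
print(round(2.5))   # 2
print(round(-0.5))  # 0
```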
def GetPluginObjects(cls, plugin_names):
plugin_objects = {}
for (plugin_name, plugin_class) in iter(cls._plugin_classes.items()):
if (plugin_name not in plugin_names):
continue
plugin_objects[plugin_name] = plugin_class()
return plugin_objects
|
Retrieves the plugin objects.
Args:
plugin_names (list[str]): names of plugins that should be retrieved.
Returns:
dict[str, AnalysisPlugin]: analysis plugins per name.
|
codesearchnet
|
def from_dict(event_dict):
return SnippetEvent(
callback_id=event_dict['callbackId'],
name=event_dict['name'],
creation_time=event_dict['time'],
data=event_dict['data'])
|
Create a SnippetEvent object from a dictionary.
Args:
event_dict: a dictionary representing an event.
Returns:
A SnippetEvent object.
|
juraj-google-style
|
def _ReadSequenceDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
if is_member:
supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE
else:
supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE
return self._ReadElementSequenceDataTypeDefinition(definitions_registry, definition_values, data_types.SequenceDefinition, definition_name, supported_definition_values)
|
Reads a sequence data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
SequenceDefinition: sequence data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
|
codesearchnet
|
def __init__(self, conf):
if not isinstance(conf, dict):
raise RuntimeError('Source constructor not passed a dictionary')
self.conf = conf
self.log = logging.getLogger(__name__)
|
Initialise the Source object.
Args:
conf: A dictionary of key/value pairs.
Raises:
RuntimeError: object wasn't initialised with a dict
|
github-repos
|
def CreateCustomizerFeed(client, feed_name):
ad_customizer_feed_service = client.GetService('AdCustomizerFeedService',
'v201809')
customizer_feed = {
'feedName': feed_name,
'feedAttributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = ad_customizer_feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['feedId'],
'nameId': feed['feedAttributes'][0]['id'],
'priceId': feed['feedAttributes'][1]['id'],
'dateId': feed['feedAttributes'][2]['id']
}
print ('Feed with name "%s" and ID %s was added with:\n'
'\tName attribute ID %s and price attribute ID %s and date attribute '
'ID %s') % (feed['feedName'], feed['feedId'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
return feed
else:
raise errors.GoogleAdsError('No feeds were added')
|
Creates a new AdCustomizerFeed.
Args:
client: an AdWordsClient instance.
feed_name: the name for the new AdCustomizerFeed.
Returns:
The new AdCustomizerFeed.
|
juraj-google-style
|
def _merge_doc(original, to_merge):
if (not original):
return (to_merge or '')
if (not to_merge):
return (original or '')
sections = []
for name in ('usage', 'arguments', 'options'):
sections.append(_merge_section(_get_section(name, original), _get_section(name, to_merge)))
return format_usage('\n\n'.join((s for s in sections)).rstrip())
|
Merge two usage strings together.
Args:
original: The source of headers and initial section lines.
to_merge: The source for the additional section lines to append.
Returns:
A new usage string that contains information from both usage strings.
|
codesearchnet
|
def check_file(path: str) -> str:
if os.path.isfile(path):
return path
else:
raise argparse.ArgumentTypeError(f"'{path}' is not found.")
|
Check if a given filepath exists or not.
Args:
path (str): Model path
Raises:
argparse.ArgumentTypeError: Raised if the given path does not exist.
Returns:
str: A model path.
|
github-repos
|
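A minimal usage sketch for `check_file` above, wiring it into `argparse` as a type validator (the temporary file is only there so the snippet runs as-is):
```python
import argparse
import tempfile

parser = argparse.ArgumentParser()
# check_file (defined above) returns the path if it exists, else argparse reports the error.
parser.add_argument("model_path", type=check_file)

with tempfile.NamedTemporaryFile(suffix=".onnx") as tmp:
    args = parser.parse_args([tmp.name])
    print(args.model_path)  # the validated path
```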
def get_text_features(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor:
text_features = self.groupvit.get_text_features(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
return text_features
|
Returns:
text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
the projection layer to the pooled output of [`TFGroupViTTextModel`].
Examples:
```python
>>> from transformers import CLIPTokenizer, TFGroupViTModel
>>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
>>> text_features = model.get_text_features(**inputs)
```
|
github-repos
|
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(config, request, global_params=global_params)
|
Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.
Args:
request: (BigqueryTablesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
|
github-repos
|
def email(self, name, to, from_addr, subject, body, header, owner=None, **kwargs):
return Email(self.tcex, name, to, from_addr, subject, body, header, owner=owner, **kwargs)
|
Create the Email TI object.
Args:
owner:
to:
from_addr:
name:
subject:
header:
body:
**kwargs:
Return:
Email: The created Email TI object.
|
codesearchnet
|
def write_input(self, output_dir=".", make_dir_if_not_present=True):
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.items():
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
|
Write VASP input to a directory.
Args:
output_dir (str): Directory to write to. Defaults to current
directory (".").
make_dir_if_not_present (bool): Create the directory if not
present. Defaults to True.
|
juraj-google-style
|
def validate_items(self):
logger.debug(fmt('Validating {}', self))
from python_jsonschema_objects import classbuilder
if (self.__itemtype__ is None):
return
type_checks = self.__itemtype__
if (not isinstance(type_checks, (tuple, list))):
type_checks = ([type_checks] * len(self.data))
elif (len(type_checks) > len(self.data)):
raise ValidationError('{1} does not have sufficient elements to validate against {0}'.format(self.__itemtype__, self.data))
typed_elems = []
for (elem, typ) in zip(self.data, type_checks):
if isinstance(typ, dict):
for (param, paramval) in six.iteritems(typ):
validator = registry(param)
if (validator is not None):
validator(paramval, elem, typ)
typed_elems.append(elem)
elif util.safe_issubclass(typ, classbuilder.LiteralValue):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, classbuilder.ProtocolBase):
if (not isinstance(elem, typ)):
try:
if isinstance(elem, (six.string_types, six.integer_types, float)):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError("'{0}' is not a valid value for '{1}': {2}".format(elem, typ, e))
else:
val = elem
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, ArrayWrapper):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)):
try:
if isinstance(elem, (six.string_types, six.integer_types, float)):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError("'{0}' is not a valid value for '{1}': {2}".format(elem, typ, e))
else:
val.validate()
typed_elems.append(val)
self._dirty = False
self._typed = typed_elems
return typed_elems
|
Validates the items in the backing array, including
performing type validation.
Sets the _typed property and clears the dirty flag as a side effect
Returns:
The typed array
|
codesearchnet
|
def remove_user_from_template(self, template_id, account_id=None, email_address=None):
return self._add_remove_user_template(self.TEMPLATE_REMOVE_USER_URL, template_id, account_id, email_address)
|
Removes the specified Account's access to the specified Template
Args:
template_id (str): The id of the template to remove the account's access from.
account_id (str): The id of the account to remove access from the template. The account id prevails if both account_id and email_address are provided.
email_address (str): The email address of the account to remove access from.
Returns:
A Template object
|
codesearchnet
|
def distinct_values_of(self, field, count_deleted=False):
solr_params = "facet=true&facet.field=%s&rows=0" % field
result = self.riak_http_search_query(self.index_name, solr_params, count_deleted)
facet_fields = result['facet_counts']['facet_fields'][field]
keys = facet_fields[0::2]
vals = facet_fields[1::2]
return dict(zip(keys, vals))
|
Gets distinct values of the given facet field and their counts, using the Riak HTTP search (Solr facet) endpoint.
Args:
field (str): facet field
count_deleted (bool): ignore deleted or not
Returns:
(dict): pairs of field values and number of counts
|
juraj-google-style
|
def get_linenumbers(functions, module, searchstr='def {}(image):\n'):
lines = inspect.getsourcelines(module)[0]
line_numbers = {}
for function in functions:
try:
line_numbers[function] = (lines.index(searchstr.format(function)) + 1)
except ValueError:
print('Can not find `{}`'.format(searchstr.format(function)))
line_numbers[function] = 0
return line_numbers
|
Returns a dictionary which maps function names to line numbers.
Args:
functions: a list of function names
module: the module in which to look up the functions
searchstr: the string to search for
Returns:
A dictionary with functions as keys and their line numbers as values.
|
codesearchnet
|
def __init__(
self, location=None, parent=None, volume_index=None, **kwargs):
if not parent:
raise ValueError('Missing parent value.')
super(APFSContainerPathSpec, self).__init__(parent=parent, **kwargs)
self.location = location
self.volume_index = volume_index
|
Initializes a path specification.
Note that an APFS container path specification must have a parent.
Args:
location (Optional[str]): location.
parent (Optional[PathSpec]): parent path specification.
volume_index (Optional[int]): index of the volume within the container.
Raises:
ValueError: when parent is not set.
|
juraj-google-style
|
def extend(self, trajectory):
if (self.time_step != trajectory.time_step):
raise ValueError('Trajectory not extended: Time steps of trajectories is incompatible')
if ((len(self.species) != len(trajectory.species)) and (self.species != trajectory.species)):
raise ValueError('Trajectory not extended: species in trajectory do not match')
self.to_positions()
trajectory.to_positions()
self.frac_coords = np.concatenate((self.frac_coords, trajectory.frac_coords), axis=0)
(self.lattice, self.constant_lattice) = self._combine_attribute(self.lattice, trajectory.lattice, self.frac_coords.shape[0], trajectory.frac_coords.shape[0])
self.site_properties = self._combine_attribute(self.site_properties, trajectory.site_properties, self.frac_coords.shape[0], trajectory.frac_coords.shape[0])
|
Concatenate another trajectory
Args:
trajectory (Trajectory): Trajectory to add
|
codesearchnet
|
def clone(self, name=None):
if (name is None):
name = (self.module_name + '_clone')
return MLP(name=name, output_sizes=self.output_sizes, activation=self.activation, activate_final=self.activate_final, initializers=self.initializers, partitioners=self.partitioners, regularizers=self.regularizers, use_bias=self.use_bias, use_dropout=self.use_dropout)
|
Creates a new MLP with the same structure.
Args:
name: Optional string specifying the name of the new module. The default
name is constructed by appending "_clone" to the original name.
Returns:
A cloned `MLP` module.
|
codesearchnet
|
def getOSName(self):
_system = platform.system()
if (_system in [self.__class__.OS_WINDOWS, self.__class__.OS_MAC, self.__class__.OS_LINUX]):
if (_system == self.__class__.OS_LINUX):
_dist = platform.linux_distribution()[0]
if (_dist.lower() == self.__class__.OS_UBUNTU.lower()):
return self.__class__.OS_UBUNTU
elif (_dist.lower() == self.__class__.OS_DEBIAN.lower()):
return self.__class__.OS_DEBIAN
elif (_dist.lower() == self.__class__.OS_CENTOS.lower()):
return self.__class__.OS_CENTOS
elif (_dist.lower() == self.__class__.OS_REDHAT.lower()):
return self.__class__.OS_REDHAT
elif (_dist.lower() == self.__class__.OS_KALI.lower()):
return self.__class__.OS_KALI
return _system
else:
return None
|
Get the OS name. If OS is linux, returns the Linux distribution name
Returns:
str: OS name
|
codesearchnet
|
def _prefix_exists_in_gcs(gcs_prefix, credentials=None):
gcs_service = _get_storage_service(credentials)
(bucket_name, prefix) = gcs_prefix[len('gs://'):].split('/', 1)
request = gcs_service.objects().list(bucket=bucket_name, prefix=prefix, maxResults=1)
response = request.execute()
return response.get('items', None)
|
Check whether there is a GCS object whose name starts with the prefix.
Since GCS doesn't actually have folders, this is how we check instead.
Args:
gcs_prefix: The path; should start with 'gs://'.
credentials: Optional credential to be used to load the file from gcs.
Returns:
True if the prefix matches at least one object in GCS.
Raises:
errors.HttpError: if it can't talk to the server
|
codesearchnet
|
def with_extrapolation(points, noise, n_points):
n_points = 10
return kalman_filter((extrapolate_points(points, n_points) + points), noise)[n_points:]
|
Smooths a set of points, but it extrapolates some points at the beginning
Args:
points (:obj:`list` of :obj:`Point`)
noise (float): Expected noise, the higher it is the more the path will
be smoothed.
Returns:
:obj:`list` of :obj:`Point`
|
codesearchnet
|
def _produce_request(self, node_id, acks, timeout, batches):
produce_records_by_partition = collections.defaultdict(dict)
for batch in batches:
topic = batch.topic_partition.topic
partition = batch.topic_partition.partition
buf = batch.records.buffer()
produce_records_by_partition[topic][partition] = buf
kwargs = {}
if (self.config['api_version'] >= (0, 11)):
version = 3
kwargs = dict(transactional_id=None)
elif (self.config['api_version'] >= (0, 10)):
version = 2
elif (self.config['api_version'] == (0, 9)):
version = 1
else:
version = 0
return ProduceRequest[version](required_acks=acks, timeout=timeout, topics=[(topic, list(partition_info.items())) for (topic, partition_info) in six.iteritems(produce_records_by_partition)], **kwargs)
|
Create a produce request from the given record batches.
Returns:
ProduceRequest (version depends on api_version)
|
codesearchnet
|
def _apply_conv(self, inputs, w):
outputs = tf.nn.convolution(inputs, w, strides=self._stride,
padding=self._conv_op_padding,
dilation_rate=self._rate,
data_format=self._data_format)
return outputs
|
Apply a convolution operation on `inputs` using variable `w`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
w: A weight matrix of the same type as `inputs`.
Returns:
outputs: The result of the convolution operation on `inputs`.
|
juraj-google-style
|
def install(path, capture_error=False):
cmd = '%s -m pip install -U . ' % _process.python_executable()
if has_requirements(path):
cmd += '-r requirements.txt'
logger.info('Installing module with the following command:\n%s', cmd)
_process.check_error(shlex.split(cmd), _errors.InstallModuleError, cwd=path, capture_error=capture_error)
|
Install a Python module in the executing Python environment.
Args:
path (str): Real path location of the Python module.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
|
juraj-google-style
|
def indicator_arrays(tc_entity_array):
type_dict = {}
for ea in tc_entity_array:
type_dict.setdefault(ea['type'], []).append(ea['value'])
return type_dict
|
Convert TCEntityArray to Indicator Type dictionary.
Args:
tc_entity_array (list): The TCEntityArray to convert.
Returns:
(dictionary): Dictionary containing arrays of indicators for each indicator type.
|
codesearchnet
|
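A small input/output sketch for `indicator_arrays` above; the indicator values are made up for illustration:
```python
tc_entity_array = [
    {"type": "Address", "value": "1.1.1.1"},   # hypothetical entities
    {"type": "Address", "value": "2.2.2.2"},
    {"type": "Host", "value": "example.com"},
]
print(indicator_arrays(tc_entity_array))
# {'Address': ['1.1.1.1', '2.2.2.2'], 'Host': ['example.com']}
```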
def _update_token(self, request):
self._source_credentials.refresh(request)
body = {'delegates': self._delegates, 'scope': self._target_scopes, 'lifetime': (str(self._lifetime) + 's')}
headers = {'Content-Type': 'application/json'}
self._source_credentials.apply(headers)
(self.token, self.expiry) = _make_iam_token_request(request=request, principal=self._target_principal, headers=headers, body=body)
|
Updates credentials with a new access_token representing
the impersonated account.
Args:
request (google.auth.transport.requests.Request): Request object
to use for refreshing credentials.
|
codesearchnet
|
def register(self, message, host):
cuuid = message['cuuid']
if (len(self.registry) > self.registration_limit):
logger.warning(('<%s> Registration limit exceeded' % cuuid))
response = serialize_data({'method': 'BYE REGISTER'}, self.compression, encryption=False)
return response
data = {'host': host[0], 'port': host[1], 'time': datetime.now()}
return_msg = {'method': 'OK REGISTER'}
if (('encryption' in message) and self.encryption):
data['encryption'] = PublicKey(message['encryption'][0], message['encryption'][1])
self.encrypted_hosts[host] = cuuid
return_msg['encryption'] = [self.encryption.n, self.encryption.e]
if (cuuid in self.registry):
for key in data:
self.registry[cuuid][key] = data[key]
else:
self.registry[cuuid] = data
self.registry[cuuid]['authenticated'] = False
response = serialize_data(return_msg, self.compression, encryption=False)
logger.debug(('<%s> Registry entries:' % cuuid))
for (key, value) in self.registry.items():
logger.debug(('<%s> %s %s' % (str(cuuid), str(key), pformat(value))))
return response
|
This function will register a particular client in the server's
registry dictionary.
Any clients that are registered will be able to send and receive events
to and from the server.
Args:
message (dict): The client message from the client who wants to
register.
host (tuple): The (address, port) tuple of the client that is
registering.
Returns:
A server response with an "OK REGISTER" if the registration was
successful or a "BYE REGISTER" if unsuccessful.
|
codesearchnet
|
def __init__(self, parent, *args, **kwargs):
self.parent = parent
super().__init__(*args, **kwargs)
|
A simple landing view; the template may be overwritten to customize.
Args:
parent (Group): ``Group`` host of ``self``.
|
juraj-google-style
|
def image(cam):
(yield marv.set_header(title=cam.topic))
msg = (yield marv.pull(cam))
if (msg is None):
return
pytype = get_message_type(cam)
rosmsg = pytype()
rosmsg.deserialize(msg.data)
name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])
imgfile = (yield marv.make_file(name))
img = imgmsg_to_cv2(rosmsg, 'rgb8')
cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))
(yield marv.push(imgfile))
|
Extract first image of input stream to jpg file.
Args:
cam: Input stream of raw rosbag messages.
Returns:
File instance for first image of input stream.
|
codesearchnet
|
def report_progress(stream=None):
if (stream is None):
stream = sys.stderr
for reporter in _reporters:
reporter(stream)
|
Report progress from any currently installed reporters.
Args:
stream: The text stream (default: sys.stderr) to which
progress will be reported.
|
codesearchnet
|
def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
max_rel_dist = int(2 * max(q_size, k_size) - 1)
rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
|
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int):
size of the query.
k_size (int):
size of key k.
rel_pos (`torch.Tensor`):
relative position embeddings (L, channel).
Returns:
Extracted positional embeddings according to relative positions.
|
github-repos
|
def rotate(self, matrix, tol=0.001):
matrix = SquareTensor(matrix)
if (not matrix.is_rotation(tol)):
raise ValueError('Rotation matrix is not valid.')
sop = SymmOp.from_rotation_and_translation(matrix, [0.0, 0.0, 0.0])
return self.transform(sop)
|
Applies a rotation directly, and tests input matrix to ensure a valid
rotation.
Args:
matrix (3x3 array-like): rotation matrix to be applied to tensor
tol (float): tolerance for testing rotation matrix validity
|
codesearchnet
|
def parse_GDS_columns(lines, subsets):
data = []
index = []
for line in lines:
line = line.rstrip()
if line.startswith('#'):
tmp = __parse_entry(line)
data.append(tmp[1])
index.append(tmp[0])
df = DataFrame(data, index=index, columns=['description'])
subset_ids = defaultdict(dict)
for (subsetname, subset) in iteritems(subsets):
for expid in subset.metadata['sample_id'][0].split(','):
try:
subset_type = subset.get_type()
subset_ids[subset_type][expid] = subset.metadata['description'][0]
except Exception as err:
logger.error(('Error processing subsets: %s for subset %s' % (subset.get_type(), subsetname)))
return df.join(DataFrame(subset_ids))
|
Parse a list of lines with column descriptions from a SOFT file of GDS.
Args:
lines (:obj:`Iterable`): Iterator over the lines.
subsets (:obj:`dict` of :obj:`GEOparse.GDSSubset`): Subsets to use.
Returns:
:obj:`pandas.DataFrame`: Columns description.
|
codesearchnet
|
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_buffer = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(local_buffer, kmip_version=kmip_version)
self.length = local_buffer.length()
super(GetAttributeListRequestPayload, self).write(output_buffer, kmip_version=kmip_version)
output_buffer.write(local_buffer.buffer)
|
Write the data encoding the GetAttributeList request payload to a
stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def set_encapsulation(self, name, vid, default=False, disable=False):
if ('.' not in name):
raise NotImplementedError('parameter encapsulation can only be set on subinterfaces')
if (name[0:2] not in ['Et', 'Po']):
raise NotImplementedError('parameter encapsulation can only be set on Ethernet and Port-Channel subinterfaces')
commands = [('interface %s' % name)]
commands.append(self.command_builder('encapsulation dot1q vlan', str(vid), default=default, disable=disable))
return self.configure(commands)
|
Configures the subinterface encapsulation value
Args:
name (string): The interface identifier. It must be a full
interface name (ie Ethernet, not Et)
vid (int): The vlan id number
default (boolean): Specifies to default the subinterface
encapsulation
disable (boolean): Specifies to disable the subinterface
encapsulation
Returns:
True if the operation succeeds, otherwise False is returned
|
codesearchnet
|
def frag2text(endpoint, stype, selector, clean=False, raw=False, verbose=False):
try:
return main(endpoint, stype, selector, clean, raw, verbose)
except StandardError as err:
return err
|
Returns Markdown text of the selected fragment.
Args:
endpoint: URL, file, or HTML string
stype: { 'css' | 'xpath' }
selector: CSS selector or XPath expression
Returns:
Markdown text
Options:
clean: cleans fragment (lxml.html.clean defaults)
raw: returns raw HTML fragment
verbose: show http status, encoding, headers
|
codesearchnet
|
def indexed_slices_intersection_indices_and_values(x1, x2):
dim_0 = x1.dense_shape[0]
x1_indices_expanded = tf.expand_dims(x1.indices, axis=1)
x2_indices_expanded = tf.expand_dims(x2.indices, axis=1)
x1_indices_count = x1_indices_expanded.shape[0]
x2_indices_count = x2_indices_expanded.shape[0]
x1_indices_one_hot = tf.scatter_nd(x1_indices_expanded, ones_bool((x1_indices_count,)), (dim_0,))
x2_indices_one_hot = tf.scatter_nd(x2_indices_expanded, ones_bool((x2_indices_count,)), (dim_0,))
intersection_indices = tf.squeeze(tf.where(tf.math.logical_and(x1_indices_one_hot, x2_indices_one_hot)), axis=-1)
intersection_indices_count = tf.shape(intersection_indices)[0]
def empty_intersection():
return (intersection_indices, tf.zeros((0,) + x1.values.shape[1:], x1.dtype), tf.zeros((0,) + x2.values.shape[1:], x2.dtype))
def non_empty_intersection():
def values_for_intersection(indices_expanded, indices_count, values):
indices_indices = tf.scatter_nd(indices_expanded, tf.range(indices_count), (dim_0,))
to_intersection_indices = tf.gather(indices_indices, intersection_indices)
return tf.gather(values, to_intersection_indices)
x1_values_for_intersection = tf.cond(tf.equal(x1_indices_count, intersection_indices_count), lambda: x1.values, lambda: values_for_intersection(x1_indices_expanded, x1_indices_count, x1.values))
x2_values_for_intersection = tf.cond(tf.equal(x2_indices_count, intersection_indices_count), lambda: x2.values, lambda: values_for_intersection(x2_indices_expanded, x2_indices_count, x2.values))
return (intersection_indices, x1_values_for_intersection, x2_values_for_intersection)
return tf.cond(tf.equal(intersection_indices_count, 0), empty_intersection, non_empty_intersection)
|
Compute the indices for the intersection of two `tf.IndexedSlices` and
modify the values for these indices.
Args:
x1: the first `tf.IndexedSlices`.
x2: the second `tf.IndexedSlices`.
Returns: A tuple containing:
- the indices for the intersection
- `x1` values for the intersection indices (some values were removed)
- `x2` values for the intersection indices (some values were removed)
|
github-repos
|
def start_task(self, task_type_str, current_task_index=None):
assert (task_type_str in self._task_dict), 'Task type has not been started yet: {}'.format(task_type_str)
if (current_task_index is not None):
self._task_dict[task_type_str]['task_idx'] = current_task_index
else:
self._task_dict[task_type_str]['task_idx'] += 1
self._log_progress_if_interval_elapsed()
|
Call when processing is about to start on a single task of the given task
type, typically at the top of the loop that processes the tasks.
Args:
task_type_str (str):
The name of the task, used as a dict key and printed in the progress
updates.
current_task_index (int):
If the task processing loop may skip or repeat tasks, the index of the
current task must be provided here. This parameter can normally be left
unset.
|
codesearchnet
|
def _GetImportTimestamps(self, pefile_object):
import_timestamps = []
if (not hasattr(pefile_object, 'DIRECTORY_ENTRY_IMPORT')):
return import_timestamps
for importdata in pefile_object.DIRECTORY_ENTRY_IMPORT:
dll_name = getattr(importdata, 'dll', '')
try:
dll_name = dll_name.decode('ascii')
except UnicodeDecodeError:
dll_name = dll_name.decode('ascii', errors='replace')
if (not dll_name):
dll_name = '<NO DLL NAME>'
timestamp = getattr(importdata.struct, 'TimeDateStamp', 0)
if timestamp:
import_timestamps.append([dll_name, timestamp])
return import_timestamps
|
Retrieves timestamps from the import directory, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[int]: import timestamps.
|
codesearchnet
|
def check_grads(self, f, args, atol=None, rtol=None, delta=None):
if delta is None:
dtype = np_utils.result_type(*args)
epsilon = onp.finfo(dtype).eps
delta = epsilon ** (1.0 / 3.0)
theoretical, numerical = gradient_checker_v2.compute_gradient(to_tf_fn(f), args, delta=delta)
self.assertAllClose(theoretical, numerical, check_dtypes=False, atol=atol, rtol=rtol)
|
Check gradients against finite differences.
Args:
f: function to check at ``f(*args)``.
args: a list or tuple of argument values.
atol: absolute tolerance for gradient equality.
rtol: relative tolerance for gradient equality.
delta: step size used for finite differences.
|
github-repos
|
def add_user_to_template(self, template_id, account_id=None, email_address=None):
return self._add_remove_user_template(self.TEMPLATE_ADD_USER_URL, template_id, account_id, email_address)
|
Gives the specified Account access to the specified Template
Args:
template_id (str): The id of the template to give the account access to
account_id (str): The id of the account to give access to the template. The account id prevails if both account_id and email_address are provided.
email_address (str): The email address of the account to give access to.
Returns:
A Template object
|
codesearchnet
|
def AddCustomJsonEnumMapping(enum_type, python_name, json_name, package=None):
if (not issubclass(enum_type, messages.Enum)):
raise exceptions.TypecheckError(('Cannot set JSON enum mapping for non-enum "%s"' % enum_type))
if (python_name not in enum_type.names()):
raise exceptions.InvalidDataError(('Enum value %s not a value for type %s' % (python_name, enum_type)))
field_mappings = _JSON_ENUM_MAPPINGS.setdefault(enum_type, {})
_CheckForExistingMappings('enum', enum_type, python_name, json_name)
field_mappings[python_name] = json_name
|
Add a custom wire encoding for a given enum value.
This is primarily used in generated code, to handle enum values
which happen to be Python keywords.
Args:
enum_type: (messages.Enum) An enum type
python_name: (basestring) Python name for this value.
json_name: (basestring) JSON name to be used on the wire.
package: (NoneType, optional) No effect, exists for legacy compatibility.
|
codesearchnet
|
def discard_observer(self, observer):
discarded = False
key = self.make_key(observer)
if (key in self.observers):
del self.observers[key]
discarded = True
return discarded
|
Un-register an observer.
Args:
observer: The observer to un-register.
Returns:
True if an observer was removed, otherwise False.
|
codesearchnet
|
def persist(id_obj, filename=None, suffix=None):
if (suffix is None):
suffix = '.pickle'
if hasattr(id_obj, 'id'):
ident = id_obj.id
else:
ident = str(id(id_obj))
if (filename is None):
filename = '{obj_id}{suffix}'.format(obj_id=ident, suffix=suffix)
with open(filename, 'wb') as obj_file:
dill.dump(id_obj, obj_file)
return os.path.abspath(filename)
|
Persist an object in the filesystem.
This will generate a pickled version of the given obj in the filename path.
Objects shall provide an id() method to be able to use this persistence API.
If not, we will use the id() builtin of python to generate an identifier
for you.
The file will be created, if it does not exist.
If the file already exists, we will overwrite it.
Args:
id_obj (Any): An identifiable object you want to persist in the
filesystem.
|
codesearchnet
|
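A short usage sketch for `persist` above (hypothetical object; assumes the `dill` package and the module-level imports used by the original file are available):
```python
class Model:
    # Hypothetical object without an `id` attribute, so the built-in id() is used as identifier.
    def __init__(self, weights):
        self.weights = weights

path = persist(Model([1, 2, 3]), suffix=".pkl")
print(path)  # absolute path of the pickle written to the current directory
```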
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RemBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (`List[int]`, *optional*, defaults to `None`):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
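A plain-Python trace of the layout produced above; the special-token ids are hypothetical stand-ins, not the real RemBERT vocabulary ids:
```python
cls, sep = [101], [102]           # hypothetical [CLS] and [SEP] ids
ids_a, ids_b = [7, 8], [9, 10]    # hypothetical input ids

print(cls + ids_a + sep)                # [101, 7, 8, 102]              -> [CLS] X [SEP]
print(cls + ids_a + sep + ids_b + sep)  # [101, 7, 8, 102, 9, 10, 102]  -> [CLS] A [SEP] B [SEP]
```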
def _join_index_objects(self, axis, other_index, how, sort=True):
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
|
Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to make (e.g. right, left).
Returns:
Joined indices.
|
juraj-google-style
|
def type_parameter(self, unknown: _UnknownType, base_class: pytd.Class, item: pytd.TemplateItem) -> StrictType:
assert is_unknown(unknown)
name = unknown.name + '.' + base_class.name + '.' + item.type_param.name
return StrictType(name)
|
This generates the type parameter when matching against a generic type.
For example, when we match ~unknown1 against list[T], we need an additional
type to model the T in "~unknown1[T]". This type would have the name
"~unknown1.list.T".
Args:
unknown: An unknown type. This is the type that's matched against
base_class[T].
base_class: The base class of the generic we're matching the unknown
against. E.g. "list".
item: The actual type parameter. ("T" in the examples above).
Returns:
A type (pytd.Node) to represent this type parameter.
|
github-repos
|
def check(self, read_tuple_name):
parts = read_tuple_name.split("__")
if len(parts[0]) != self.prefix_width or len(parts[1]) != self.read_tuple_id_width:
return False
segments = parts[2][1:-1].split("),(")
for segment in segments:
int_widths = list(map(len, segment.split(",")))
if self.genome_id_width != int_widths[0]:
return False
if self.chr_id_width != int_widths[1]:
return False
if self.coor_width != int_widths[3] or self.coor_width != int_widths[4]:
return False
return True
|
Check if the given read tuple name satisfies this profile.
Args:
read_tuple_name (str): Read tuple name.
Returns:
bool: True if the read tuple name satisfies this profile, False otherwise.
|
juraj-google-style
|
def plot_projectors(self, ax=None, fontsize=12, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax)
title = kwargs.pop("title", "Projectors")
ax.grid(True)
ax.set_xlabel('r [Bohr]')
ax.set_ylabel(r"$r\tilde p\, [Bohr]^{-\frac{1}{2}}$")
for state, rfunc in self.projector_functions.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)
ax.legend(loc="best", shadow=True, fontsize=fontsize)
return fig
|
Plot the PAW projectors.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns: `matplotlib` figure
|
juraj-google-style
|
def _ExtractPath(response, pathspec_attribute=None):
path_specification = response
if pathspec_attribute is not None:
if response.HasField(pathspec_attribute):
path_specification = response.Get(pathspec_attribute)
if path_specification.HasField("pathspec"):
path_specification = path_specification.pathspec
if path_specification.HasField("path"):
path_specification = path_specification.path
if isinstance(path_specification, Text):
return path_specification
return None
|
Returns the path from a client action response as a string.
Args:
response: A client action response.
pathspec_attribute: Specifies the field which stores the pathspec.
Returns:
The path as a string or None if no path is found.
|
juraj-google-style
|
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = BytearrayStream()
if self._username:
self._username.write(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Username/password credential struct missing the username.')
if self._password:
self._password.write(local_stream, kmip_version=kmip_version)
self.length = local_stream.length()
super(UsernamePasswordCredential, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer)
|
Write the data encoding the UsernamePasswordCredential struct to a
stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the username is not defined.
|
codesearchnet
|
def predict_proba(self, a, b, **kwargs):
a = scale(a).reshape((-1, 1))
b = scale(b).reshape((-1, 1))
return self.anm_score(b, a) - self.anm_score(a, b)
|
Prediction method for pairwise causal inference using the ANM model.
Args:
a (numpy.ndarray): Variable 1
b (numpy.ndarray): Variable 2
Returns:
float: Causation score (Value : 1 if a->b and -1 if b->a)
|
juraj-google-style
|
def _unescape_token(escaped_token):
def match(m):
if m.group(1) is None:
return u"_" if m.group(0) == u"\\u" else u"\\"
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError):
return ""
trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
return _UNESCAPE_REGEX.sub(match, trimmed)
|
Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
|
juraj-google-style
|
def prepare_headers(self, http_headers, soap_action):
headers = {'Content-Type': 'text/xml; charset="utf-8"'}
if soap_action is not None:
headers.update({'SOAPACTION': '"{}"'.format(soap_action)})
if http_headers is not None:
headers.update(http_headers)
return headers
|
Prepare the http headers for sending.
Add the SOAPACTION header to the others.
Args:
http_headers (dict): A dict in the form {'Header': 'Value', ...}
containing http headers to use for the http request.
soap_action (str): The value of the SOAPACTION header.
Returns:
dict: headers including the SOAPACTION header.
|
juraj-google-style
|
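A sketch of the headers produced by `prepare_headers` above; since the method body never touches `self`, it is called here as a plain function with a made-up SOAP action:
```python
headers = prepare_headers(
    None,                                    # stands in for `self`, which is unused
    {"X-Trace-Id": "abc123"},                # hypothetical extra header
    "urn:schemas-upnp-org:service:AVTransport:1#Play",  # hypothetical SOAPACTION value
)
print(headers)
# {'Content-Type': 'text/xml; charset="utf-8"',
#  'SOAPACTION': '"urn:schemas-upnp-org:service:AVTransport:1#Play"',
#  'X-Trace-Id': 'abc123'}
```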
def parse(f, encoding='utf-8'):
if hasattr(f, 'read'):
for event in _parse(f):
yield event
else:
with io.open(f, encoding=encoding) as fh:
for event in _parse(fh):
yield event
|
Parse the TDL file *f* and yield the interpreted contents.
If *f* is a filename, the file is opened and closed when the
generator has finished, otherwise *f* is an open file object and
will not be closed when the generator has finished.
Args:
f (str, file): a filename or open file object
encoding (str): the encoding of the file (default: `"utf-8"`;
ignored if *f* is an open file)
|
juraj-google-style
|
def _sym_missing(self) -> typing.Dict[Union[str, int], Any]:
missing = dict()
if self._value_spec and self._value_spec.schema:
matched_keys, _ = self._value_spec.schema.resolve(self.keys())
for key_spec, keys in matched_keys.items():
field = self._value_spec.schema[key_spec]
assert keys or isinstance(key_spec, pg_typing.NonConstKey), key_spec
if keys:
for key in keys:
v = self.sym_getattr(key)
if utils.MISSING_VALUE == v:
missing[key] = field.value.default
elif isinstance(v, base.Symbolic):
missing_child = v.sym_missing(flatten=False)
if missing_child:
missing[key] = missing_child
else:
for k, v in self.sym_items():
if isinstance(v, base.Symbolic):
missing_child = v.sym_missing(flatten=False)
if missing_child:
missing[k] = missing_child
return missing
|
Returns missing values.
Returns:
A dict of key to MISSING_VALUE.
|
github-repos
|
def set_submission_objects(form_fields):
variant_ids = get_submission_variants(form_fields)
variant_objs = get_objects_from_form(variant_ids, form_fields, 'variant')
casedata_objs = get_objects_from_form(variant_ids, form_fields, 'casedata')
return (variant_objs, casedata_objs)
|
Creates a list of submission objects (variant and case-data) from the clinvar submission form in blueprints/variants/clinvar.html.
Args:
form_fields(dict): it's the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER
Returns:
submission_objects(list): a list of variant and case-data submission objects, ready to be included in the clinvar database collection
|
juraj-google-style
|
def to_list(x):
if isinstance(x, list):
return x
return [x]
|
Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Args:
x: target object to be normalized.
Returns:
A list.
|
github-repos
|
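Two one-liners illustrating `to_list` above:
```python
print(to_list([1, 2]))  # [1, 2] -- already a list, returned unchanged
print(to_list("x"))     # ['x']  -- any other object is wrapped in a single-element list
```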
def AddTimeZoneOption(self, argument_group):
argument_group.add_argument('-z', '--zone', '--timezone', dest='timezone', action='store', type=str, default=None, help='explicitly define the timezone. Typically the timezone is determined automatically where possible otherwise it will default to UTC. Use "-z list" to see a list of available timezones.')
|
Adds the time zone option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
|
codesearchnet
|
def add_deploy(state, deploy_func, *args, **kwargs):
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
deploy_func(state, host, *args, **kwargs)
|
Prepare & add a deploy to pyinfra.state by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
deploy_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
|
juraj-google-style
|
def _GenDiscoveryDocCallback(args, discovery_func=_GenDiscoveryDoc):
discovery_paths = discovery_func(args.service, args.output,
hostname=args.hostname,
application_path=args.application)
for discovery_path in discovery_paths:
print 'API discovery document written to %s' % discovery_path
|
Generate discovery docs to files.
Args:
args: An argparse.Namespace object to extract parameters from
discovery_func: A function that generates discovery docs and stores them to
files, accepting a list of service names, a discovery doc format, and an
output directory.
|
juraj-google-style
|
def _retry_from_retry_config(retry_params, retry_codes):
exception_classes = [_exception_class_for_grpc_status_name(code) for code in retry_codes]
return retry.Retry(retry.if_exception_type(*exception_classes), initial=(retry_params['initial_retry_delay_millis'] / _MILLIS_PER_SECOND), maximum=(retry_params['max_retry_delay_millis'] / _MILLIS_PER_SECOND), multiplier=retry_params['retry_delay_multiplier'], deadline=(retry_params['total_timeout_millis'] / _MILLIS_PER_SECOND))
|
Creates a Retry object given a gapic retry configuration.
Args:
retry_params (dict): The retry parameter values, for example::
{
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
"initial_rpc_timeout_millis": 120000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 120000,
"total_timeout_millis": 600000
}
retry_codes (sequence[str]): The list of retryable gRPC error code
names.
Returns:
google.api_core.retry.Retry: The default retry object for the method.
|
codesearchnet
|
def split(input_layer, split_dim=0, num_splits=2):
shape = input_layer.shape
_check_split_dims(num_splits, split_dim, shape)
splits = tf.split(
value=input_layer, num_or_size_splits=num_splits, axis=split_dim)
return input_layer.with_sequence(splits)
|
Splits this Tensor along the split_dim into num_splits equal chunks.
Examples:
* `[1, 2, 3, 4] -> [1, 2], [3, 4]`
* `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`
Args:
input_layer: The chainable object, supplied.
split_dim: The dimension to split along. Defaults to batch.
num_splits: The number of splits.
Returns:
A list of PrettyTensors.
Raises:
ValueError: If split_dim is out of range or isn't divided evenly by
num_splits.
|
juraj-google-style
|
def normalize_tuple(value, n, name):
if isinstance(value, int):
return (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value))
if len(value_tuple) != n:
raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value))
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
raise ValueError('The `' + name + '` argument must be a tuple of ' + str(n) + ' integers. Received: ' + str(value) + ' including element ' + str(single_value) + ' of type' + ' ' + str(type(single_value)))
return value_tuple
|
Transforms a single integer or iterable of integers into an integer tuple.
Args:
value: The value to validate and convert. Could be an int, or any iterable of
ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
Returns:
A tuple of n integers.
Raises:
ValueError: If something else than an int/long or iterable thereof was
passed.
|
github-repos
|
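A hedged usage sketch for `normalize_tuple` above, mirroring how Keras-style layers normalize `strides` and `kernel_size` arguments:
```python
print(normalize_tuple(3, 2, "strides"))           # (3, 3)
print(normalize_tuple((1, 2), 2, "kernel_size"))  # (1, 2)
# normalize_tuple("bad", 2, "strides") raises ValueError: wrong length after tuple()
```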
def __fa_process_sequence(self, sequence, avoid, initial_state, execution_state, trace_current, next_addr):
ip = sequence.address
next_ip = None
while ip:
try:
instr = sequence.fetch(ip)
except ReilSequenceInvalidAddressError:
assert (split_address(ip)[1] == 0)
next_ip = ip
break
try:
target_addr = sequence.get_next_address(ip)
except ReilSequenceInvalidAddressError:
target_addr = next_addr
next_ip = self.__process_instr(instr, avoid, target_addr, initial_state, execution_state, trace_current)
try:
ip = (next_ip if next_ip else sequence.get_next_address(ip))
except ReilSequenceInvalidAddressError:
break
return next_ip
|
Process a REIL sequence.
Args:
sequence (ReilSequence): A REIL sequence to process.
avoid (list): List of address to avoid.
initial_state: Initial state.
execution_state: Execution state queue.
trace_current (list): Current trace.
next_addr: Address of the next instruction following the current one.
Returns:
Returns the next instruction to execute in case there is one, otherwise returns None.
|
codesearchnet
|
def _StartAnalysisProcesses(self, storage_writer, analysis_plugins):
logger.info('Starting analysis plugins.')
for analysis_plugin in analysis_plugins.values():
self._analysis_plugins[analysis_plugin.NAME] = analysis_plugin
process = self._StartWorkerProcess(analysis_plugin.NAME, storage_writer)
if (not process):
logger.error('Unable to create analysis process: {0:s}'.format(analysis_plugin.NAME))
logger.info('Analysis plugins running')
|
Starts the analysis processes.
Args:
storage_writer (StorageWriter): storage writer.
analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
should be run and their names.
|
codesearchnet
|
def dimensions(self, selection='all', label=False):
if (label in ['name', True]):
label = 'short'
elif (label == 'label'):
label = 'long'
elif label:
raise ValueError("label needs to be one of True, False, 'name' or 'label'")
lambdas = {'k': ((lambda x: x.kdims), {'full_breadth': False}), 'v': ((lambda x: x.vdims), {}), 'c': ((lambda x: x.cdims), {})}
aliases = {'key': 'k', 'value': 'v', 'constant': 'c'}
if (selection in ['all', 'ranges']):
groups = [d for d in self._dim_groups if (d != 'cdims')]
dims = [dim for group in groups for dim in getattr(self, group)]
elif isinstance(selection, list):
dims = [dim for group in selection for dim in getattr(self, ('%sdims' % aliases.get(group)))]
elif (aliases.get(selection) in lambdas):
selection = aliases.get(selection, selection)
(lmbd, kwargs) = lambdas[selection]
key_traversal = self.traverse(lmbd, **kwargs)
dims = [dim for keydims in key_traversal for dim in keydims]
else:
raise KeyError(("Invalid selection %r, valid selections include'all', 'value' and 'key' dimensions" % repr(selection)))
return [((dim.label if (label == 'long') else dim.name) if label else dim) for dim in dims]
|
Lists the available dimensions on the object
Provides convenient access to Dimensions on nested Dimensioned
objects. Dimensions can be selected by their type, i.e. 'key'
or 'value' dimensions. By default 'all' dimensions are
returned.
Args:
selection: Type of dimensions to return
The type of dimension, i.e. one of 'key', 'value',
'constant' or 'all'.
label: Whether to return the name, label or Dimension
Whether to return the Dimension objects (False),
the Dimension names (True/'name') or labels ('label').
Returns:
List of Dimension objects or their names or labels
|
codesearchnet
|
def mixture(val: Any, default: Any=RaiseTypeErrorIfNotProvided) -> Sequence[Tuple[(float, Any)]]:
getter = getattr(val, '_mixture_', None)
result = (NotImplemented if (getter is None) else getter())
if (result is not NotImplemented):
return result
if (default is not RaiseTypeErrorIfNotProvided):
return default
if (getter is None):
raise TypeError("object of type '{}' has no _mixture_ method.".format(type(val)))
raise TypeError("object of type '{}' does have a _mixture_ method, but it returned NotImplemented.".format(type(val)))
|
Return a sequence of tuples representing a probabilistic combination.
A mixture is described by an iterable of tuples of the form
(probability of object, object)
The probability components of the tuples must sum to 1.0 and be between
0 and 1 (inclusive).
Args:
val: The value whose mixture is being computed.
default: A default value if val does not support mixture.
Returns:
An iterable of tuples of size 2. The first element of the tuple is a
probability (between 0 and 1) and the second is the object that occurs
with that probability in the mixture. The probabilities will sum to 1.0.
|
codesearchnet
|
def process(self, element):
text_line = element.strip()
if not text_line:
self.empty_line_counter.inc(1)
words = re.findall("[\\w\\']+", text_line, re.UNICODE)
for w in words:
self.words_counter.inc()
self.word_lengths_counter.inc(len(w))
self.word_lengths_dist.update(len(w))
return words
|
Returns an iterator over the words of this element.
The element is a line of text. If the line is blank, note that, too.
Args:
element: the element being processed
Returns:
The processed element.
|
github-repos
|
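A quick look at the tokenisation regex used in `process` above, outside of any Beam pipeline:
```python
import re

text_line = "Hello, world! It's a test.".strip()
words = re.findall("[\\w\\']+", text_line, re.UNICODE)
print(words)  # ['Hello', 'world', "It's", 'a', 'test']
```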
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields != 1:
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
try:
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmt')
event_data.cookie_name = self.COOKIE_NAME
event_data.url = url
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
|
juraj-google-style
|
def remove_server(self, name):
cmd = self.command_builder('no ntp server', value=name)
return self.configure(cmd)
|
Remove an NTP server entry from the node config
Args:
name (string): The IP address or FQDN of the NTP server.
Returns:
True if the operation succeeds, otherwise False.
|
juraj-google-style
|
def value(self):
if self.isenum():
if isinstance(self._value, self.enum_ref):
return self._value.value
return self._value
elif self.is_bitmask():
return self._value.bitmask
else:
return self._value
|
Return this type's value.
Returns:
object: The value of an enum, bitmask, etc.
|
codesearchnet
|
def _evolve_subsystem(self, state, qargs):
mat = np.reshape(self.data, self._shape)
state_size = len(state)
state_dims = self._automatic_dims(None, state_size)
if self.input_dims() != len(qargs) * (2,):
raise QiskitError(
"Operator input dimensions are not compatible with state subsystem dimensions."
)
if state.ndim == 1:
tensor = np.reshape(state, state_dims)
indices = [len(state_dims) - 1 - qubit for qubit in qargs]
tensor = self._einsum_matmul(tensor, mat, indices)
return np.reshape(tensor, state_size)
tensor = np.reshape(state, 2 * state_dims)
indices = [len(state_dims) - 1 - qubit for qubit in qargs]
right_shift = len(state_dims)
tensor = self._einsum_matmul(tensor, mat, indices)
tensor = self._einsum_matmul(
tensor, np.conj(mat), indices, shift=right_shift)
return np.reshape(tensor, [state_size, state_size])
|
Evolve a quantum state by the operator.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
|
juraj-google-style
|
def run(app: web.Application):
host = app['config']['host']
port = app['config']['port']
web.run_app(app, host=host, port=port)
|
Runs the application in an async context.
This function will block indefinitely until the application is shut down.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
|
codesearchnet
|
def from_conv_part_data(conv_part_data, self_user_id):
user_id = UserID(chat_id=conv_part_data.id.chat_id, gaia_id=conv_part_data.id.gaia_id)
return User(user_id, conv_part_data.fallback_name, None, None, [], ((self_user_id == user_id) or (self_user_id is None)))
|
Construct user from ``ConversationParticipantData`` message.
Args:
conv_part_data: ``ConversationParticipantData`` message.
self_user_id (~hangups.user.UserID or None): The ID of the current
user. If ``None``, assume ``conv_part_data`` is the current user.
Returns:
:class:`~hangups.user.User` object.
|
codesearchnet
|
def _validate_connection(self, action, uuid, key):
if (uuid not in self._connections):
self._logger.warn('Received message for device with no connection 0x%X', uuid)
return None
data = self._connections[uuid]
if (key != data['key']):
self._logger.warn('Received message for device with incorrect key, uuid=0x%X', uuid)
return None
return data['connection_id']
|
Validate that a message received for a device has the right key
If this action is valid the corresponding internal connection id to
be used with the DeviceManager is returned, otherwise None is returned
and an invalid message status is published.
Args:
action (string): The action being requested for the device.
uuid (int): The uuid of the device the message is addressed to.
key (string): The key passed in when this device was first connected
to
Returns:
int: if the action is allowed, otherwise None
|
codesearchnet
|
def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
if not isinstance(hidden_states, (tuple, list)):
raise TypeError('hidden_states should be a tuple or list of tensors')
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')
if self.reassemble_stage is not None:
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
output = self.fusion_stage(features)
return output
|
Args:
hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
|
github-repos
|
def update_object(self, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if (r.status_code != requests.codes.ok):
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(response=jsonutils.loads(r.content), ref=ref, content=r.content, code=r.status_code)
return self._parse_reply(r)
|
Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException
|
codesearchnet
|
def _init_log(level=logging.DEBUG):
log = logging.getLogger(__file__)
log.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
formatter = logging.Formatter('%(asctime)s: %(message)s',
'%Y/%m/%d-%H:%M:%S')
handler.setFormatter(formatter)
log.addHandler(handler)
return log
|
Initialise the logging object.
Args:
level (int): Logging level.
Returns:
Logger: Python logging object.
|
juraj-google-style
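A short usage sketch for _init_log above; the level and message are illustrative only.
import logging

log = _init_log(level=logging.INFO)
log.info('starting export')  # printed to stdout, e.g. "2024/01/05-10:30:00: starting export"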
|
def get_formatter(self):
if (not self.fmt):
self.fmt = '%(asctime)s.%(msecs)03d {host} {progname} (%(process)d): %(message)s'.format(host=self.hostname, progname=self.progname)
if (not self.datefmt):
self.datefmt = '%Y-%m-%dT%H:%M:%S'
return logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)
|
Create a fully configured `logging.Formatter`
Example of formatted log message:
2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello
Returns:
(obj): Instance of `logging.Formatter`
|
codesearchnet
|
def config_get(config, *path, default=None):
o = object()
result = get_in(config, path, default=o)
if result is not o:
return result
else:
return default
|
Get a configuration option following a path through the config
Example usage:
>>> config_get(config,
'problem', 'problem_type_details', 'scorer',
default='accuracy')
Args:
config (dict): config dict
*path (list[str]): List of config sections and options to follow.
default (default=None): A default value to return in the case that
the option does not exist.
|
juraj-google-style
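A usage sketch for config_get; the nested config dict below is invented for illustration.
config = {
    'problem': {
        'problem_type_details': {'scorer': 'f1'},
    },
}

# Follows the path through the nested sections; returns 'f1' here.
scorer = config_get(config, 'problem', 'problem_type_details', 'scorer', default='accuracy')

# A missing path falls back to the supplied default ('accuracy').
fallback = config_get(config, 'problem', 'missing_section', 'scorer', default='accuracy')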
|
def xmoe2_v1():
hparams = xmoe2_dense(0)
moe.set_default_moe_hparams(hparams)
hparams.decoder_layers = (['local_att', 'local_att', 'drd', 'att', 'drd', 'local_att', 'local_att', 'hmoe'] * 4)[:(- 1)]
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.moe_hidden_size = 32768
hparams.mesh_shape = 'b0:4;b1:8'
hparams.layout = 'outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0'
hparams.outer_batch_size = 4
hparams.moe_num_experts = [8, 4]
hparams.num_heads = 4
return hparams
|
Model incorporating mixture-of-experts and local-attention.
~6B parameters
32 experts in 3 hierarchical moe layers.
Returns:
a hparams
|
codesearchnet
|
def __init__(self, name, data=b'', data_type=definitions.REG_NONE, offset=0):
super(FakeWinRegistryValue, self).__init__()
self._data = data
self._data_type = data_type
self._data_size = len(data)
self._name = name
self._offset = offset
|
Initializes a Windows Registry value.
Args:
name (str): name of the Windows Registry value.
data (Optional[bytes]): value data.
data_type (Optional[int]): value data type.
offset (Optional[int]): offset of the value within the Windows Registry
file.
|
juraj-google-style
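A brief construction sketch for the initializer above; the value name and data are invented, and REG_SZ is assumed to be the string type constant in the same definitions module as REG_NONE.
registry_value = FakeWinRegistryValue(
    'ProgramFilesDir',
    data='C:\\Program Files'.encode('utf-16-le'),
    data_type=definitions.REG_SZ)  # assumed string value type constant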
|
def depth_soil_specific_heat(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
                    'value {} needs to be of type float '
'for field `depth_soil_specific_heat`'.format(value))
self._depth_soil_specific_heat = value
|
Corresponds to IDD Field `depth_soil_specific_heat`
Args:
value (float): value for IDD Field `depth_soil_specific_heat`
Unit: J/kg-K,
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def __call__(self, shape, dtype=None):
dtype = standardize_dtype(dtype)
return ops.zeros(shape, dtype=dtype)
|
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
are supported. If not specified, `keras.backend.floatx()`
is used, which default to `float32` unless you configured it
otherwise (via `keras.backend.set_floatx(float_dtype)`).
|
github-repos
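A usage sketch, assuming this is the Keras Zeros initializer; the shape and dtype are arbitrary.
from keras import initializers

initializer = initializers.Zeros()
zeros = initializer(shape=(2, 3), dtype='float32')  # a 2x3 tensor filled with zeros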
|
def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None):
date_parser = date_parser or (lambda x: x)
dotted_ext, dotted_output_ext = None, None
    if ext is not None and output_ext is not None:
dotted_ext = ('' if ext.startswith('.') else '.') + ext
dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext
table = {}
for file_properties in util.find_files(path, ext=ext or '', verbosity=verbosity):
file_path = file_properties['path']
if output_ext and (dotted_output_ext + '.') in file_path:
continue
df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows)
df = flatten_dataframe(df, verbosity=verbosity)
        if dotted_ext is not None and dotted_output_ext is not None:
            df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext)
        table[file_path] = df
    return table
|
Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpreted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame }
|
juraj-google-style
|
def set_kernel_process_name(name):
if not isinstance(name, bytes):
name = name.encode('ascii', 'replace')
try:
with open('/proc/self/comm', 'wb') as proc_comm:
proc_comm.write(name[:15])
except EnvironmentError:
try:
import ctypes
except ImportError:
return
try:
libc = ctypes.CDLL('libc.so.6')
except EnvironmentError:
return
pr_set_name = ctypes.c_ulong(15)
zero = ctypes.c_ulong(0)
try:
libc.prctl(pr_set_name, name, zero, zero, zero)
except AttributeError:
return
|
Changes the Kernel's /proc/self/status process name on Linux.
The kernel name is NOT what will be shown by the ps or top command.
It is a 15 character string stored in the kernel's process table that
is included in the kernel log when a process is OOM killed.
The first 15 bytes of name are used. Non-ASCII unicode is replaced with '?'.
Does nothing if /proc/self/comm cannot be written or prctl() fails.
Args:
name: bytes|unicode, the Linux kernel's command name to set.
|
juraj-google-style
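A minimal usage sketch; the worker name is arbitrary and, per the docstring, only the first 15 bytes are kept.
# Shows up in /proc/<pid>/comm and kernel OOM logs on Linux; silently a no-op elsewhere.
set_kernel_process_name(b'batch-worker-01')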
|
def paint(self):
snippet = {'fill-extrusion-opacity': VectorStyle.get_style_value(self.opacity), 'fill-extrusion-color': VectorStyle.get_style_value(self.color), 'fill-extrusion-base': VectorStyle.get_style_value(self.base), 'fill-extrusion-height': VectorStyle.get_style_value(self.height)}
if self.translate:
snippet['fill-extrusion-translate'] = self.translate
return snippet
|
Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
|
codesearchnet
|
def __call__(self, shape, dtype=None, **kwargs):
raise NotImplementedError
|
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not provided will return tensor
of `tf.float32`.
**kwargs: Additional keyword arguments. Accepted values:
`partition_shape` and `partition_offset`. Used when creating a single
partition in a partitioned variable. `partition_shape` is the shape of
the partition (i.e. the shape of the returned tensor) and
`partition_offset` is a tuple of `int` specifying the offset of this
partition w.r.t each axis. For example, a tensor of shape `(30, 100)`
can be partitioned into two partitions: `p0` of shape `(10, 100)` and
`p1` of shape `(20, 100)`; if the initializer is called with
`partition_shape=(20, 100)` and `partition_offset=(10, 0)`, it should
return the value for `p1`.
|
github-repos
|
def make_file_extension_assertion(extension):
def file_extension_assertion(file_path):
base, ext = os.path.splitext(file_path)
if ext.lower() != extension:
raise argparse.ArgumentTypeError('File must have ' + extension + ' extension')
return file_path
return file_extension_assertion
|
Function factory for file extension argparse assertion
Args:
extension (string): the file extension to assert
Returns:
string: the supplied extension, if assertion is successful.
|
juraj-google-style
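A usage sketch wiring the factory into argparse; the program name, flag, and file name are invented.
import argparse

parser = argparse.ArgumentParser(prog='report-gen')
parser.add_argument(
    '--template',
    type=make_file_extension_assertion('.html'))  # raises ArgumentTypeError for other extensions
args = parser.parse_args(['--template', 'summary.html'])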
|
def _handle_start_dag(self, request):
dag_name = self._queue_dag(name=request.payload['name'], data=request.payload['data'])
return Response(success=(dag_name is not None), uid=request.uid, payload={'dag_name': dag_name})
|
The handler for the start_dag request.
The start_dag request creates a new dag and adds it to the queue.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be started
'data': the data that is passed onto the start tasks
Returns:
Response: A response object containing the following fields:
- dag_name: The name of the started dag.
|
codesearchnet
|
def _get_update(self, variant):
update = {
'$inc': {
'homozygote': variant.get('homozygote', 0),
'hemizygote': variant.get('hemizygote', 0),
'observations': 1
},
'$set': {
'chrom': variant.get('chrom'),
'start': variant.get('pos'),
'end': variant.get('end'),
'ref': variant.get('ref'),
'alt': variant.get('alt'),
}
}
if variant.get('case_id'):
update['$push'] = {
'families': {
'$each': [variant.get('case_id')],
'$slice': -50
}
}
return update
|
Convert a variant to a proper update
Args:
variant(dict)
Returns:
update(dict)
|
juraj-google-style
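A sketch of how the resulting update document might be applied; `store` stands in for an instance of the surrounding adapter class, and the variant fields, collection, and id are invented for illustration.
variant = {
    'chrom': '1',
    'pos': 880086,
    'end': 880086,
    'ref': 'T',
    'alt': 'C',
    'homozygote': 0,
    'hemizygote': 1,
    'case_id': 'family_42',
}

update = store._get_update(variant)
# With pymongo the document could then be upserted, e.g.:
# db.variant.update_one({'_id': variant_id}, update, upsert=True)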
|
def RemapOperator(opcode_name):
old_name_to_new_name = {'CONVOLUTION': 'CONV_2D', 'DEPTHWISE_CONVOLUTION': 'DEPTHWISE_CONV_2D', 'AVERAGE_POOL': 'AVERAGE_POOL_2D', 'MAX_POOL': 'MAX_POOL_2D', 'L2_POOL': 'L2_POOL_2D', 'SIGMOID': 'LOGISTIC', 'L2NORM': 'L2_NORMALIZATION', 'LOCAL_RESPONSE_NORM': 'LOCAL_RESPONSE_NORMALIZATION', 'Basic_RNN': 'RNN'}
return old_name_to_new_name[opcode_name] if opcode_name in old_name_to_new_name else opcode_name
|
Go from old schema op name to new schema op name.
Args:
opcode_name: String representing the ops (see :schema.fbs).
Returns:
Converted opcode_name from V1 to V2.
|
github-repos
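A quick usage sketch; both calls only exercise the lookup table above.
assert RemapOperator('CONVOLUTION') == 'CONV_2D'               # old V1 name is remapped
assert RemapOperator('FULLY_CONNECTED') == 'FULLY_CONNECTED'   # unknown names pass through unchanged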
|
def find_field(item_list, cond, comparator, target_field):
for item in item_list:
if comparator(item, cond) and target_field in item:
return item[target_field]
return None
|
Finds the value of a field in a dict object that satisfies certain
conditions.
Args:
item_list: A list of dict objects.
cond: A param that defines the condition.
comparator: A function that checks if a dict satisfies the condition.
target_field: Name of the field whose value to be returned if an item
satisfies the condition.
Returns:
Target value or None if no item satisfies the condition.
|
github-repos
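A small usage sketch for find_field; the item list and comparator are invented for illustration.
devices = [
    {'serial': 'A1B2', 'model': 'pixel'},
    {'serial': 'C3D4', 'model': 'nexus', 'build_id': 'NRD90M'},
]

# Returns 'NRD90M': the second item satisfies the condition and has the target field.
build = find_field(
    devices,
    cond='C3D4',
    comparator=lambda item, cond: item.get('serial') == cond,
    target_field='build_id')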
|
def where(self, cond, other, **kwargs):
assert isinstance(
cond, type(self)
), "Must have the same DataManager subclass to perform this operation"
if isinstance(other, type(self)):
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
first_pass = cond._inter_manager_operations(
other, "left", where_builder_first_pass
)
final_pass = self._inter_manager_operations(
first_pass, "left", where_builder_second_pass
)
return self.__constructor__(final_pass.data, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
if isinstance(other, pandas.Series):
other.index = pandas.RangeIndex(len(other.index))
def where_builder_series(df, cond):
if axis == 0:
df.index = pandas.RangeIndex(len(df.index))
cond.index = pandas.RangeIndex(len(cond.index))
else:
df.columns = pandas.RangeIndex(len(df.columns))
cond.columns = pandas.RangeIndex(len(cond.columns))
return df.where(cond, other, **kwargs)
reindexed_self, reindexed_cond, a = self.copartition(
axis, cond, "left", False
)
reindexed_cond = reindexed_cond[0]
new_data = reindexed_self.inter_data_operation(
axis, lambda l, r: where_builder_series(l, r), reindexed_cond
)
return self.__constructor__(new_data, self.index, self.columns)
|
Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
other: Replacement values to use where `cond` is False.
Returns:
New DataManager with updated data and index.
|
juraj-google-style
|
def loss(logits, labels):
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
|
Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
|
juraj-google-style
|
def set_topics(self, topics):
if set(topics).difference(self._topics):
future = self.cluster.request_update()
else:
future = Future().success(set(topics))
self._topics = set(topics)
return future
|
Set specific topics to track for metadata.
Arguments:
topics (list of str): topics to check for metadata
Returns:
Future: resolves after metadata request/response
|
codesearchnet
|
def filter_alias_create_namespace(namespace):
def filter_string(s):
return ' '.join(s.strip().split())
namespace.alias_name = filter_string(namespace.alias_name)
namespace.alias_command = filter_string(namespace.alias_command)
return namespace
|
Filter alias name and alias command inside alias create namespace to appropriate strings.
Args:
namespace: The alias create namespace.
Returns:
Filtered namespace where excessive whitespaces are removed in strings.
|
codesearchnet
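A usage sketch with an argparse.Namespace built by hand; the alias values are invented.
import argparse

namespace = argparse.Namespace(
    alias_name='  grp ',
    alias_command='group   list   --output table ')
filtered = filter_alias_create_namespace(namespace)
# filtered.alias_name == 'grp'; filtered.alias_command == 'group list --output table'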
|