code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (string, 3 classes) |
---|---|---|
def getDelOps(self, buid):
return (('prop:del', (buid, self.form.name, self.name, self.storinfo)),)
|
Get a list of storage operations to delete this property from the buid.
Args:
buid (bytes): The node buid.
Returns:
(tuple): The storage operations
|
codesearchnet
|
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
if is_forward_declaration:
return ((len(nesting_state.stack) >= 1) and isinstance(nesting_state.stack[(- 1)], _NamespaceInfo))
return ((len(nesting_state.stack) > 1) and nesting_state.stack[(- 1)].check_namespace_indentation and isinstance(nesting_state.stack[(- 2)], _NamespaceInfo))
|
Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
|
codesearchnet
|
def read_binary(self, key, b64decode=True, decode=False):
data = None
if key is not None:
data = self.db.read(key.strip())
if data is not None:
data = json.loads(data)
if b64decode:
data = base64.b64decode(data)
if decode:
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
data = data.decode('latin-1')
else:
self.tcex.log.warning(u'The key field was None.')
return data
|
Read method of CRUD operation for binary data.
Args:
key (string): The variable to read from the DB.
b64decode (bool): If true the data will be base64 decoded.
decode (bool): If true the data will be decoded to a String.
Returns:
(bytes|string): Results retrieved from DB.
|
juraj-google-style
|
def setup_spline(self, spline_options=None):
self.spline_options = spline_options
relative_energies = self.energies - self.energies[0]
if scipy_old_piecewisepolynomial:
if self.spline_options:
raise RuntimeError('Option for saddle point not available with '
'old scipy implementation')
self.spline = PiecewisePolynomial(
self.r, np.array([relative_energies, -self.forces]).T,
orders=3)
else:
if self.spline_options.get('saddle_point', '') == 'zero_slope':
imax = np.argmax(relative_energies)
self.spline = CubicSpline(x=self.r[:imax + 1],
y=relative_energies[:imax + 1],
bc_type=((1, 0.0), (1, 0.0)))
cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:],
bc_type=((1, 0.0), (1, 0.0)))
self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
else:
self.spline = CubicSpline(x=self.r, y=relative_energies,
bc_type=((1, 0.0), (1, 0.0)))
|
Setup of the options for the spline interpolation
Args:
spline_options (dict): Options for cubic spline. For example,
{"saddle_point": "zero_slope"} forces the slope at the saddle to
be zero.
|
juraj-google-style
|
def _parse_pages_binding(details):
pages = _get_td_or_none(
details,
"ctl00_ContentPlaceHolder1_tblRowRozsahVazba"
)
if not pages:
return None, None
binding = None
if "/" in pages:
binding = pages.split("/")[1].strip()
pages = pages.split("/")[0].strip()
if not pages:
pages = None
return pages, binding
|
Parse number of pages and binding of the book.
Args:
details (obj): HTMLElement containing slice of the page with details.
Returns:
(pages, binding): Tuple with two strings, or two None values.
|
juraj-google-style
|
def ebalance(sdat, tstart=None, tend=None):
tseries = sdat.tseries_between(tstart, tend)
(rbot, rtop) = misc.get_rbounds(sdat.steps.last)
if (rbot != 0):
coefsurf = ((rtop / rbot) ** 2)
volume = ((rbot * (((rtop / rbot) ** 3) - 1)) / 3)
else:
coefsurf = 1.0
volume = 1.0
(dtdt, time) = dt_dt(sdat, tstart, tend)
ftop = (tseries['ftop'].values * coefsurf)
fbot = tseries['fbot'].values
radio = tseries['H_int'].values
ebal = ((ftop[1:] - fbot[1:]) + (volume * (dtdt - radio[1:])))
return (ebal, time)
|
Energy balance.
Compute Nu_t - Nu_b + V*dT/dt as a function of time using an explicit
Euler scheme. This should be zero if energy is conserved.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the time series data if set to None.
Returns:
tuple of :class:`numpy.array`: energy balance and time arrays.
|
codesearchnet
|
def profile_name_scope(self, options):
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.GraphNodeProto()
try:
tfprof_node.ParseFromString(print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
except message.DecodeError as e:
sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
return tfprof_node
|
Profile the statistics of graph nodes, organized by name scope.
Args:
options: A dict of options. See core/profiler/g3doc/options.md.
Returns:
a GraphNodeProto that records the results.
|
github-repos
|
def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
|
Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct
vision model configuration.
Returns:
[`Pix2StructConfig`]: An instance of a configuration object
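Example (a minimal sketch, assuming the standard Pix2Struct configuration classes are importable):
>>> text_config = Pix2StructTextConfig()
>>> vision_config = Pix2StructVisionConfig()
>>> config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)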
|
github-repos
|
def tokenize(self, text):
if self.normalize_text:
text = unicodedata.normalize('NFKC', text)
output_tokens = []
for char in text:
if char not in self.vocab:
output_tokens.append(self.unk_token)
continue
output_tokens.append(char)
return output_tokens
|
Tokenizes a piece of text into characters.
For example, `input = "apple"` will return as output `["a", "p", "p", "l", "e"]`.
Args:
text: A single token or whitespace separated tokens.
This should have already been passed through *BasicTokenizer*.
Returns:
A list of characters.
|
github-repos
|
def list_vmss_skus(access_token, subscription_id, resource_group, vmss_name):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/skus', '?api-version=', COMP_API])
return do_get_next(endpoint, access_token)
|
List the VM skus available for a VM Scale Set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. JSON body of VM skus.
|
codesearchnet
|
def encode(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, None]=None, max_length: Optional[int]=None, stride: int=0, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:
encoded_inputs = self.encode_plus(text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, padding_side=padding_side, return_tensors=return_tensors, **kwargs)
return encoded_inputs['input_ids']
|
Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.
Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.
Args:
text (`str`, `List[str]` or `List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
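Example (illustrative; assumes `tokenizer` is an instance of a tokenizer class exposing this method):
>>> input_ids = tokenizer.encode("Hello world", add_special_tokens=True)
>>> # input_ids is a plain List[int], the same value as encode_plus(...)["input_ids"]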
|
github-repos
|
def _request_reports(self, resource_param_name, resources, endpoint_name):
params = [{resource_param_name: resource, 'apikey': self._api_key} for resource in resources]
return self._requests.multi_get(self.BASE_DOMAIN + endpoint_name, query_params=params)
|
Sends multiples requests for the resources to a particular endpoint.
Args:
resource_param_name: a string name of the resource parameter.
resources: list of of the resources.
endpoint_name: VirusTotal endpoint URL suffix.
Returns:
A list of the responses.
|
juraj-google-style
|
def get_members(self, name):
grpid = re.search('(\\d+)', name).group()
command = ('show port-channel %s all-ports' % grpid)
config = self.node.enable(command, 'text')
return re.findall('\\b(?!Peer)Ethernet[\\d/]*\\b', config[0]['result']['output'])
|
Returns the member interfaces for the specified Port-Channel
Args:
name(str): The Port-channel interface name to return the member
interfaces for
Returns:
A list of physical interface names that belong to the specified
interface
|
codesearchnet
|
def parse(self, arguments):
if not isinstance(arguments, list):
arguments = [arguments]
if self.present:
values = self.value
else:
values = []
for item in arguments:
Flag.Parse(self, item)
values.append(self.value)
self.value = values
|
Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
|
juraj-google-style
|
def _get_config():
conf_file = os.path.join(_get_config_dir(), 'log_config.toml')
if os.path.exists(conf_file):
with open(conf_file) as fd:
raw_config = fd.read()
log_config = toml.loads(raw_config)
return log_config
conf_file = os.path.join(_get_config_dir(), 'log_config.yaml')
if os.path.exists(conf_file):
with open(conf_file) as fd:
raw_config = fd.read()
log_config = yaml.safe_load(raw_config)
return log_config
return None
|
Determines if there is a log config in the config directory
and returns it. If it does not exist, returns None.
Returns:
log_config (dict): The dictionary to pass to logging.config.dictConfig
|
codesearchnet
|
def _add_node(self, node):
node_id = len(self.node_list)
self.node_to_id[node] = node_id
self.node_list.append(node)
self.adj_list[node_id] = []
self.reverse_adj_list[node_id] = []
return node_id
|
Add a new node to node_list and give the node an ID.
Args:
node: An instance of Node.
Returns:
node_id: An integer.
|
juraj-google-style
|
def _GetComparable(self, sub_comparable_string=''):
string_parts = []
string_parts.append(getattr(self.parent, 'comparable', ''))
string_parts.append('type: {0:s}'.format(self.type_indicator))
if sub_comparable_string:
string_parts.append(', {0:s}'.format(sub_comparable_string))
string_parts.append('\n')
return ''.join(string_parts)
|
Retrieves the comparable representation.
This is a convenience function for constructing comparables.
Args:
sub_comparable_string (str): sub comparable string.
Returns:
str: comparable representation of the path specification.
|
codesearchnet
|
def extract_labels(self, f, one_hot=False, num_classes=10):
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = self._read32(bytestream)
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = self._read32(bytestream)
buf = bytestream.read(num_items)
labels = np.frombuffer(buf, dtype=np.uint8)
if one_hot:
return self.dense_to_one_hot(labels, num_classes)
return labels
|
Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D uint8 numpy array.
Raises:
ValueError: If the bytestream doesn't start with 2049.
|
juraj-google-style
|
def extract_simple_optional_location_info(ir_blocks, complex_optional_roots, location_to_optional_roots):
location_to_preceding_optional_root_iteritems = six.iteritems({location: optional_root_locations_stack[(- 1)] for (location, optional_root_locations_stack) in six.iteritems(location_to_optional_roots)})
simple_optional_root_to_inner_location = {optional_root_location: inner_location for (inner_location, optional_root_location) in location_to_preceding_optional_root_iteritems if (optional_root_location not in complex_optional_roots)}
simple_optional_root_locations = set(simple_optional_root_to_inner_location.keys())
(_, non_folded_ir_blocks) = extract_folds_from_ir_blocks(ir_blocks)
simple_optional_root_info = {}
preceding_location = None
for current_block in non_folded_ir_blocks:
if isinstance(current_block, MarkLocation):
preceding_location = current_block.location
elif (isinstance(current_block, Traverse) and current_block.optional):
if (preceding_location in simple_optional_root_locations):
inner_location = simple_optional_root_to_inner_location[preceding_location]
(inner_location_name, _) = inner_location.get_location_name()
simple_optional_info_dict = {'inner_location_name': inner_location_name, 'edge_field': current_block.get_field_name()}
simple_optional_root_info[preceding_location] = simple_optional_info_dict
return simple_optional_root_info
|
Construct a map from simple optional locations to their inner location and traversed edge.
Args:
ir_blocks: list of IR blocks to extract optional data from
complex_optional_roots: list of @optional locations (location immediately preceding
an @optional traverse) that expand vertex fields
location_to_optional_roots: dict mapping from location -> optional_roots where location is
within some number of @optionals and optional_roots is a list
of optional root locations preceding the successive @optional
scopes within which the location resides
Returns:
dict mapping from simple_optional_root_location -> dict containing keys
- 'inner_location_name': Location object corresponding to the unique MarkLocation present
within a simple optional (one that does not expand vertex fields)
scope
- 'edge_field': string representing the optional edge being traversed
where simple_optional_root_location is the location preceding the @optional scope
|
codesearchnet
|
def _configure_tls_parameters(parameters):
cert = config.conf['tls']['certfile']
key = config.conf['tls']['keyfile']
if (cert and key):
_log.info('Authenticating with server using x509 (certfile: %s, keyfile: %s)', cert, key)
parameters.credentials = pika.credentials.ExternalCredentials()
else:
(cert, key) = (None, None)
if (SSLOptions is None):
parameters.ssl = True
parameters.ssl_options = {'keyfile': key, 'certfile': cert, 'ca_certs': config.conf['tls']['ca_cert'], 'cert_reqs': ssl.CERT_REQUIRED, 'ssl_version': ssl.PROTOCOL_TLSv1_2}
else:
ssl_context = ssl.create_default_context()
if config.conf['tls']['ca_cert']:
try:
ssl_context.load_verify_locations(cafile=config.conf['tls']['ca_cert'])
except ssl.SSLError as e:
raise ConfigurationException('The "ca_cert" setting in the "tls" section is invalid ({})'.format(e))
ssl_context.options |= ssl.OP_NO_SSLv2
ssl_context.options |= ssl.OP_NO_SSLv3
ssl_context.options |= ssl.OP_NO_TLSv1
ssl_context.options |= ssl.OP_NO_TLSv1_1
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.check_hostname = True
if (cert and key):
try:
ssl_context.load_cert_chain(cert, key)
except ssl.SSLError as e:
raise ConfigurationException('The "keyfile" setting in the "tls" section is invalid ({})'.format(e))
parameters.ssl_options = SSLOptions(ssl_context, server_hostname=parameters.host)
|
Configure the pika connection parameters for TLS based on the configuration.
This modifies the object provided to it. This accounts for whether or not
the new API based on the standard library's SSLContext is available for
pika.
Args:
parameters (pika.ConnectionParameters): The connection parameters to apply
TLS connection settings to.
|
codesearchnet
|
def cancel(self, subscription_id, data={}, **kwargs):
url = "{}/{}/cancel".format(self.base_url, subscription_id)
return self.post_url(url, data, **kwargs)
|
Cancel subscription given by subscription_id
Args:
subscription_id : Id for which subscription has to be cancelled
Returns:
Subscription Dict for given subscription id
|
juraj-google-style
|
def save_to_object(self):
tmpdir = tempfile.mkdtemp('save_to_object', dir=self.logdir)
checkpoint_prefix = self.save(tmpdir)
data = {}
base_dir = os.path.dirname(checkpoint_prefix)
for path in os.listdir(base_dir):
path = os.path.join(base_dir, path)
if path.startswith(checkpoint_prefix):
with open(path, 'rb') as f:
data[os.path.basename(path)] = f.read()
out = io.BytesIO()
data_dict = pickle.dumps({'checkpoint_name': os.path.basename(checkpoint_prefix), 'data': data})
if (len(data_dict) > 10000000.0):
logger.info('Checkpoint size is {} bytes'.format(len(data_dict)))
out.write(data_dict)
shutil.rmtree(tmpdir)
return out.getvalue()
|
Saves the current model state to a Python object. It also
saves to disk but does not return the checkpoint path.
Returns:
Object holding checkpoint data.
|
codesearchnet
|
def fix_reference_url(url):
new_url = url
new_url = fix_url_bars_instead_of_slashes(new_url)
new_url = fix_url_add_http_if_missing(new_url)
new_url = fix_url_replace_tilde(new_url)
try:
rfc3987.parse(new_url, rule='URI')
return new_url
except ValueError:
return url
|
Attempt to fix an incorrect URL by applying the most common corrections for known error patterns.
If the fixed URL is still invalid, the original URL is returned unchanged.
Returns:
String containing the fixed url or the original one if it could not be fixed.
|
codesearchnet
|
def validate_slicing_string(slicing_string):
return bool(re.search('^\\[(\\d|,|\\s|:)+\\]$', slicing_string))
|
Validate a slicing string.
Check if the input string contains only brackets, digits, commas and
colons that are valid characters in numpy-style array slicing.
Args:
slicing_string: (str) Input slicing string to be validated.
Returns:
(bool) True if and only if the slicing string is valid.
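Example (illustrative checks against the regular expression above):
>>> validate_slicing_string('[1:2, 3]')
True
>>> validate_slicing_string('[:, 0]')
True
>>> validate_slicing_string('1:2')
False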
|
github-repos
|
async def set(self, name, valu, init=False):
with s_editatom.EditAtom(self.snap.core.bldgbuids) as editatom:
retn = (await self._setops(name, valu, editatom, init))
if (not retn):
return False
(await editatom.commit(self.snap))
return True
|
Set a property on the node.
Args:
name (str): The name of the property.
valu (obj): The value of the property.
init (bool): Set to True to disable read-only enforcement
Returns:
(bool): True if the property was changed.
|
codesearchnet
|
def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
matches_by_order = ([0] * max_order)
possible_matches_by_order = ([0] * max_order)
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus, translation_corpus):
reference_length += min((len(r) for r in references))
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = (translation_ngram_counts & merged_ref_ngram_counts)
for ngram in overlap:
matches_by_order[(len(ngram) - 1)] += overlap[ngram]
for order in range(1, (max_order + 1)):
possible_matches = ((len(translation) - order) + 1)
if (possible_matches > 0):
possible_matches_by_order[(order - 1)] += possible_matches
precisions = ([0] * max_order)
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.0) / (possible_matches_by_order[i] + 1.0))
elif (possible_matches_by_order[i] > 0):
precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i])
else:
precisions[i] = 0.0
if (min(precisions) > 0):
p_log_sum = sum((((1.0 / max_order) * math.log(p)) for p in precisions))
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = (float(translation_length) / reference_length)
if (ratio > 1.0):
bp = 1.0
else:
bp = math.exp((1 - (1.0 / ratio)))
bleu = (geo_mean * bp)
return (bleu, precisions, bp, ratio, translation_length, reference_length)
|
Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
6-tuple with the BLEU score, n-gram precisions, brevity penalty, length
ratio, translation length, and reference length.
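Example (a minimal sketch with placeholder token lists; an identical translation at least max_order tokens long scores 1.0):
>>> references = [[['the', 'cat', 'sat', 'on', 'the', 'mat']]]
>>> translations = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
>>> bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(references, translations)
>>> bleu
1.0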
|
codesearchnet
|
def CheckFile(self, filename):
result = True
artifact_reader = reader.YamlArtifactsReader()
try:
for artifact_definition in artifact_reader.ReadFile(filename):
try:
self._artifact_registry.RegisterDefinition(artifact_definition)
except KeyError:
logging.warning('Duplicate artifact definition: {0:s} in file: {1:s}'.format(artifact_definition.name, filename))
result = False
artifact_definition_supports_macos = (definitions.SUPPORTED_OS_DARWIN in artifact_definition.supported_os)
artifact_definition_supports_windows = (definitions.SUPPORTED_OS_WINDOWS in artifact_definition.supported_os)
for source in artifact_definition.sources:
if (source.type_indicator in (definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH)):
if ((definitions.SUPPORTED_OS_DARWIN in source.supported_os) or (artifact_definition_supports_macos and (not source.supported_os))):
if (not self._CheckMacOSPaths(filename, artifact_definition, source, source.paths)):
result = False
elif (artifact_definition_supports_windows or (definitions.SUPPORTED_OS_WINDOWS in source.supported_os)):
for path in source.paths:
if (not self._CheckWindowsPath(filename, artifact_definition, source, path)):
result = False
elif (source.type_indicator == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
if ((filename != self.LEGACY_PATH) and self._HasDuplicateRegistryKeyPaths(filename, artifact_definition, source)):
result = False
for key_path in source.keys:
if (not self._CheckWindowsRegistryKeyPath(filename, artifact_definition, key_path)):
result = False
elif (source.type_indicator == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
for key_value_pair in source.key_value_pairs:
if (not self._CheckWindowsRegistryKeyPath(filename, artifact_definition, key_value_pair['key'])):
result = False
except errors.FormatError as exception:
logging.warning('Unable to validate file: {0:s} with error: {1!s}'.format(filename, exception))
result = False
return result
|
Validates the artifacts definition in a specific file.
Args:
filename (str): name of the artifacts definition file.
Returns:
bool: True if the file contains valid artifacts definitions.
|
codesearchnet
|
def to_tensor_4x4(self) -> torch.Tensor:
tensor = self._trans.new_zeros((*self.shape, 4, 4))
tensor[..., :3, :3] = self._rots.get_rot_mats()
tensor[..., :3, 3] = self._trans
tensor[..., 3, 3] = 1
return tensor
|
Converts a transformation to a homogeneous transformation tensor.
Returns:
A [*, 4, 4] homogeneous transformation tensor
|
github-repos
|
def _validate_snapshot(path: str, metadata: snapshot_pb2.DistributedSnapshotMetadata, element_spec: Any, compression: str) -> None:
error_file = _pywrap_snapshot_utils.TF_DATA_SnapshotErrorFilePath(path)
if gfile.Exists(error_file):
with gfile.GFile(error_file, 'r') as f:
raise ValueError(f'Failed to load tf.data snapshot at {path}. The save job failed to write it. Status: {f.read()}')
snapshot_element_spec = _parse_element_spec(metadata.element_spec)
if element_spec and element_spec != snapshot_element_spec:
raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified element_spec {element_spec}, but the actual element_spec is {snapshot_element_spec}.')
if compression and compression != metadata.compression:
raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified compression {compression}, but the actual compression is {metadata.compression}.')
|
Validates a tf.data distributed snapshot.
Args:
path: Root path of the distributed snapshot.
metadata: The DistributedSnapshotMetadata of the snapshot.
element_spec: Dataset element_spec.
compression: Compression method used for saving.
Raises:
ValueError if the snapshot is invalid.
|
github-repos
|
def get_ordered_params(url):
if url not in URLHelper.__cache:
URLHelper.__cache[url] = urlparse(url)
params = URLHelper.query_string_to_dict(URLHelper.__cache[url].query)
return OrderedDict(sorted(params.items()))
|
Get the query parameters of the given URL in alphabetical order.
Args:
url (str): The URL to get the query parameters from.
Returns:
OrderedDict: The query parameters, sorted alphabetically by key.
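Example (illustrative; `URLHelper` is the surrounding class):
>>> params = URLHelper.get_ordered_params('http://example.com/?b=2&a=1&c=3')
>>> list(params)
['a', 'b', 'c']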
|
juraj-google-style
|
def generate_dequeue_op(self, tpu_device=0):
self.freeze()
if self._generated_dequeue_op and (not ops.inside_function()):
raise ValueError("Can't generate two dequeue Ops from the same queue")
self._generated_dequeue_op = True
full_name = '%s/dequeue' % self._name
sharded_shapes = [policy.get_sharded_shape(shape) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]
with ops.device(tpu_name_util.core(tpu_device)):
values = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
return tag_sharding_attribute_for_dequeued_tensors(values, self._input_partition_dims)
|
Generate TPU dequeue ops.
Args:
tpu_device: The TPU device ordinal where the infeed instruction should be
placed.
Returns:
A list of Outputs corresponding to a partition of infeed dequeued
into XLA, suitable for use within a replicated block.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set; or if a dequeue op has already been generated.
|
github-repos
|
def jsonRender(self, def_buf):
try:
ret_dict = SerialBlock()
ret_dict[Field.Meter_Address] = self.getMeterAddress()
for fld in def_buf:
compare_fld = fld.upper()
if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]
except:
ekm_log(traceback.format_exc(sys.exc_info()))
return ""
return json.dumps(ret_dict, indent=4)
|
Translate the passed serial block into string only JSON.
Args:
def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.
Returns:
str: JSON rendering of meter record.
|
juraj-google-style
|
def design_stat_cooling(self, value="Cooling"):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `design_stat_cooling`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `design_stat_cooling`')
vals = set()
vals.add("Cooling")
if value not in vals:
raise ValueError('value {} is not an accepted value for '
'field `design_stat_cooling`'.format(value))
self._design_stat_cooling = value
|
Corresponds to IDD Field `design_stat_cooling`
Args:
value (str): value for IDD Field `design_stat_cooling`
Accepted values are:
- Cooling
Default value: Cooling
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def change_numbering(self, rename_dict, inplace=False):
output = (self if inplace else self.copy())
new_index = [rename_dict.get(key, key) for key in self.index]
output.index = new_index
if (not inplace):
return output
|
Return the reindexed version of Cartesian.
Args:
rename_dict (dict): A dictionary mapping integers on integers.
Returns:
Cartesian: A renamed copy according to the dictionary passed.
|
codesearchnet
|
def getall(self):
vlans_re = re.compile('(?<=^vlan\\s)(\\d+)', re.M)
response = dict()
for vid in vlans_re.findall(self.config):
response[vid] = self.get(vid)
return response
|
Returns a dict object of all Vlans in the running-config
Returns:
A dict object of Vlan attributes
|
codesearchnet
|
def replace_batch_norm(model):
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = RTDetrFrozenBatchNorm2d(module.num_features)
if not module.weight.device == torch.device('meta'):
new_module.weight.data.copy_(module.weight)
new_module.bias.data.copy_(module.bias)
new_module.running_mean.data.copy_(module.running_mean)
new_module.running_var.data.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
|
Recursively replace all `torch.nn.BatchNorm2d` with `RTDetrFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
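Example (an illustrative sketch of the in-place replacement; `RTDetrFrozenBatchNorm2d` is provided by the surrounding module):
>>> import torch.nn as nn
>>> backbone = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
>>> replace_batch_norm(backbone)  # the BatchNorm2d child is now a frozen batch norm layer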
|
github-repos
|
def init_config_json(config_file):
json_data = None
try:
if os.path.exists(config_file):
with open(config_file) as json_file:
json_data = json.load(json_file)
return unicode_convert(json_data)
else:
return None
except:
(line, filename, synerror) = trace()
raise ArcRestHelperError({'function': 'init_config_json', 'line': line, 'filename': filename, 'synerror': synerror})
finally:
json_data = None
del json_data
gc.collect()
|
Deserializes a JSON configuration file.
Args:
config_file (str): The path to the JSON file.
Returns:
dict: A dictionary object containing the JSON data. If ``config_file`` does not exist, returns ``None``.
|
codesearchnet
|
def corpus_token_counts(text_filepattern, corpus_max_lines, split_on_newlines=True):
counts = collections.Counter()
for doc in _read_filepattern(text_filepattern, max_lines=corpus_max_lines, split_on_newlines=split_on_newlines):
counts.update(encode(_native_to_unicode(doc)))
mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts))
return counts
|
Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Returns:
a dictionary mapping token to count.
|
codesearchnet
|
def _add_pos_constrain(token_lst: List[Dict], pos_tags: List) -> List[Dict]:
result = []
for a_token in token_lst:
for pos in pos_tags:
a_token[attrs.POS] = POS_MAP[pos]
result.append(copy.deepcopy(a_token))
return result
|
Add POS tag constraints for some token type, creating the cross product.
Args:
token_lst: List[Dict]
pos_tags: List
Returns: List[Dict]
|
juraj-google-style
|
def popular(self, **kwargs):
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the list of popular movies on The Movie Database. This list
refreshes every day.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
|
juraj-google-style
|
def json_to_params(fn=None, return_json=True):
def json_to_params_decorator(fn):
@handle_type_error
@wraps(fn)
def json_to_params_wrapper(*args, **kwargs):
data = decode_json_body()
if type(data) in [tuple, list]:
args = list(args) + data
elif type(data) == dict:
allowed_keys = set(data.keys()) - set(kwargs.keys())
for key in allowed_keys:
kwargs[key] = data[key]
elif type(data) in PRIMITIVE_TYPES:
args = list(args)
args.append(data)
if not return_json:
return fn(*args, **kwargs)
return encode_json_body(
fn(*args, **kwargs)
)
return json_to_params_wrapper
if fn:
return json_to_params_decorator(fn)
return json_to_params_decorator
|
Convert JSON in the body of the request to the parameters for the wrapped
function.
If the JSON is list, add it to ``*args``.
If dict, add it to ``**kwargs`` in non-rewrite mode (no key in ``**kwargs``
will be overwritten).
If single value, add it to ``*args``.
Args:
return_json (bool, default True): Should the decorator automatically
convert returned value to JSON?
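Example (a hedged sketch; the route decorator is hypothetical and the request body is assumed to carry JSON):
>>> @post('/users')        # hypothetical framework route decorator
... @json_to_params
... def create_user(name, email=None):
...     return {"created": name, "email": email}
A POST body of {"name": "ann"} is decoded and passed as the `name` keyword argument, and the
returned dict is encoded back to JSON because `return_json` defaults to True.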
|
juraj-google-style
|
def is_coord_subset(subset, superset, atol=1e-08):
c1 = np.array(subset)
c2 = np.array(superset)
is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close)
|
Tests if all coords in subset are contained in superset.
Doesn't use periodic boundary conditions
Args:
subset, superset: List of coords
Returns:
True if all of subset is in superset.
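Example (illustrative, using the default tolerance):
>>> is_coord_subset([[0, 0, 0]], [[0, 0, 0], [0.5, 0.5, 0.5]])
True
>>> is_coord_subset([[0.1, 0, 0]], [[0, 0, 0], [0.5, 0.5, 0.5]])
False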
|
codesearchnet
|
def decode(self, encoded):
if self.enforce_reversible:
self.enforce_reversible = False
if (self.encode(self.decode(encoded)) != encoded):
raise ValueError(('Decoding is not reversible for "%s"' % encoded))
self.enforce_reversible = True
return encoded
|
Decodes an object.
Args:
encoded (object): Encoded object.
Returns:
object: Object decoded.
|
codesearchnet
|
def __init__(self, flags: Optional[Sequence[str]]=None, **kwargs) -> None:
logging.basicConfig()
if isinstance(flags, str):
raise ValueError('Flags must be an iterable of strings, not a single string.')
self._flags = flags
parser = _BeamArgumentParser(allow_abbrev=False)
for cls in type(self).mro():
if cls == PipelineOptions:
break
elif '_add_argparse_args' in cls.__dict__:
cls._add_argparse_args(parser)
self._visible_options, _ = parser.parse_known_args(flags)
self._all_options = kwargs
for option_name in self._visible_option_list():
if option_name not in self._all_options:
self._all_options[option_name] = getattr(self._visible_options, option_name)
|
Initialize an options class.
The initializer will traverse all subclasses, add all their argparse
arguments and then parse the command line specified by flags or by default
the one obtained from sys.argv.
The subclasses of PipelineOptions do not need to redefine __init__.
Args:
flags: An iterable of command line arguments to be used. If not specified
then sys.argv will be used as input for parsing arguments.
**kwargs: Add overrides for arguments passed in flags. For overrides
of arguments, please pass the `option names` instead of
flag names.
Option names: These are defined as dest in the
parser.add_argument() for each flag. Passing flags
like {no_use_public_ips: True}, for which the dest is
defined to a different flag name in the parser,
would be discarded. Instead, pass the dest of
the flag (dest of no_use_public_ips is use_public_ips).
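Example (a minimal sketch of the subclassing pattern described above; the option name is illustrative):
>>> class MyOptions(PipelineOptions):
...     @classmethod
...     def _add_argparse_args(cls, parser):
...         parser.add_argument('--input_path', default='gs://bucket/input')  # hypothetical option
>>> options = MyOptions(flags=['--input_path', 'gs://bucket/other'])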
|
github-repos
|
def check_email_exists_by_subject(self, subject, match_recipient=None):
self._mail.select('inbox')
try:
matches = self.__search_email_by_subject(subject, match_recipient)
if (len(matches) <= 0):
return False
else:
return True
except Exception as e:
raise e
|
Searches for Email by Subject. Returns True or False.
Args:
subject (str): Subject to search for.
Kwargs:
match_recipient (str) : Recipient to match exactly (ignored if not specified).
Returns:
True - email found, False - email not found
|
codesearchnet
|
def get_all(cls, keyvals, key='id', user_id=None):
if (len(keyvals) == 0):
return []
original_keyvals = keyvals
keyvals_set = list(set(keyvals))
resultset = cls.query.filter(getattr(cls, key).in_(keyvals_set))
key_result_mapping = {getattr(result, key): result for result in resultset.all()}
return [key_result_mapping.get(kv) for kv in original_keyvals]
|
Works like a map function from keyvals to instances.
Args:
keyvals(list): The list of values of the attribute.
key (str, optional): The attribute to search by. By default, it is
'id'.
Returns:
list: A list of model instances, in the same order as the list of
keyvals.
Examples:
>>> User.get_all([2,5,7, 8000, 11])
user2@i.com, user5@i.com, user7@i.com, None, user11@i.com
>>> User.get_all(['user35@i.com', 'user5@i.com'], key='email')
user35@i.com, user5@i.com
|
codesearchnet
|
def mean_area_distance(item_a, item_b, max_value):
mean_area_a = np.mean([item_a.size(t) for t in item_a.times])
mean_area_b = np.mean([item_b.size(t) for t in item_b.times])
return np.abs(mean_area_a - mean_area_b) / float(max_value)
|
Absolute difference in the means of the areas of each track over time.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
|
juraj-google-style
|
def raw_decrypt(self, ciphertext):
if not isinstance(ciphertext, int):
raise TypeError('Expected ciphertext to be an int, not: %s' %
type(ciphertext))
decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p
decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q
return self.crt(decrypt_to_p, decrypt_to_q)
|
Decrypt raw ciphertext and return raw plaintext.
Args:
ciphertext (int): (usually from :meth:`EncryptedNumber.ciphertext()`)
that is to be Paillier decrypted.
Returns:
int: Paillier decryption of ciphertext. This is a positive
integer < :attr:`public_key.n`.
Raises:
TypeError: if ciphertext is not an int.
|
juraj-google-style
|
def capture_update_from_model(cls, table_name, record_id, *, update_fields=()):
include_cols = ()
if update_fields:
model_cls = get_connected_model_for_table_name(table_name)
include_cols = cls._fieldnames_to_colnames(model_cls, update_fields)
raw_query = sql.SQL('\n SELECT {schema}.hc_capture_update_from_row(\n hstore({schema}.{table_name}.*),\n %(table_name)s,\n ARRAY[{include_cols}]::text[] -- cast to type expected by stored procedure\n ) AS id\n FROM {schema}.{table_name}\n WHERE id = %(record_id)s\n ').format(schema=sql.Identifier(settings.HEROKU_CONNECT_SCHEMA), table_name=sql.Identifier(table_name), include_cols=sql.SQL(', ').join((sql.Identifier(col) for col in include_cols)))
params = {'record_id': record_id, 'table_name': table_name}
result_qs = TriggerLog.objects.raw(raw_query, params)
return list(result_qs)
|
Create a fresh update record from the current model state in the database.
For read-write connected models, this will lead to the attempted update of the values of
a corresponding object in Salesforce.
Args:
table_name (str): The name of the table backing the connected model (without schema)
record_id (int): The primary id of the connected model
update_fields (Iterable[str]): If given, the names of fields that will be included in
the write record
Returns:
A list of the created TriggerLog entries (usually one).
Raises:
LookupError: if ``table_name`` does not belong to a connected model
|
codesearchnet
|
def _copy(self, filename, destination):
full_filename = os.path.abspath(os.path.expanduser(filename))
if os.path.isdir(full_filename):
shutil.copytree(full_filename, destination)
elif os.path.isfile(full_filename):
shutil.copyfile(full_filename, destination)
|
Copy a file or folder to the repository.
Will mount if needed.
Args:
filename: Path to copy.
destination: Remote path to copy file to.
|
juraj-google-style
|
def load_spacy_rule(file_path: str) -> Dict:
with open(file_path) as fp:
return json.load(fp)
|
A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules
|
codesearchnet
|
def make_connection(self, bind_user=None, bind_password=None, **kwargs):
return self._make_connection(bind_user, bind_password, contextualise=False, **kwargs)
|
Make a connection to the LDAP Directory.
Args:
bind_user (str): User to bind with. If `None`, AUTH_ANONYMOUS is
used, otherwise authentication specified with
config['LDAP_BIND_AUTHENTICATION_TYPE'] is used.
bind_password (str): Password to bind to the directory with
**kwargs (dict): Additional arguments to pass to the
``ldap3.Connection``
Returns:
ldap3.Connection: An unbound ldap3.Connection. You should handle exceptions
upon bind if you use this internal method.
|
codesearchnet
|
def run_repair_pdb(self, silent=False, force_rerun=False):
foldx_repair_pdb = 'foldx --command=RepairPDB --pdb={}'.format(self.pdb_file)
foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0])
ssbio.utils.command_runner(shell_command=foldx_repair_pdb, force_rerun_flag=force_rerun, silent=silent, outfile_checker=foldx_repair_outfile, cwd=self.foldx_dir)
self.repaired_pdb_outfile = foldx_repair_outfile
|
Run FoldX RepairPDB on this PDB file.
Original command::
foldx --command=RepairPDB --pdb=4bxi.pdb
Args:
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.
|
codesearchnet
|
def CreateStorageReaderForFile(cls, path):
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(
path, check_readable_only=True):
return sqlite_reader.SQLiteStorageFileReader(path)
return None
|
Creates a storage reader based on the file.
Args:
path (str): path to the storage file.
Returns:
StorageReader: a storage reader or None if the storage file cannot be
opened or the storage format is not supported.
|
juraj-google-style
|
def timestamp(self):
return self._timestamp
|
Timestamp of when this tensor value was dumped.
Returns:
(`int`) The timestamp in microseconds.
|
github-repos
|
def add_untagged_ok(self, text: MaybeBytes, code: Optional[ResponseCode]=None) -> None:
response = ResponseOk(b'*', text, code)
self.add_untagged(response)
|
Add an untagged ``OK`` response.
See Also:
:meth:`.add_untagged`, :class:`ResponseOk`
Args:
text: The response text.
code: Optional response code.
|
codesearchnet
|
def get(self, recipe_id):
self.logger.debug(('Retrieving recipe by id: ' + recipe_id))
url = ('%(base_url)s/recipe/%(recipe_id)s' % {'base_url': self.base_url, 'recipe_id': recipe_id})
r = self.gbdx_connection.get(url)
r.raise_for_status()
return r.json()
|
Retrieves an AnswerFactory Recipe by id
Args:
recipe_id The id of the recipe
Returns:
A JSON representation of the recipe
|
codesearchnet
|
async def send_code_request(self, phone, *, force_sms=False):
phone = (utils.parse_phone(phone) or self._phone)
phone_hash = self._phone_code_hash.get(phone)
if (not phone_hash):
try:
result = (await self(functions.auth.SendCodeRequest(phone, self.api_id, self.api_hash, types.CodeSettings())))
except errors.AuthRestartError:
return self.send_code_request(phone, force_sms=force_sms)
self._tos = result.terms_of_service
self._phone_code_hash[phone] = phone_hash = result.phone_code_hash
else:
force_sms = True
self._phone = phone
if force_sms:
result = (await self(functions.auth.ResendCodeRequest(phone, phone_hash)))
self._phone_code_hash[phone] = result.phone_code_hash
return result
|
Sends a code request to the specified phone number.
Args:
phone (`str` | `int`):
The phone to which the code will be sent.
force_sms (`bool`, optional):
Whether to force sending as SMS.
Returns:
An instance of :tl:`SentCode`.
|
codesearchnet
|
def __init__(self, input_reader=None, output_writer=None):
preferred_encoding = locale.getpreferredencoding()
if not input_reader:
input_reader = StdinInputReader(encoding=preferred_encoding)
if not output_writer:
output_writer = StdoutOutputWriter(encoding=preferred_encoding)
super(CLIVolumeScannerMediator, self).__init__()
self._encode_errors = 'strict'
self._input_reader = input_reader
self._output_writer = output_writer
self._preferred_encoding = locale.getpreferredencoding()
self._textwrapper = textwrap.TextWrapper()
|
Initializes a volume scanner mediator.
Args:
input_reader (Optional[CLIInputReader]): input reader, where None
indicates that the stdin input reader should be used.
output_writer (Optional[CLIOutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
|
juraj-google-style
|
def get_many(self, type: Type[T], query: Mapping[(str, Any)], streaming: bool=False) -> Iterable[T]:
LOGGER.info('Getting SourceHandlers for "{type}"'.format(type=type.__name__))
try:
handlers = self._get_types[type]
except KeyError:
try:
LOGGER.info('Building new SourceHandlers for "{type}"'.format(type=type.__name__))
handlers = self._get_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
if (handlers is None):
raise NoConversionError('No source can provide "{type}"'.format(type=type.__name__))
LOGGER.info('Creating new PipelineContext')
context = self._new_context()
LOGGER.info('Querying SourceHandlers for "{type}"'.format(type=type.__name__))
for handler in handlers:
try:
return handler.get_many(query, context, streaming)
except NotFoundError:
pass
raise NotFoundError('No source returned a query result!')
|
Gets a query from the data pipeline, which contains a request for multiple objects.
1) Extracts the query from the sequence of data sources.
2) Inserts the results into the data sinks (if appropriate).
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True.
|
codesearchnet
|
def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):
if (type(hosts) != list):
hosts = [hosts]
conn_params = {'hosts': hosts, 'timeout': 20}
if use_ssl:
conn_params['use_ssl'] = True
if ssl_cert_path:
conn_params['verify_certs'] = True
conn_params['ca_certs'] = ssl_cert_path
else:
conn_params['verify_certs'] = False
connections.create_connection(**conn_params)
|
Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
|
codesearchnet
|
def _start_event_client(self):
|
Starts a separate JsonRpc client to the same session for propagating
events.
This is an optional function that should only be implemented if the client
utilizes the snippet event mechanism.
Returns:
A JsonRpc Client object that connects to the same session as the
one on which this function is called.
|
github-repos
|
def push(self, x):
if not math.isnan(x):
self._n += 1
delta = x - self._mean
else:
delta = 0
if self._window_mode == WindowMode.SLIDING:
if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):
self._n -= 1
delta += self._mean - old_x
super().push(x)
if self._n > 0:
self._mean += delta / self._n
else:
self._mean = 0
|
Pushes a new value and updates the incremental mean.
Args:
x: The new value to be pushed.
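The update above is the standard incremental mean recurrence; a standalone sketch of just that recurrence:
>>> def incremental_mean(values):
...     mean, n = 0.0, 0
...     for x in values:
...         n += 1
...         mean += (x - mean) / n  # same delta / n update used in push()
...     return mean
>>> incremental_mean([1, 2, 3, 4])
2.5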
|
github-repos
|
def set_preferred_prefix_for_namespace(self, ns_uri, prefix, add_if_not_exist=False):
ni = self.__lookup_uri(ns_uri)
if (not prefix):
ni.preferred_prefix = None
elif (prefix in ni.prefixes):
ni.preferred_prefix = prefix
elif add_if_not_exist:
self.add_prefix(ns_uri, prefix, set_as_preferred=True)
else:
raise PrefixNotFoundError(prefix)
|
Sets the preferred prefix for ns_uri. If add_if_not_exist is True,
the prefix is added if it's not already registered. Otherwise,
setting an unknown prefix as preferred is an error. The default
is False. Setting to None always works, and indicates a preference
to use the namespace as a default. The given namespace must already
be in this set.
Args:
ns_uri (str): the namespace URI whose prefix is to be set
prefix (str): the preferred prefix to set
add_if_not_exist (bool): Whether to add the prefix if it is not
already set as a prefix of ``ns_uri``.
Raises:
NamespaceNotFoundError: If namespace ``ns_uri`` isn't in this set.
DuplicatePrefixError: If ``prefix`` already maps to a different
namespace.
|
codesearchnet
|
def __init__(self, is_found, key, value):
self.key = key
self.is_found = is_found
if self.is_found:
self.value = value
|
Creates a cached response object.
Args:
is_found (bool): True if the key was found in the cache, False
otherwise.
key (string): The key originally used to retrieve the value.
value (object)
|
juraj-google-style
|
def explore(config, mutations, resample_probability, custom_explore_fn):
new_config = copy.deepcopy(config)
for key, distribution in mutations.items():
if isinstance(distribution, dict):
new_config.update({
key: explore(config[key], mutations[key], resample_probability,
None)
})
elif isinstance(distribution, list):
if random.random() < resample_probability or \
config[key] not in distribution:
new_config[key] = random.choice(distribution)
elif random.random() > 0.5:
new_config[key] = distribution[max(
0,
distribution.index(config[key]) - 1)]
else:
new_config[key] = distribution[min(
len(distribution) - 1,
distribution.index(config[key]) + 1)]
else:
if random.random() < resample_probability:
new_config[key] = distribution()
elif random.random() > 0.5:
new_config[key] = config[key] * 1.2
else:
new_config[key] = config[key] * 0.8
if type(config[key]) is int:
new_config[key] = int(new_config[key])
if custom_explore_fn:
new_config = custom_explore_fn(new_config)
assert new_config is not None, \
"Custom explore fn failed to return new config"
logger.info("[explore] perturbed config from {} -> {}".format(
config, new_config))
return new_config
|
Return a config perturbed as specified.
Args:
config (dict): Original hyperparameter configuration.
mutations (dict): Specification of mutations to perform as documented
in the PopulationBasedTraining scheduler.
resample_probability (float): Probability of allowing resampling of a
particular variable.
custom_explore_fn (func): Custom explore fn applied after built-in
config perturbations are.
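Example (an illustrative call with a toy mutation spec; keys and ranges are placeholders):
>>> mutations = {
...     "lr": lambda: random.uniform(1e-4, 1e-1),  # callables are resampled or scaled by 0.8/1.2
...     "batch_size": [16, 32, 64],                # lists are resampled or shifted to a neighbour
... }
>>> new_config = explore({"lr": 0.01, "batch_size": 32}, mutations,
...                      resample_probability=0.25, custom_explore_fn=None)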
|
juraj-google-style
|
def HasCustomStr(component):
if hasattr(component, '__str__'):
class_attrs = inspectutils.GetClassAttrsDict(type(component)) or {}
str_attr = class_attrs.get('__str__')
if str_attr and str_attr.defining_class is not object:
return True
return False
|
Determines if a component has a custom __str__ method.
Uses inspect.classify_class_attrs to determine the origin of the object's
__str__ method, if one is present. If it is defined by `object` itself, then
it is not considered custom. Otherwise it is. This means that the __str__
methods of primitives like ints and floats are considered custom.
Objects with custom __str__ methods are treated as values and can be
serialized in places where more complex objects would have their help screen
shown instead.
Args:
component: The object to check for a custom __str__ method.
Returns:
Whether `component` has a custom __str__ method.
|
github-repos
|
def _is_variant(self, gemini_variant, ind_objs):
indexes = (ind.ind_index for ind in ind_objs)
for index in indexes:
gt_call = gemini_variant['gt_types'][index]
if (gt_call == 1 or gt_call == 3):
return True
return False
|
Check if the variant is a variation in any of the individuals
Args:
gemini_variant (GeminiQueryRow): The gemini variant
ind_objs (list(puzzle.models.individual)): A list of individuals to check
Returns:
bool : If any of the individuals has the variant
|
juraj-google-style
|
def get_attr_value(self, attr_key, el_idx=0):
return self.get_element_by_attr_key(attr_key, el_idx).attrib[attr_key]
|
Return the value of the selected attribute in the selected element.
Args:
attr_key : str
Name of attribute for which to search
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name.
Returns:
str : Value of the selected attribute in the selected element.
|
juraj-google-style
|
def check_time(timer_id):
if timer_id not in _g_timers:
_g_timers[timer_id] = Timer()
return 0
else:
return _g_timers[timer_id].since_last_check()
|
Add check points in a single line.
This method is suitable for running a task on a list of items. A timer will
be registered when the method is called for the first time.
:Example:
>>> import time
>>> import mmcv
>>> for i in range(1, 6):
>>> # simulate a code block
>>> time.sleep(i)
>>> mmcv.check_time('task1')
2.000
3.000
4.000
5.000
Args:
timer_id (str): Timer identifier.
|
juraj-google-style
|
def package_and_copy(package_root_dir, setup_py, output_tar_path):
if not output_tar_path.startswith('gs://'):
raise ValueError('output_tar_path needs to be a GCS path.')
if not os.path.isfile(setup_py):
raise ValueError('Supplied file "%s" does not exist.' % setup_py)
dest_setup_py = os.path.join(package_root_dir, 'setup.py')
if dest_setup_py != setup_py:
if os.path.isfile(dest_setup_py):
os.rename(dest_setup_py, dest_setup_py + '._bak_')
shutil.copyfile(setup_py, dest_setup_py)
tempdir = tempfile.mkdtemp()
previous_cwd = os.getcwd()
os.chdir(package_root_dir)
try:
sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]
subprocess.check_call(sdist)
source = os.path.join(tempdir, '*.tar.gz')
gscopy = ['gsutil', 'cp', source, output_tar_path]
subprocess.check_call(gscopy)
return
finally:
os.chdir(previous_cwd)
if dest_setup_py != setup_py:
os.remove(dest_setup_py)
if os.path.isfile(dest_setup_py + '._bak_'):
os.rename(dest_setup_py + '._bak_', dest_setup_py)
shutil.rmtree(tempdir)
|
Repackage an CloudML package and copy it to a staging dir.
Args:
package_root_dir: the root dir to install package from. Usually you can get the path
from inside your module using a relative path to __file__.
setup_py: the path to setup.py.
output_tar_path: the GCS path of the output tarball package.
Raises:
ValueError if output_tar_path is not a GCS path, or setup_py does not exist.
|
juraj-google-style
|
def _prepare_for_training(self, records, mini_batch_size=None, job_name=None):
super(AmazonAlgorithmEstimatorBase, self)._prepare_for_training(job_name=job_name)
feature_dim = None
if isinstance(records, list):
for record in records:
if record.channel == 'train':
feature_dim = record.feature_dim
break
if feature_dim is None:
raise ValueError('Must provide train channel.')
else:
feature_dim = records.feature_dim
self.feature_dim = feature_dim
self.mini_batch_size = mini_batch_size
|
Set hyperparameters needed for training.
Args:
* records (:class:`~RecordSet`): The records to train this ``Estimator`` on.
* mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a
default value will be used.
* job_name (str): Name of the training job to be created. If not specified, one is generated,
using the base name given to the constructor if applicable.
|
juraj-google-style
|
def Call(self, position, function_call):
self.EnsureGdbPosition(position[0], None, None)
if (not gdb.selected_thread().is_stopped()):
self.Interrupt(position)
result_value = gdb.parse_and_eval(function_call)
return self._UnpackGdbVal(result_value)
|
Perform a function call in the inferior.
WARNING: Since Gdb's concept of threads can't be directly identified with
python threads, the function call will be made from what has to be assumed
is an arbitrary thread. This *will* interrupt the inferior. Continuing it
after the call is the responsibility of the caller.
Args:
position: the context of the inferior to call the function from.
function_call: A string corresponding to a function call. Format:
'foo(0,0)'
Returns:
The return value of the called function.
|
codesearchnet
|
def Query(self, queue, limit=1):
if isinstance(queue, rdf_client.ClientURN):
queue = queue.Queue()
return self.data_store.QueueQueryTasks(queue, limit=limit)
|
Retrieves tasks from a queue without leasing them.
This is good for a read only snapshot of the tasks.
Args:
queue: The task queue that this task belongs to, usually client.Queue()
where client is the ClientURN object you want to schedule msgs on.
limit: Number of values to fetch.
Returns:
A list of Task() objects.
|
juraj-google-style
|
def _GetTimeElementsTuple(self, timestamp):
(year, month, day_of_month, hours, minutes, seconds) = (int((hexdigit[0] + hexdigit[1]), 16) for hexdigit in zip(timestamp[::2], timestamp[1::2]))
return ((year + 1970), (month + 1), day_of_month, hours, minutes, seconds)
|
Retrieves a time elements tuple from the timestamp.
A Symantec log timestamp consist of six hexadecimal octets, that represent:
First octet: Number of years since 1970
Second octet: Month, where January is represented by 0
Third octet: Day of the month
Fourth octet: Number of hours
Fifth octet: Number of minutes
Sixth octet: Number of seconds
For example, 200A13080122 represents November 19, 2002, 8:01:34 AM.
Args:
timestamp (str): hexadecimal encoded date and time values.
Returns:
tuple: containing:
year (int): year.
month (int): month, where 1 represents January.
day_of_month (int): day of month, where 1 is the first day of the month.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
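A standalone sketch of the same decoding, reproducing the documented example:
>>> timestamp = '200A13080122'
>>> octets = [int(a + b, 16) for a, b in zip(timestamp[::2], timestamp[1::2])]
>>> year, month, day, hours, minutes, seconds = octets
>>> (year + 1970, month + 1, day, hours, minutes, seconds)
(2002, 11, 19, 8, 1, 34)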
|
codesearchnet
|
def format_speech_generation_kwargs(kwargs):
kwargs_text = {}
kwargs_speech = {}
for key, value in kwargs.items():
if key.startswith('text_'):
key = key[len('text_'):]
kwargs_text[key] = value
elif key.startswith('speech_'):
key = key[len('speech_'):]
kwargs_speech[key] = value
elif key == 'generation_config':
kwargs_text[key] = value
else:
if key not in kwargs_text:
kwargs_text[key] = value
if key not in kwargs_speech:
kwargs_speech[key] = value
return (kwargs_text, kwargs_speech)
|
Format kwargs for SeamlessM4Tv2 models that generate speech, attribute kwargs to either the text generation or the
speech generation models.
Args:
kwargs (`dict`):
Keyword arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
|
github-repos
|
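A hedged illustration of the prefix routing described above; the keyword names are invented placeholders and the function from this entry is assumed to be in scope:
kwargs = {'num_beams': 4, 'text_num_beams': 2, 'speech_do_sample': True}
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
# Un-prefixed keys are copied to both sub-models; a prefixed key overrides its own model:
# kwargs_text   -> {'num_beams': 2}
# kwargs_speech -> {'num_beams': 4, 'do_sample': True}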
def _prep_binary_mimetype(self):
if not self.mimetype and 'Content-Type' not in self.resource.headers.keys():
raise Exception('to create/update NonRDFSource, mimetype or Content-Type header is required')
elif self.mimetype and 'Content-Type' not in self.resource.headers.keys():
logger.debug('setting Content-Type header with provided mimetype: %s'
% self.mimetype)
self.resource.headers['Content-Type'] = self.mimetype
|
Sets Content-Type header based on headers and/or self.binary.mimetype values
Implicitly favors Content-Type header if set
Args:
None
Returns:
None: sets attributes in self.binary and headers
|
juraj-google-style
|
def execute_no_wait(self, cmd, walltime, envs={}):
current_env = copy.deepcopy(self._envs)
current_env.update(envs)
try:
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.userhome,
env=current_env,
shell=True,
preexec_fn=os.setpgrp
)
pid = proc.pid
    except Exception as e:
        print("Caught exception : {0}".format(e))
        logger.warning("Execution of command [%s] failed due to \n %s", cmd, e)
        pid, proc = None, None
    return pid, proc
|
Execute a commandline string on the shell without waiting for it to complete.
Args:
- cmd (string) : Commandline string to execute
- walltime (int) : walltime in seconds, this is not really used now.
Returns:
- pid : Process id of the spawned process, or None if launching it failed
- proc : The subprocess.Popen handle for the running process, or None on failure
Raises:
None.
|
juraj-google-style
|
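A hypothetical caller sketch showing how output would be collected later, since the method above only launches the process; `channel` is a placeholder for an object exposing execute_no_wait:
pid, proc = channel.execute_no_wait('echo hello', walltime=60)
if proc is not None:
    stdout, stderr = proc.communicate()  # wait and read output when the caller is ready
    print(pid, stdout.decode())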
def _AddUser(self, user):
self.logger.info('Creating a new user account for %s.', user)
command = self.useradd_cmd.format(user=user)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not create user %s. %s.', user, str(e))
return False
else:
self.logger.info('Created user account %s.', user)
return True
|
Configure a Linux user account.
Args:
user: string, the name of the Linux user account to create.
Returns:
bool, True if user creation succeeded.
|
juraj-google-style
|
def serve(args):
port = (args.serve_port or PORT)
host = '0.0.0.0'
dir_path = Path().absolute()
web_dir = (dir_path / 'site')
utils.set_routes()
if args.offline:
os.environ['MKINX_OFFLINE'] = 'true'
_ = subprocess.check_output('mkdocs build > /dev/null', shell=True)
utils.make_offline()
class MkinxHTTPHandler(SimpleHTTPRequestHandler):
'Class routing urls (paths) to projects (resources)\n '
def translate_path(self, path):
location = str(web_dir)
route = location
if ((len(path) != 0) and (path != '/')):
for (key, loc) in utils.get_routes():
if path.startswith(key):
location = loc
path = path[len(key):]
break
if ((location[(- 1)] == '/') or (not path) or (path[0] == '/')):
route = (location + path)
else:
route = ((location + '/') + path)
return route.split('?')[0]
success = False
count = 0
print('Waiting for server port...')
try:
while (not success):
try:
httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler)
success = True
except OSError:
count += 1
finally:
if ((not success) and (count > 20)):
s = 'port {} seems occupied. Try with {} ? (y/n)'
if ('y' in input(s.format(port, (port + 1)))):
port += 1
count = 0
else:
print('You can specify a custom port with mkinx serve -s')
return
time.sleep(0.5)
except KeyboardInterrupt:
print('Aborting.')
return
httpd.allow_reuse_address = True
    print('\nServing at http://{}:{}\n'.format(host, port))
thread = threading.Thread(target=httpd.serve_forever)
thread.daemon = True
thread.start()
event_handler = utils.MkinxFileHandler(patterns=['*.rst', '*.md', '*.yml', '*.yaml'])
observer = Observer()
observer.schedule(event_handler, path=str(dir_path), recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
httpd.server_close()
observer.join()
|
Start a server which will watch .md and .rst files for changes.
If a .md file changes, the Home Documentation is rebuilt. If a .rst
file changes, the updated sphinx project is rebuilt.
Args:
args (ArgumentParser): flags from the CLI
|
codesearchnet
|
def get(self, key, value):
if (key == 'id'):
response = self._swimlane.request('get', 'app/{}'.format(value))
if (response.status_code == 204):
raise ValueError('No app with id "{}"'.format(value))
return App(self._swimlane, response.json())
else:
for app in self.list():
if (value and (value == app.name)):
return app
raise ValueError('No app with name "{}"'.format(value))
|
Get single app by one of id or name
Supports resource cache
Keyword Args:
id (str): Full app id
name (str): App name
Returns:
App: Corresponding App resource instance
Raises:
TypeError: No or multiple keyword arguments provided
ValueError: No matching app found on server
|
codesearchnet
|
def erf(x):
if any_symbolic_tensors((x,)):
return Erf().symbolic_call(x)
x = backend.convert_to_tensor(x)
return backend.math.erf(x)
|
Computes the error function of `x`, element-wise.
Args:
x: Input tensor.
Returns:
A tensor with the same dtype as `x`.
Example:
>>> x = np.array([-3.0, -2.0, -1.0, 0.0, 1.0])
>>> keras.ops.erf(x)
array([-0.99998 , -0.99532, -0.842701, 0., 0.842701], dtype=float32)
|
github-repos
|
def get_nic(access_token, subscription_id, resource_group, nic_name):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/networkInterfaces/', nic_name, '?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
Get details about a network interface.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
nic_name (str): Name of the NIC.
Returns:
HTTP response. NIC JSON body.
|
codesearchnet
|
def _get_by_id(cls, id, parent=None, **ctx_options):
return cls._get_by_id_async(id, parent=parent, **ctx_options).get_result()
|
Returns an instance of Model class by ID.
This is really just a shorthand for Key(cls, id, ...).get().
Args:
id: A string or integer key ID.
parent: Optional parent key of the model to get.
namespace: Optional namespace.
app: Optional app ID.
**ctx_options: Context options.
Returns:
A model instance or None if not found.
|
codesearchnet
|
def enable_streaming(self):
if (not self.connected):
raise HardwareError('Cannot enable streaming if we are not in a connected state')
if (self._reports is not None):
_clear_queue(self._reports)
return self._reports
self._reports = queue.Queue()
self._loop.run_coroutine(self.adapter.open_interface(0, 'streaming'))
return self._reports
|
Open the streaming interface and accumulate reports in a queue.
This method is safe to call multiple times in a single device
connection. There is no way to check if the streaming interface is
opened or to close it once it is opened (apart from disconnecting from
the device).
The first time this method is called, it will open the streaming
interface and return a queue that will be filled asynchronously with
reports as they are received. Subsequent calls will just empty the
queue and return the same queue without interacting with the device at
all.
Returns:
queue.Queue: A queue that will be filled with reports from the device.
|
codesearchnet
|
def songs(self):
song_list = []
for chunk in self.songs_iter(page_size=49995):
song_list.extend(chunk)
return song_list
|
Get a listing of library songs.
Returns:
list: Song dicts.
|
codesearchnet
|
def _data_from_dotnotation(self, key, default=None):
if key is None:
raise KeyError('NoneType is not a valid key!')
doc = self._collection.find_one({"_id": ObjectId(self._workflow_id)})
if doc is None:
return default
for k in key.split('.'):
doc = doc[k]
return doc
|
Returns the MongoDB data from a key using dot notation.
Args:
key (str): The key to the field in the workflow document. Supports MongoDB's
dot notation for embedded fields.
default (object): The default value that is returned if the key
does not exist.
Returns:
object: The data for the specified key or the default value.
|
juraj-google-style
|
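A MongoDB-free sketch of the dot-notation traversal the method above performs; the document and key are invented placeholders:
doc = {'payload': {'status': {'code': 200}}}
node = doc
for part in 'payload.status.code'.split('.'):
    node = node[part]  # descend one embedded level per dot-separated key
print(node)  # 200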
def load_spectrum(filename):
import f311
f = load_with_classes(filename, f311.classes_sp())
if f:
return f.spectrum
return None
|
Attempts to load spectrum as one of the supported types.
Returns:
a Spectrum, or None
|
codesearchnet
|
def _get_grouped_variables(vars_to_warm_start):
if isinstance(vars_to_warm_start, str) or vars_to_warm_start is None:
logging.info('Warm-starting variables only in TRAINABLE_VARIABLES.')
list_of_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=vars_to_warm_start)
elif isinstance(vars_to_warm_start, list):
if all((isinstance(v, str) for v in vars_to_warm_start)):
list_of_vars = []
for v in vars_to_warm_start:
list_of_vars += ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope=v)
elif all((checkpoint_utils._is_variable(v) for v in vars_to_warm_start)):
list_of_vars = vars_to_warm_start
else:
raise ValueError('If `vars_to_warm_start` is a list, it must be all `Variable` or all `str`. Given types are {}'.format([type(v) for v in vars_to_warm_start]))
else:
raise ValueError('`vars_to_warm_start must be a `list` or `str`. Given type is {}'.format(type(vars_to_warm_start)))
grouped_variables = {}
for v in list_of_vars:
t = [v] if not isinstance(v, list) else v
var_name = _infer_var_name(t)
grouped_variables.setdefault(var_name, []).append(v)
return grouped_variables
|
Collects and groups (possibly partitioned) variables into a dictionary.
The variables can be provided explicitly through vars_to_warm_start, or they
are retrieved from collections (see below).
Args:
vars_to_warm_start: One of the following:
- A regular expression (string) that captures which variables to
warm-start (see tf.compat.v1.get_collection). This expression will
only consider variables in the TRAINABLE_VARIABLES collection.
- A list of strings, each representing a full variable name to warm-start.
These will consider variables in GLOBAL_VARIABLES collection.
- A list of Variables to warm-start.
- `None`, in which case all variables in TRAINABLE_VARIABLES will be used.
Returns:
A dictionary mapping variable names (strings) to lists of Variables.
Raises:
ValueError: If vars_to_warm_start is not a string, `None`, a list of
`Variables`, or a list of strings.
|
github-repos
|
def get_workflow(workflow_id: str, workflow_version: str) -> dict:
name = "workflow_definitions:{}:{}".format(workflow_id, workflow_version)
workflow = DB.get_hash_dict(name)
workflow['stages'] = ast.literal_eval(workflow['stages'])
return workflow
|
Get a workflow definition from the Configuration Database.
Args:
workflow_id (str): Workflow identifier
workflow_version (str): Workflow version
Returns:
dict, Workflow definition dictionary
|
juraj-google-style
|
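A small sketch of the `ast.literal_eval` step above: the 'stages' field comes back from the hash as a string and is safely parsed into a Python structure (the sample value is invented):
import ast
stages_field = "[{'id': 'stage-1'}, {'id': 'stage-2'}]"
stages = ast.literal_eval(stages_field)
print(type(stages).__name__, stages[0]['id'])  # list stage-1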
def _module_info_from_proto(module_info_def, import_scope=None):
graph = tf.get_default_graph()
def prepend_name_scope(name_scope):
return ops.prepend_name_scope(name_scope, import_scope)
def process_leafs(name):
return _path_to_graph_element(prepend_name_scope(name), graph)
connected_subgraphs = []
module_info = ModuleInfo(
module_name=module_info_def.module_name,
scope_name=prepend_name_scope(module_info_def.scope_name),
class_name=module_info_def.class_name,
connected_subgraphs=connected_subgraphs)
for connected_subgraph_def in module_info_def.connected_subgraphs:
connected_subgraph = ConnectedSubGraph(
module=module_info,
name_scope=prepend_name_scope(connected_subgraph_def.name_scope),
inputs=_nested_from_proto(
connected_subgraph_def.inputs, process_leafs),
outputs=_nested_from_proto(
connected_subgraph_def.outputs, process_leafs))
connected_subgraphs.append(connected_subgraph)
return module_info
|
Deserializes `module_info_def` proto.
Args:
module_info_def: An instance of `module_pb2.SonnetModule`.
import_scope: Optional `string`. Name scope to use.
Returns:
An instance of `ModuleInfo`.
Raises:
base_errors.ModuleInfoError: If the protobuf is of the wrong type or
if some of its fields are missing.
|
juraj-google-style
|
def log_softmax_v2(logits, axis=None, name=None):
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
|
Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
|
github-repos
|
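A NumPy sketch of the identity quoted in the docstring, together with the max-shifted form commonly used for numerical stability (illustrative only, not how gen_nn_ops implements it):
import numpy as np
logits = np.array([1.0, 2.0, 3.0])
log_softmax = logits - np.log(np.sum(np.exp(logits), axis=-1))
shifted = logits - logits.max(axis=-1)
stable = shifted - np.log(np.sum(np.exp(shifted), axis=-1))
assert np.allclose(log_softmax, stable)
print(log_softmax)  # approximately [-2.4076 -1.4076 -0.4076]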
def describe_file_set(modules):
descriptor = FileSet()
file_descriptors = []
for module in modules:
file_descriptors.append(describe_file(module))
if file_descriptors:
descriptor.files = file_descriptors
return descriptor
|
Build a file set from the specified Python modules.
Args:
modules: Iterable of Python module to describe.
Returns:
Initialized FileSet instance describing the modules.
|
juraj-google-style
|
def list_vdirs(site, app=_DEFAULT_APP):
ret = dict()
ps_cmd = ['Get-WebVirtualDirectory',
'-Site', r"'{0}'".format(site),
'-Application', r"'{0}'".format(app),
'|', "Select-Object PhysicalPath, @{ Name = 'name';",
r"Expression = { $_.path.Split('/')[-1] } }"]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
ret[item['name']] = {'sourcepath': item['physicalPath']}
if not ret:
log.warning('No vdirs found in output: %s', cmd_ret)
return ret
|
Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site
|
juraj-google-style
|
def match(obj, matchers=TYPES):
buf = get_bytes(obj)
for matcher in matchers:
if matcher.match(buf):
return matcher
return None
|
Matches the given input against the available
file type matchers.
Args:
obj: path to file, bytes or bytearray.
Returns:
Type instance if type matches. Otherwise None.
Raises:
TypeError: if obj is not a supported type.
|
juraj-google-style
|
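A hypothetical call sketch; the buffer below is a PNG file signature padded with zeros, and the exact matcher returned depends on what the default TYPES list contains:
png_signature = b'\x89PNG\r\n\x1a\n' + b'\x00' * 16
kind = match(png_signature)
print(kind)  # expected: the PNG matcher instance, or None if no registered matcher recognises the buffer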
def minimum(station_code):
temp = None
fin = None
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
except IOError:
logger.info("File not found")
download_extract(_eere_url(station_code))
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
for line in fin:
value = re.search('Max Drybulb=(-?\\d+\\.\\d*)', line)
if value:
temp = float(value.groups()[0])
    if temp is None:
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'stat')))
for line in fin:
                if line.find('Minimum Dry Bulb') != -1:
return float(line[37:-1].split('\xb0')[0])
except IOError:
pass
    if temp is not None:
        return temp
else:
raise Exception("Error: Minimum Temperature not found")
|
Extreme Minimum Design Temperature for a location.
Degrees in Celsius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celsius
|
juraj-google-style
|
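A standalone check of the 'Max Drybulb' regular expression used above, with an invented input line:
import re
line = ' ! Max Drybulb=34.8'
value = re.search('Max Drybulb=(-?\\d+\\.\\d*)', line)
print(float(value.groups()[0]))  # 34.8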
def rtm(
self, url: Optional[str] = None, bot_id: Optional[str] = None
) -> Iterator[events.Event]:
while True:
bot_id = bot_id or self._find_bot_id()
url = url or self._find_rtm_url()
for event in self._incoming_from_rtm(url, bot_id):
yield event
url = None
|
Iterate over events from the RTM API
Args:
url: Websocket connection url
bot_id: Connecting bot ID
Returns:
:class:`slack.events.Event` or :class:`slack.events.Message`
|
juraj-google-style
|
def connect(self, chip_name, speed='auto', verbose=False):
if verbose:
self.exec_command('EnableRemarks = 1')
self.exec_command('Device = %s' % chip_name)
if speed == 'auto':
self.set_speed(auto=True)
elif speed == 'adaptive':
self.set_speed(adaptive=True)
else:
self.set_speed(speed)
result = self._dll.JLINKARM_Connect()
if result < 0:
raise errors.JLinkException(result)
try:
self.halted()
except errors.JLinkException:
pass
for index in range(self.num_supported_devices()):
device = self.supported_device(index)
if device.name.lower() == chip_name.lower():
self._device = device
break
else:
raise errors.JLinkException('Unsupported device was connected to.')
return None
|
Connects the J-Link to its target.
Args:
self (JLink): the ``JLink`` instance
chip_name (str): target chip name
speed (int): connection speed, one of ``{5-12000, 'auto', 'adaptive'}``
verbose (bool): boolean indicating if connection should be verbose in logging
Returns:
``None``
Raises:
JLinkException: if connection fails to establish.
TypeError: if given speed is invalid
|
juraj-google-style
|
def _get_match(self, key):
return (self._get_string_match(key=key) or self._get_non_string_match(key=key))
|
Gets a MatchObject for the given key.
Args:
key (str): Key of the property to look-up.
Returns:
MatchObject: The discovered match.
|
codesearchnet
|
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0):
data = {'userid': self.user_id, 'svctype': svctype, 'auth': auth}
r = self.session.get(nurls['getRegisterUserInfo'], params = data)
j = json.loads(r.text)
if j['message'] != 'success':
print "[*] Error getRegisterUserInfo: " + j['message']
return False
else:
self.useridx = j['resultvalue']['useridx']
return True
|
Get registerUserInfo
Args:
svctype: Platform information
auth: ???
Returns:
True: Success
False: Failed
|
juraj-google-style
|