code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (string, 3 classes: juraj-google-style, codesearchnet, github-repos) |
---|---|---|
def port(self, check=False):
if not self.__ports:
self.refresh()
try:
port = self.__ports.pop()
if check:
while not self.__check_port(port):
self.release_port(port)
port = self.__ports.pop()
except (IndexError, KeyError):
raise IndexError("Could not find a free port,\nclosed ports: {closed}".format(closed=self.__closed))
self.__closed.add(port)
return port
|
Return the next open port.
Args:
check: if True, verify that the port is really free before returning it.
|
juraj-google-style
|
def get_cert_contents(kwargs):
paths = {
"certificate": kwargs.get("path_to_certificate"),
"private_key": kwargs.get("path_to_private_key"),
"chain": kwargs.get("path_to_chain"),
}
for key, value in paths.items():
if value is not None:
continue
path = input("Path to %s (skip): " % (key,))
if path == "skip" or not path.strip():
continue
paths[key] = path
parameters = {
"ServerCertificateName": kwargs.get("cert_name"),
}
for key, path in paths.items():
if not path:
continue
try:
contents = path.read()
except AttributeError:
with open(utils.full_path(path)) as read_file:
contents = read_file.read()
if key == "certificate":
parameters["CertificateBody"] = contents
elif key == "private_key":
parameters["PrivateKey"] = contents
elif key == "chain":
parameters["CertificateChain"] = contents
return parameters
|
Builds parameters with server cert file contents.
Args:
kwargs(dict): The keyword args passed to ensure_server_cert_exists,
optionally containing the paths to the cert, key and chain files.
Returns:
dict: A dictionary containing the appropriate parameters to supply to
upload_server_certificate. An empty dictionary if there is a
problem.
|
juraj-google-style
|
def _fromData(cls, header, tflags, data):
if (header.version >= header._V24):
if (tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN)):
datalen_bytes = data[:4]
data = data[4:]
if ((tflags & Frame.FLAG24_UNSYNCH) or header.f_unsynch):
try:
data = unsynch.decode(data)
except ValueError:
pass
if (tflags & Frame.FLAG24_ENCRYPT):
raise ID3EncryptionUnsupportedError
if (tflags & Frame.FLAG24_COMPRESS):
try:
data = zlib.decompress(data)
except zlib.error:
data = (datalen_bytes + data)
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError(('zlib: %s: %r' % (err, data)))
elif (header.version >= header._V23):
if (tflags & Frame.FLAG23_COMPRESS):
(usize,) = unpack('>L', data[:4])
data = data[4:]
if (tflags & Frame.FLAG23_ENCRYPT):
raise ID3EncryptionUnsupportedError
if (tflags & Frame.FLAG23_COMPRESS):
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError(('zlib: %s: %r' % (err, data)))
frame = cls()
frame._readData(header, data)
return frame
|
Construct this ID3 frame from raw string data.
Raises:
ID3JunkFrameError in case parsing failed
NotImplementedError in case parsing isn't implemented
ID3EncryptionUnsupportedError in case the frame is encrypted.
|
codesearchnet
|
def TempDirPath(suffix = "", prefix = "tmp"):
precondition.AssertType(suffix, Text)
precondition.AssertType(prefix, Text)
return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=_TempRootPath())
|
Creates a temporary directory based on the environment configuration.
The directory will be placed in the folder specified by the `TEST_TMPDIR`
environment variable if available, falling back to `Test.tmpdir` of the current
configuration otherwise.
Args:
suffix: A suffix to end the directory name with.
prefix: A prefix to begin the directory name with.
Returns:
An absolute path to the created directory.
|
juraj-google-style
|
def exe_cmd(*cmds):
cmd = ' '.join(cmds)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
(out, err) = proc.communicate()
if (not err):
return out
return err
|
Executes commands in a new shell. Directing stderr to PIPE.
This is fastboot's own exe_cmd because of its peculiar way of writing
non-error info to stderr.
Args:
cmds: A sequence of commands and arguments.
Returns:
The output of the command run.
Raises:
Exception: An error occurred during the command execution.
|
codesearchnet
|
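A minimal usage sketch for the `exe_cmd` entry above (it assumes `from subprocess import Popen, PIPE` is in scope, which the function body requires):

from subprocess import Popen, PIPE

out = exe_cmd('echo', 'hello')   # the arguments are joined into the single shell command "echo hello"
print(out)                       # b'hello\n' -- stderr was empty, so stdout is returned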
def parse_example_tensor(examples, train_config, keep_target):
csv_header = []
if keep_target:
csv_header = train_config['csv_header']
else:
csv_header = [name for name in train_config['csv_header']
if name != train_config['target_column']]
record_defaults = [[train_config['csv_defaults'][name]]
for name in csv_header]
tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')
tensors = [tf.expand_dims(x, axis=1) for x in tensors]
tensor_dict = dict(zip(csv_header, tensors))
return tensor_dict
|
Read the csv files.
Args:
examples: string tensor
train_config: training config
keep_target: if true, the target column is expected to exist and it is
returned in the features dict.
Returns:
Dict of feature_name to tensor. Target feature is in the dict.
|
juraj-google-style
|
def list(self, request):
kwargs = {'Bucket': request.bucket, 'Prefix': request.prefix}
if request.continuation_token is not None:
kwargs['ContinuationToken'] = request.continuation_token
try:
boto_response = self.client.list_objects_v2(**kwargs)
except Exception as e:
raise messages.S3ClientError(str(e), get_http_error_code(e))
if boto_response['KeyCount'] == 0:
message = 'Tried to list nonexistent S3 path: s3://%s/%s' % (request.bucket, request.prefix)
raise messages.S3ClientError(message, 404)
items = [messages.Item(etag=content['ETag'], key=content['Key'], last_modified=content['LastModified'], size=content['Size']) for content in boto_response['Contents']]
try:
next_token = boto_response['NextContinuationToken']
except KeyError:
next_token = None
response = messages.ListResponse(items, next_token)
return response
|
Retrieves a list of objects matching the criteria.
Args:
request: (ListRequest) input message
Returns:
(ListResponse) The response message.
|
github-repos
|
def __init__(self, cell, residual_fn=None, **kwargs):
super(ResidualWrapperBase, self).__init__(cell, **kwargs)
self._residual_fn = residual_fn
|
Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
residual_fn: (Optional) The function to map raw cell inputs and raw cell
outputs to the actual cell outputs of the residual network.
Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
and outputs.
**kwargs: dict of keyword arguments for base layer.
|
github-repos
|
def _getClassInstance(path, args=None):
if (not path.endswith('.py')):
return None
if (args is None):
args = {}
classname = AtomShieldsScanner._getClassName(path)
basename = os.path.basename(path).replace('.py', '')
sys.path.append(os.path.dirname(path))
try:
mod = __import__(basename, globals(), locals(), [classname], (- 1))
class_ = getattr(mod, classname)
instance = class_(**args)
except Exception as e:
AtomShieldsScanner._debug(('[!] %s' % e))
return None
finally:
sys.path.remove(os.path.dirname(path))
return instance
|
Returns a class instance from a .py file.
Args:
path (str): Absolute path to .py file
args (dict): Arguments passed via class constructor
Returns:
object: Class instance or None
|
codesearchnet
|
def blit_rect(
self,
console: tcod.console.Console,
x: int,
y: int,
width: int,
height: int,
bg_blend: int,
) -> None:
lib.TCOD_image_blit_rect(
self.image_c, _console(console), x, y, width, height, bg_blend
)
|
Blit onto a Console without scaling or rotation.
Args:
console (Console): Blit destination Console.
x (int): Console tile X position starting from the left at 0.
y (int): Console tile Y position starting from the top at 0.
width (int): Use -1 for Image width.
height (int): Use -1 for Image height.
bg_blend (int): Background blending mode to use.
|
juraj-google-style
|
def segment_similarity(A, B, T=CLOSE_DISTANCE_THRESHOLD):
l_a = len(A.points)
l_b = len(B.points)
idx = index.Index()
dex = 0
for i in range(l_a-1):
idx.insert(dex, bounding_box_from(A.points, i, i+1, T), obj=[A.points[i], A.points[i+1]])
dex = dex + 1
prox_acc = []
for i in range(l_b-1):
ti = B.points[i].gen2arr()
ti1 = B.points[i+1].gen2arr()
bb = bounding_box_from(B.points, i, i+1, T)
intersects = idx.intersection(bb, objects=True)
n_prox = []
i_prox = 0
a = 0
for x in intersects:
a = a + 1
pi = x.object[0].gen2arr()
pi1 = x.object[1].gen2arr()
prox = line_similarity(ti, ti1, pi, pi1, T)
i_prox = i_prox + prox
n_prox.append(prox)
if a != 0:
prox_acc.append(i_prox / a)
else:
prox_acc.append(0)
return np.mean(prox_acc), prox_acc
|
Computes the similarity between two segments
Args:
A (:obj:`Segment`)
B (:obj:`Segment`)
Returns:
float: between 0 and 1. Where 1 is very similar and 0 is completely different
|
juraj-google-style
|
def read_chunk_body(self):
bytes_left = self._bytes_left
if (bytes_left > 0):
size = min(bytes_left, self._read_size)
data = (yield from self._connection.read(size))
self._bytes_left -= len(data)
return (data, data)
elif (bytes_left < 0):
raise ProtocolError('Chunked-transfer overrun.')
elif bytes_left:
raise NetworkError('Connection closed.')
newline_data = (yield from self._connection.readline())
if (len(newline_data) > 2):
raise ProtocolError('Error reading newline after chunk.')
self._chunk_size = self._bytes_left = None
return (b'', newline_data)
|
Read a fragment of a single chunk.
Call :meth:`read_chunk_header` first.
Returns:
tuple: 2-item tuple with the content data and raw data.
First item is empty bytes string when chunk is fully read.
Coroutine.
|
codesearchnet
|
async def _on_report_notification(self, event):
conn_string = event.get('connection_string')
report = self._report_parser.deserialize_report(event.get('serialized_report'))
self.notify_event(conn_string, 'report', report)
|
Callback function called when a report event is received.
Args:
event (dict): The report_event
|
juraj-google-style
|
def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]:
v = jsonschema.Draft4Validator(schema)
messages = []
errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path)
for error in errors:
for suberror in sorted(error.context, key=lambda e: e.schema_path):
print(list(suberror.schema_path), suberror.message, sep=", ")
messages.append(("ERROR", suberror.message))
is_valid = True
if errors:
is_valid = False
return (is_valid, messages)
|
Validate nanopub against jsonschema for nanopub
Args:
nanopub (Mapping[str, Any]): nanopub dict
schema (Mapping[str, Any]): nanopub schema
Returns:
Tuple[bool, List[str]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)
e.g. [('ERROR', "'subject' is a required property")]
|
juraj-google-style
|
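An illustrative call for the `validate_to_schema` entry above. This is a sketch assuming `import jsonschema` is available; note the function only collects sub-errors from validator contexts (e.g. `anyOf`), so the toy schema below uses `anyOf`, and the exact message text may vary between jsonschema versions.

import jsonschema

schema = {'anyOf': [
    {'type': 'object', 'required': ['subject']},
    {'type': 'string'},
]}
is_valid, messages = validate_to_schema({'id': 1}, schema)
print(is_valid)   # False
print(messages)   # e.g. [('ERROR', "'subject' is a required property"),
                  #       ('ERROR', "{'id': 1} is not of type 'string'")]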
def reshape(x, newshape):
if any_symbolic_tensors((x,)):
return Reshape(newshape).symbolic_call(x)
return backend.numpy.reshape(x, newshape)
|
Gives a new shape to a tensor without changing its data.
Args:
x: Input tensor.
newshape: The new shape should be compatible with the original shape.
One shape dimension can be -1 in which case the value is
inferred from the length of the array and remaining dimensions.
Returns:
The reshaped tensor.
|
github-repos
|
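A quick usage sketch for the `reshape` op above, written against the Keras 3 `keras.ops` API (assumes `keras` and `numpy` are installed):

import numpy as np
import keras

x = np.arange(6)
y = keras.ops.reshape(x, (2, -1))   # -1 is inferred from the remaining dimensions
print(y.shape)                      # (2, 3); the exact tensor type depends on the active backend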
def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:
f1_total = 0.0
for ground_truth, prediction in zip(y_true, y_predicted):
prediction_tokens = normalize_answer(prediction).split()
f1s = []
for gt in ground_truth:
gt_tokens = normalize_answer(gt).split()
if len(gt_tokens) == 0 or len(prediction_tokens) == 0:
f1s.append(float(gt_tokens == prediction_tokens))
continue
common = Counter(prediction_tokens) & Counter(gt_tokens)
num_same = sum(common.values())
if num_same == 0:
f1s.append(0.0)
continue
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(gt_tokens)
f1 = (2 * precision * recall) / (precision + recall)
f1s.append(f1)
f1_total += max(f1s)
return 100 * f1_total / len(y_true) if len(y_true) > 0 else 0
|
Calculates F-1 score between y_true and y_predicted
F-1 score uses the best matching y_true answer
The same as in SQuAD-v2.0
Args:
y_true: list of correct answers (correct answers are represented by list of strings)
y_predicted: list of predicted answers
Returns:
F-1 score : float
|
juraj-google-style
|
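To make the token-level F1 in `squad_v2_f1` concrete, here is a hand-worked toy case; it is a sketch that uses plain lowercase/whitespace splitting in place of the real `normalize_answer` helper:

from collections import Counter

prediction = 'the cat sat'
ground_truth = 'a cat sat down'
pred_tokens = prediction.lower().split()    # ['the', 'cat', 'sat']
gt_tokens = ground_truth.lower().split()    # ['a', 'cat', 'sat', 'down']

common = Counter(pred_tokens) & Counter(gt_tokens)
num_same = sum(common.values())             # 2 ('cat' and 'sat')
precision = num_same / len(pred_tokens)     # 2/3
recall = num_same / len(gt_tokens)          # 2/4
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 3))                         # 0.571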
def with_target_audience(self, target_audience):
return self.__class__(self._signer, service_account_email=self._service_account_email, token_uri=self._token_uri, target_audience=target_audience, additional_claims=self._additional_claims.copy())
|
Create a copy of these credentials with the specified target
audience.
Args:
target_audience (str): The intended audience for these credentials,
used when requesting the ID Token.
Returns:
google.auth.service_account.IDTokenCredentials: A new credentials
instance.
|
codesearchnet
|
def get_formatted_as_type(self, value, default=None, out_type=str):
if (value is None):
value = default
if isinstance(value, SpecialTagDirective):
result = value.get_value(self)
return types.cast_to_type(result, out_type)
if isinstance(value, str):
result = self.get_formatted_string(value)
result_type = type(result)
if (out_type is result_type):
return result
elif ((out_type is bool) and (result_type is str)):
return (result.lower() in ['true', '1', '1.0'])
else:
return out_type(result)
else:
return out_type(value)
|
Return formatted value for input value, returns as out_type.
Caveat emptor: if out_type is bool and value is a string, the
return will be True if the string is 'True', '1' or '1.0'
(case-insensitive), and False for all other cases.
Args:
value: the value to format
default: if value is None, set to this
out_type: cast return as this type
Returns:
Formatted value of type out_type
|
codesearchnet
|
def get_config_dir(program='', system_wide=False):
config_homes = []
if system_wide:
if (os.name == 'nt'):
config_homes.append(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))
else:
config_homes.append('/etc')
config_homes.append('/etc/xdg')
if (os.name == 'darwin'):
config_homes.append('/Library')
elif (os.name == 'nt'):
import winreg
config_homes.append(winreg.ExpandEnvironmentStrings('%LOCALAPPDATA%'))
config_homes.append(os.path.join(winreg.ExpandEnvironmentStrings('%APPDATA%'), 'Roaming'))
elif os.getenv('XDG_CONFIG_HOME'):
config_homes.append(os.getenv('XDG_CONFIG_HOME'))
else:
try:
from xdg import BaseDirectory
config_homes.append(BaseDirectory.xdg_config_home)
except ImportError:
config_homes.append(os.path.expanduser('~/.config'))
config_homes.append(os.path.expanduser('~'))
if (os.name == 'darwin'):
config_homes.append(os.path.expanduser('~/Library'))
if program:
def __find_homes(app, dirs):
homes = []
for home in dirs:
if os.path.isdir(os.path.join(home, app)):
homes.append(os.path.join(home, app))
if os.path.isdir(os.path.join(home, ('.' + app))):
homes.append(os.path.join(home, ('.' + app)))
if os.path.isdir(os.path.join(home, (app + '.d'))):
homes.append(os.path.join(home, (app + '.d')))
return homes
app_homes = __find_homes(program, config_homes)
if (program == 'vim'):
app_homes.extend(__find_homes('vimfiles', config_homes))
elif (program == 'chrome'):
app_homes.extend(__find_homes('google-chrome', config_homes))
elif (program in ['firefox', 'thunderbird']):
app_homes.extend(__find_homes(program, [os.path.expanduser('~/.mozilla')]))
return app_homes
return config_homes
|
Get the configuration directory.
Get the configuration directories, optionally for a specific program.
Args:
program (str) : The name of the program whose configuration directories have to be found.
system_wide (bool): Gets the system-wide configuration directories.
Returns:
list: A list of all matching configuration directories found.
|
codesearchnet
|
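A short usage sketch for `get_config_dir`; the returned paths depend entirely on the machine, so the comments only indicate the kind of result to expect:

dirs = get_config_dir('vim')
print(dirs)   # e.g. ['/home/alice/.config/vim', '/home/alice/.vim'] on Linux,
              # listing only the candidate directories that actually exist

config_homes = get_config_dir()   # no program given: returns the candidate config homes themselves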
def transform_log_prob_fn(log_prob_fn: PotentialFn, bijector: BijectorNest, init_state: State=None) -> Union[(PotentialFn, Tuple[(PotentialFn, State)])]:
def wrapper(*args):
'Transformed wrapper.'
bijector_ = bijector
args = tf.nest.map_structure((lambda x: (0.0 + x)), args)
if (len(args) == 1):
args = args[0]
elif isinstance(bijector_, list):
bijector_ = tuple(bijector_)
original_space_args = tf.nest.map_structure((lambda b, x: b.forward(x)), bijector_, args)
original_space_args = original_space_args
(original_space_log_prob, extra) = call_fn(log_prob_fn, original_space_args)
event_ndims = tf.nest.map_structure((lambda x: (tf.rank(x) - tf.rank(original_space_log_prob))), args)
return ((original_space_log_prob + sum(tf.nest.flatten(tf.nest.map_structure((lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e)), bijector_, args, event_ndims)))), [original_space_args, extra])
if (init_state is None):
return wrapper
else:
return (wrapper, tf.nest.map_structure((lambda b, s: b.inverse(s)), bijector, init_state))
|
Transforms a log-prob function using a bijector.
This takes a log-prob function and creates a new log-prob function that
takes state in the domain of the bijector, forward-transforms that state
and calls the original log-prob function. It then returns the log-probability
that correctly accounts for this transformation.
The forward-transformed state is pre-pended to the original log-prob
function's extra returns and returned as the new extra return.
For convenience you can also pass the initial state (in the original space),
and this function will return the inverse-transformed state as the 2nd return value.
You'd use this to initialize MCMC operators that operate in the transformed
space.
Args:
log_prob_fn: Log prob fn.
bijector: Bijector(s), must be of the same structure as the `log_prob_fn`
inputs.
init_state: Initial state, in the original space.
Returns:
transformed_log_prob_fn: Transformed log prob fn.
transformed_init_state: If `init_state` is provided. Initial state in the
transformed space.
|
codesearchnet
|
def path_get_destination(p: tcod.path.AStar) -> Tuple[(int, int)]:
x = ffi.new('int *')
y = ffi.new('int *')
lib.TCOD_path_get_destination(p._path_c, x, y)
return (x[0], y[0])
|
Get the current destination position.
Args:
p (AStar): An AStar instance.
Returns:
Tuple[int, int]: An (x, y) point.
|
codesearchnet
|
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
columns = ['bond', 'angle', 'dihedral']
C_dist = zmat_dist.loc[:, columns].values.T
try:
C_dist = C_dist.astype('f8')
C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
except (TypeError, AttributeError):
C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
return Cartesian(atoms=zmat_dist['atom'], coords=cart_dist, index=zmat_dist.index)
|
Apply the gradient for transformation to cartesian space onto zmat_dist.
Args:
grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
The mathematical details of the index layout is explained in
:meth:`~chemcoord.Cartesian.get_grad_zmat()`.
zmat_dist (:class:`~chemcoord.Zmat`):
Distortions in Zmatrix space.
Returns:
:class:`~chemcoord.Cartesian`: Distortions in cartesian space.
|
codesearchnet
|
def clone_source_dir(source_dir, dest_dir):
if os.path.isdir(dest_dir):
print('removing', dest_dir)
shutil.rmtree(dest_dir)
shutil.copytree(source_dir, dest_dir)
|
Copies the source Protobuf files into a build directory.
Args:
source_dir (str): source directory of the Protobuf files
dest_dir (str): destination directory of the Protobuf files
|
codesearchnet
|
def on_predict_batch_end(self, batch, logs=None):
|
Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
|
github-repos
|
def process_git_configs(git_short=''):
LOG.info('Processing application.json files from GitLab "%s".', git_short)
file_lookup = FileLookup(git_short=git_short)
app_configs = process_configs(file_lookup, (RUNWAY_BASE_PATH + '/application-master-{env}.json'), (RUNWAY_BASE_PATH + '/pipeline.json'))
commit_obj = file_lookup.project.commits.get('master')
config_commit = commit_obj.attributes['id']
LOG.info('Commit ID used: %s', config_commit)
app_configs['pipeline']['config_commit'] = config_commit
return app_configs
|
Retrieve _application.json_ files from GitLab.
Args:
git_short (str): Short Git representation of repository, e.g.
forrest/core.
Returns:
collections.defaultdict: Configurations stored for each environment
found.
|
codesearchnet
|
def replace_with_aqlm_linear(model, quantization_config=None, linear_weights_not_to_quantize=None, current_key_name=None, has_been_replaced=False):
if not is_aqlm_available():
raise ValueError('AQLM is not available. Please install it with `pip install aqlm[cpu,gpu]`')
if not is_accelerate_available():
raise ValueError(f"AQLM requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`")
if linear_weights_not_to_quantize is None:
linear_weights_not_to_quantize = []
from accelerate import init_empty_weights
from aqlm import QuantizedLinear
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if isinstance(module, nn.Linear):
if '.'.join(current_key_name) + '.weight' not in linear_weights_not_to_quantize:
with init_empty_weights():
in_features = module.in_features
out_features = module.out_features
model._modules[name] = QuantizedLinear(in_features, out_features, bias=module.bias is not None, in_group_size=quantization_config.in_group_size, out_group_size=quantization_config.out_group_size, num_codebooks=quantization_config.num_codebooks, nbits_per_codebook=quantization_config.nbits_per_codebook)
has_been_replaced = True
model._modules[name].source_cls = type(module)
model._modules[name].requires_grad_(False)
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_aqlm_linear(module, quantization_config=quantization_config, linear_weights_not_to_quantize=linear_weights_not_to_quantize, current_key_name=current_key_name, has_been_replaced=has_been_replaced)
current_key_name.pop(-1)
return (model, has_been_replaced)
|
Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers.
`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the
conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`AqlmConfig`):
The quantization config object that contains the quantization parameters.
linear_weights_not_to_quantize (`list[str]`, *optional*):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
current_key_name (`list`, *optional*):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
has_been_replaced (`bool`, *optional*):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
|
github-repos
|
def batch_insert_into(self, insert_intos: Iterable[Tuple[(int, ops.Operation)]]) -> None:
copy = self.copy()
for (i, op) in insert_intos:
copy._moments[i] = copy._moments[i].with_operation(op)
self._device.validate_circuit(copy)
self._moments = copy._moments
|
Inserts operations into empty spaces in existing moments.
If any of the insertions fails (due to colliding with an existing
operation), this method fails without making any changes to the circuit.
Args:
insert_intos: A sequence of (moment_index, new_operation)
pairs indicating a moment to add a new operation into.
Raises:
ValueError: One of the insertions collided with an existing operation.
IndexError: Inserted into a moment index that doesn't exist.
|
codesearchnet
|
def __init__(self, name, aliases=None, description=None, urls=None):
super(DataTypeDefinitionWithMembers, self).__init__(
name, aliases=aliases, description=description, urls=urls)
self._byte_size = None
self.members = []
self.sections = []
|
Initializes a data type definition.
Args:
name (str): name.
aliases (Optional[list[str]]): aliases.
description (Optional[str]): description.
urls (Optional[list[str]]): URLs.
|
juraj-google-style
|
def console_put_char(
con: tcod.console.Console,
x: int,
y: int,
c: Union[int, str],
flag: int = BKGND_DEFAULT,
) -> None:
lib.TCOD_console_put_char(_console(con), x, y, _int(c), flag)
|
Draw the character c at x,y using the default colors and a blend mode.
Args:
con (Console): Any Console instance.
x (int): Character x position from the left.
y (int): Character y position from the top.
c (Union[int, AnyStr]): Character to draw, can be an integer or string.
flag (int): Blending mode to use, defaults to BKGND_DEFAULT.
|
juraj-google-style
|
class TFOPTPreTrainedModel(TFPreTrainedModel):
config_class = OPTConfig
base_model_prefix = 'model'
|
TFOPT Pretrained Model that inherits from transformers.TFPreTrainedModel
Args:
config: OPTConfig
|
github-repos
|
def update_resource_fields(self, data, data_to_add):
for key, value in data_to_add.items():
if not data.get(key):
data[key] = value
return data
|
Update resource data with new fields.
Args:
data: resource data
data_to_add: dict of fields to add to the resource data
Returns:
dict: updated resource data
|
juraj-google-style
|
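The merge semantics of `update_resource_fields` above in isolation (a standalone sketch that mirrors the method body, since the method itself lives on a handler instance):

data = {'name': 'resource-1', 'owner': ''}
data_to_add = {'owner': 'team-a', 'name': 'ignored', 'region': 'us-east-1'}
for key, value in data_to_add.items():
    if not data.get(key):   # only keys that are missing or falsy get filled in
        data[key] = value
print(data)   # {'name': 'resource-1', 'owner': 'team-a', 'region': 'us-east-1'}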
def by_image_seq(blocks, image_seq):
return list(filter((lambda block: (blocks[block].ec_hdr.image_seq == image_seq)), blocks))
|
Filter blocks to return only those associated with the provided image_seq number.
Args:
List:blocks -- List of block objects to sort.
Int:image_seq -- image_seq number found in ec_hdr.
Returns:
List -- List of block indexes matching image_seq number.
|
codesearchnet
|
def _calculate_replicas_with_values(strategy, input_workers, optional_list):
worker_has_values = []
for worker, optionals in zip(input_workers.worker_devices, optional_list):
with ops.device(worker):
device_has_values = [math_ops.cast(v.has_value(), dtypes.int64) for v in optionals]
worker_has_values.append(math_ops.reduce_sum(device_has_values, keepdims=True))
client_has_values = math_ops.reduce_sum(worker_has_values, keepdims=True)
if strategy.extended._in_multi_worker_mode():
global_has_values = strategy.reduce(reduce_util.ReduceOp.SUM, client_has_values, axis=None)
return array_ops.reshape(global_has_values, [])
else:
return array_ops.reshape(client_has_values, [])
|
Computes the number of replicas that have values.
Args:
strategy: the `tf.distribute.Strategy`.
input_workers: the `InputWorkers`.
optional_list: a list of lists `tf.experimental.Optional`. The values from
each compute device grouped by the input device.
Returns:
A scalar Tensor.
|
github-repos
|
def remove(self, path, relative=False):
if not relative:
path = self.relpath(path)
self._remove(self.get_client_kwargs(path))
|
Remove an object.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
|
juraj-google-style
|
def enter_loop_section(self, section_id, entry_node):
assert section_id not in self.section_entry
assert section_id not in self.continues
self.continues[section_id] = set()
node = self.add_ordinary_node(entry_node)
self.section_entry[section_id] = node
|
Enters a loop section.
Loop sections define an entry node. The end of the section always flows back
to the entry node. These admit continue jump nodes which also flow to the
entry node.
Args:
section_id: Hashable, the same node that will be used in calls to the
ast_node arg passed to add_continue_node
entry_node: ast.AST, the entry node into the loop (e.g. the test node for
while loops)
|
github-repos
|
def get_without_ethernet(self, id_or_uri):
uri = self._client.build_uri(id_or_uri) + "/withoutEthernet"
return self._client.get(uri)
|
Gets the logical downlink with the specified ID without ethernet.
Args:
id_or_uri: Can be either the logical downlink id or the logical downlink uri.
Returns:
dict
|
juraj-google-style
|
def set_uid(self, uid, schema=None):
try:
(uid, schema) = author_id_normalize_and_schema(uid, schema)
except UnknownUIDSchema:
pass
self._ensure_field('ids', [])
self.obj['ids'] = [id_ for id_ in self.obj['ids'] if (id_.get('schema') != schema)]
self._add_uid(uid, schema)
|
Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema do not match
|
codesearchnet
|
def expand(self, pcoll: beam.PCollection[ExampleT]) -> Union[beam.PCollection[MLTransformOutputT], tuple[beam.PCollection[MLTransformOutputT], beam.PCollection[beam.Row]]]:
upstream_errors = []
_ = [self._validate_transform(transform) for transform in self.transforms]
if self._artifact_mode == ArtifactMode.PRODUCE:
ptransform_partitioner = _MLTransformToPTransformMapper(transforms=self.transforms, artifact_location=self._parent_artifact_location, artifact_mode=self._artifact_mode, pipeline_options=pcoll.pipeline.options)
ptransform_list = ptransform_partitioner.create_and_save_ptransform_list()
else:
ptransform_list = _MLTransformToPTransformMapper.load_transforms_from_artifact_location(self._parent_artifact_location)
for i in range(len(ptransform_list)):
if hasattr(ptransform_list[i], 'artifact_mode'):
ptransform_list[i].artifact_mode = self._artifact_mode
transform_name = None
for ptransform in ptransform_list:
if self._with_exception_handling:
if hasattr(ptransform, 'with_exception_handling'):
ptransform = ptransform.with_exception_handling(**self._exception_handling_args)
pcoll, bad_results = pcoll | ptransform
if isinstance(bad_results, RunInferenceDLQ):
bad_results = bad_results.failed_inferences
transform_name = ptransform.annotations()['model_handler']
elif not isinstance(bad_results, beam.PCollection):
raise NotImplementedError(f'Unexpected type for bad_results: {type(bad_results)}')
bad_results = bad_results | beam.Map(lambda x: _map_errors_to_beam_row(x, transform_name))
upstream_errors.append(bad_results)
else:
pcoll = pcoll | ptransform
_ = pcoll.pipeline | 'MLTransformMetricsUsage' >> MLTransformMetricsUsage(self)
if self._with_exception_handling:
bad_pcoll = upstream_errors | beam.Flatten()
return (pcoll, bad_pcoll)
return pcoll
|
This is the entrypoint for the MLTransform. This method will
invoke the process_data() method of the ProcessHandler instance
to process the incoming data.
process_data takes in a PCollection and applies the PTransforms
necessary to process the data and returns a PCollection of
transformed data.
Args:
pcoll: A PCollection of ExampleT type.
Returns:
A PCollection of MLTransformOutputT type
|
github-repos
|
def add_stream(self, stream, path, compress, flags):
self.data_fileobj.seek(self.last_offset)
if (compress == 'bz2'):
stream = bz2_compress_stream(stream)
elif (compress == 'xz'):
stream = xz_compress_stream(stream)
elif (compress is None):
pass
else:
raise ValueError('Unsupported compression type: {}'.format(compress))
size = write_to_file(stream, self.data_fileobj)
if (os.sep == '\\'):
path = path.replace('\\', '/')
e = dict(name=six.u(path), offset=self.last_offset, size=size, flags=flags)
self.entries.append(e)
self.last_offset += e['size']
|
Add the contents of an iterable to the MAR file.
Args:
stream (iterable): yields blocks of data
path (str): name of this file in the MAR file
compress (str): One of 'xz', 'bz2', or None. Defaults to None.
flags (int): permission of this file in the MAR file
|
codesearchnet
|
def read_header(filename, return_idxs=False):
with open(filename, 'rb') as fh:
header_dict = {}
header_idxs = {}
keyword, value, idx = read_next_header_keyword(fh)
try:
assert keyword == b'HEADER_START'
except AssertionError:
raise RuntimeError("Not a valid blimpy file.")
while True:
keyword, value, idx = read_next_header_keyword(fh)
if keyword == b'HEADER_END':
break
else:
header_dict[keyword] = value
header_idxs[keyword] = idx
if return_idxs:
return header_idxs
else:
return header_dict
|
Read blimpy header and return a Python dictionary of key:value pairs
Args:
filename (str): name of file to open
Optional args:
return_idxs (bool): Default False. If true, returns the file offset indexes
for values
Returns:
dict: header keyword/value pairs, or their file offsets if `return_idxs` is True.
|
juraj-google-style
|
def _consume(self, message):
try:
self.validate(message)
except RuntimeWarning as e:
self.log.warn('Received invalid message {0}'.format(e))
return
if (isinstance(message, dict) and ('headers' in message) and ('body' in message)):
message['body']['headers'] = message['headers']
if hasattr(self, 'replay_name'):
for m in check_for_replay(self.replay_name, self.name_to_seq_id, message, self.hub.config):
try:
self.validate(m)
return super(FedmsgConsumer, self)._consume(m)
except RuntimeWarning as e:
self.log.warn('Received invalid message {}'.format(e))
else:
return super(FedmsgConsumer, self)._consume(message)
|
Called when a message is consumed.
This private method handles some administrative setup and teardown
before calling the public interface `consume` typically implemented
by a subclass.
When `moksha.blocking_mode` is set to `False` in the config, this
method always returns `None`. The argued message is stored in an
internal queue where the consumer's worker threads should eventually
pick it up.
When `moksha.blocking_mode` is set to `True` in the config, this
method should return True or False, indicating whether the message
was handled or not. Specifically, in the event that the inner
`consume` method raises an exception of any kind, this method
should return `False` indicating that the message was not
successfully handled.
Args:
message (dict): The message as a dictionary.
Returns:
bool: Should be interpreted as whether or not the message was
handled by the consumer, or `None` if `moksha.blocking_mode` is
set to False.
|
codesearchnet
|
def get_file_behaviour(self, resources):
api_name = 'virustotal-file-behaviour'
api_endpoint = 'file/behaviour'
return self._extract_all_responses(resources, api_endpoint, api_name)
|
Retrieves a report about the behaviour of a md5, sha1, and/or sha2 hash of
a file when executed in a sandboxed environment (Cuckoo sandbox).
Args:
resources: list of string hashes.
|
juraj-google-style
|
def set_pyftpsync_logger(logger=True):
global _logger
prev_logger = _logger
if (logger is True):
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger('pyftpsync')
_logger.setLevel(logging.DEBUG)
else:
_logger = logger
return prev_logger
|
Define target for common output.
Args:
logger (bool | None | logging.Logger):
Pass None to use `print()` to stdout instead of logging.
Pass True to create a simple standard logger.
|
codesearchnet
|
def __init__(self, texts, text_type=None):
self.texts = texts
self.text_type = text_type
|
String of text and a corresponding type to use to style that text.
Args:
texts: (list[str]), list of strs or TypedText objects
that should be styled using text_type.
text_type: (TextTypes), the semantic type of the text that
will be used to style text.
|
github-repos
|
def get_field_tag(proto: message.Message, fields: FieldTypes) -> Sequence[chunk_pb2.FieldIndex]:
field_tags = []
for _, field_desc, map_key, list_index in _walk_fields(proto, fields):
field_tags.append(chunk_pb2.FieldIndex(field=field_desc.number))
if map_key is not None:
key_type = field_desc.message_type.fields_by_name['key'].type
field_tags.append(chunk_pb2.FieldIndex(map_key=_map_key_proto(key_type, map_key)))
elif list_index is not None:
field_tags.append(chunk_pb2.FieldIndex(index=list_index))
return field_tags
|
Generates FieldIndex proto for a nested field within a proto.
Args:
proto: Parent proto of any message type.
fields: List of string/int/map key fields, e.g. ["nodes", "attr", "value"]
can represent `proto.nodes.attr["value"]`.
Returns:
A list of FieldIndex protos with the same length as `fields`.
|
github-repos
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
|
github-repos
|
def ConsumeFloat(self):
try:
result = ParseFloat(self.token)
except ValueError as e:
raise self._ParseError(str(e))
self.NextToken()
return result
|
Consumes a floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
|
codesearchnet
|
def _add_weight(self, name, initial_value, dtype=None):
variable = variable_v1.VariableV1(initial_value=initial_value, name=name, dtype=dtype, trainable=False, use_resource=True, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE)
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key
key = (name, graph_key)
if self._weights.get(key, None) is not None:
raise RuntimeError('Duplicate variables detected. {}'.format(key))
self._weights[key] = variable
self._handle_deferred_dependencies(name=name, trackable=variable)
return variable
|
Adds a weight to this loss scale.
Args:
name: Variable name.
initial_value: The variable's initial value.
dtype: The type of the variable.
Returns:
A variable.
Raises:
RuntimeError: If a weight with `name` has already been added.
|
github-repos
|
def __getDecision(self, result, multiple=False, **values):
values = self.__toString(values)
__valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)
errors = self.__checkDecisionParameters(result, **values)
if errors:
view.Tli.showErrors('ParametersError', errors)
machingData = {}
for line in self.decisions:
match = True
for index in __valueKeyWithHeaderIndex:
if (line[index] != __valueKeyWithHeaderIndex[index]):
if (line[index] != self.__wildcardSymbol):
match = False
break
if match:
if multiple:
for header in result:
if (header not in machingData):
machingData[header] = [line[self.header.index(header)]]
else:
machingData[header].append(line[self.header.index(header)])
else:
for header in result:
machingData[header] = line[self.header.index(header)]
return machingData
if multiple:
if machingData:
return machingData
return dict(((key, None) for key in result))
|
The main method for decision picking.
Args:
result (array of str): What values you want to get in return array.
multiple (boolean, optional): whether to return multiple results if many matching decisions are found.
**values (dict): What should finder look for, (headerString : value).
Returns: Mapped result values with the matched elements for each row.
|
codesearchnet
|
def _GetMountpoints(only_physical=True):
partitions = psutil.disk_partitions(all=not only_physical)
return set(partition.mountpoint for partition in partitions)
|
Fetches a list of mountpoints.
Args:
only_physical: Determines whether only mountpoints for physical devices
(e.g. hard disks) should be listed. If false, mountpoints for things such
as memory partitions or `/dev/shm` will be returned as well.
Returns:
A set of mountpoints.
|
juraj-google-style
|
def locate_file(start_path, file_name):
if os.path.isfile(start_path):
start_dir_path = os.path.dirname(start_path)
elif os.path.isdir(start_path):
start_dir_path = start_path
else:
raise exceptions.FileNotFound("invalid path: {}".format(start_path))
file_path = os.path.join(start_dir_path, file_name)
if os.path.isfile(file_path):
return os.path.abspath(file_path)
if os.path.abspath(start_dir_path) in [os.getcwd(), os.path.abspath(os.sep)]:
raise exceptions.FileNotFound("{} not found in {}".format(file_name, start_path))
return locate_file(os.path.dirname(start_dir_path), file_name)
|
Locate filename and return the absolute file path.
Searching is recursive upward until the current working directory is reached.
Args:
start_path (str): start locating path, maybe file path or directory path
Returns:
str: located file path.
Raises:
exceptions.FileNotFound: If failed to locate file.
|
juraj-google-style
|
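A runnable sketch of how `locate_file` climbs parent directories, using a throwaway temporary tree (the `exceptions` module referenced inside the function is assumed to come from its own package):

import os
import tempfile

root = tempfile.mkdtemp()
open(os.path.join(root, 'config.ini'), 'w').close()   # the file lives at the top of the tree
nested = os.path.join(root, 'a', 'b')
os.makedirs(nested)

print(locate_file(nested, 'config.ini'))   # climbs b -> a -> root and prints the absolute path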
def creating_schema_and_index(self, models, func):
waiting_models = []
self.base_thread.do_with_submit(func, models, waiting_models, threads=self.threads)
if waiting_models:
print("WAITING MODELS ARE CHECKING...")
self.creating_schema_and_index(waiting_models, func)
|
Executes given functions with given models.
Args:
models: models to execute
func: function name to execute
Returns:
|
juraj-google-style
|
def __init__(self, location=None, parent=None, **kwargs):
if not parent:
raise ValueError('Missing parent value.')
super(ZipPathSpec, self).__init__(
location=location, parent=parent, **kwargs)
|
Initializes a path specification.
Note that the zip file path specification must have a parent.
Args:
location (Optional[str]): ZIP file internal location string prefixed
with a path separator character.
parent (Optional[PathSpec]): parent path specification.
Raises:
ValueError: when parent is not set.
|
juraj-google-style
|
def _open_interface(self, conn_id, iface, callback):
try:
context = self.conns.get_context(conn_id)
except ArgumentError:
callback(conn_id, self.id, False, 'Could not find connection information')
return
self.conns.begin_operation(conn_id, 'open_interface', callback, self.get_config('default_timeout'))
topics = context['topics']
open_iface_message = {'key': context['key'], 'type': 'command', 'operation': 'open_interface', 'client': self.name, 'interface': iface}
self.client.publish(topics.action, open_iface_message)
|
Open an interface on this device
Args:
conn_id (int): the unique identifier for the connection
iface (string): the interface name to open
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
codesearchnet
|
def _PrintEventsStatus(self, events_status):
if events_status:
table_view = views.CLITabularTableView(column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates', 'MACB grouped', 'Total'], column_sizes=[15, 15, 15, 15, 15, 0])
table_view.AddRow(['', events_status.number_of_filtered_events, events_status.number_of_events_from_time_slice, events_status.number_of_duplicate_events, events_status.number_of_macb_grouped_events, events_status.total_number_of_events])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
|
Prints the status of the events.
Args:
events_status (EventsStatus): events status.
|
codesearchnet
|
def Deserialize(self, reader):
self.Magic = reader.ReadUInt32()
self.Command = reader.ReadFixedString(12).decode('utf-8')
self.Length = reader.ReadUInt32()
if self.Length > self.PayloadMaxSizeInt:
raise Exception("invalid format- payload too large")
self.Checksum = reader.ReadUInt32()
self.Payload = reader.ReadBytes(self.Length)
checksum = Message.GetChecksum(self.Payload)
if checksum != self.Checksum:
raise ChecksumException("checksum mismatch")
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
|
juraj-google-style
|
def assert_equal(first, second, msg=None, extras=None):
_call_unittest_assertion(_pyunit_proxy.assertEqual, first, second, msg=msg, extras=extras)
|
Asserts the equality of objects, otherwise fail the test.
Error message is "first != second" by default. Additional explanation can
be supplied in the message.
Args:
first: The first object to compare.
second: The second object to compare.
msg: A string that adds additional info about the failure.
extras: An optional field for extra information to be included in
test result.
|
github-repos
|
def bit_for_bit(model_path, bench_path, config):
fname = model_path.split(os.path.sep)[(- 1)]
if (not (os.path.isfile(bench_path) and os.path.isfile(model_path))):
return elements.error('Bit for Bit', (('File named ' + fname) + ' has no suitable match!'))
try:
model_data = Dataset(model_path)
bench_data = Dataset(bench_path)
except (FileNotFoundError, PermissionError):
return elements.error('Bit for Bit', (('File named ' + fname) + ' could not be read!'))
if (not (netcdf.has_time(model_data) and netcdf.has_time(bench_data))):
return elements.error('Bit for Bit', (('File named ' + fname) + ' could not be read!'))
headers = ['Max Error', 'Index of Max Error', 'RMS Error', 'Plot']
stats = LIVVDict()
for (i, var) in enumerate(config['bit_for_bit_vars']):
if ((var in model_data.variables) and (var in bench_data.variables)):
m_vardata = model_data.variables[var][:]
b_vardata = bench_data.variables[var][:]
diff_data = (m_vardata - b_vardata)
if diff_data.any():
stats[var]['Max Error'] = np.amax(np.absolute(diff_data))
stats[var]['Index of Max Error'] = str(np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape))
stats[var]['RMS Error'] = np.sqrt((np.sum(np.square(diff_data).flatten()) / diff_data.size))
pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data)
else:
stats[var]['Max Error'] = stats[var]['RMS Error'] = 0
pf = stats[var]['Index of Max Error'] = 'N/A'
stats[var]['Plot'] = pf
else:
stats[var] = {'Max Error': 'No Match', 'RMS Error': 'N/A', 'Plot': 'N/A'}
model_data.close()
bench_data.close()
return elements.bit_for_bit('Bit for Bit', headers, stats)
|
Checks whether the given files have bit for bit solution matches
on the given variable list.
Args:
model_path: absolute path to the model dataset
bench_path: absolute path to the benchmark dataset
config: the configuration of the set of analyses
Returns:
A dictionary created by the elements object corresponding to
the results of the bit for bit testing
|
codesearchnet
|
def run_inference(self, batch: Sequence[str], pipeline: Pipeline, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
inference_args = {} if not inference_args else inference_args
predictions = self._inference_fn(batch, pipeline, inference_args)
return _convert_to_result(batch, predictions)
|
Runs inferences on a batch of examples passed as a string resource.
These can either be string sentences, or string paths to image or
audio files.
Args:
batch: A sequence of strings resources.
pipeline: A Hugging Face Pipeline.
inference_args: Non-batchable arguments required as inputs to the model's
inference function.
Returns:
An Iterable of type PredictionResult.
|
github-repos
|
def color(self, color):
self._data['color'] = color
request = self._base_request
request['color'] = color
return self._tc_requests.update(request, owner=self.owner)
|
Updates the security labels color.
Args:
color:
|
codesearchnet
|
def check_task(taskid, timeout=DEFAULT_TASK_TIMEOUT, wait=2):
max_attempts = int(timeout / wait)
try:
return retry_call(
partial(_check_task, taskid),
max_attempts=max_attempts,
wait=wait,
exceptions=(AssertionError, ValueError), )
except ValueError:
raise SpinnakerTaskInconclusiveError('Task failed to complete in {0} seconds: {1}'.format(timeout, taskid))
|
Wrap check_task.
Args:
taskid (str): Existing Spinnaker Task ID.
timeout (int, optional): Consider Task failed after given seconds.
wait (int, optional): Seconds to pause between polling attempts.
Returns:
str: Task status.
Raises:
AssertionError: API did not respond with a 200 status code.
:obj:`foremast.exceptions.SpinnakerTaskInconclusiveError`: Task did not
reach a terminal state before the given time out.
|
juraj-google-style
|
def join_sources(source_module: DeploymentModule, contract_name: str):
joined_file = Path(__file__).parent.joinpath('joined.sol')
remapping = {module: str(path) for module, path in contracts_source_path().items()}
command = [
'./utils/join-contracts.py',
'--import-map',
json.dumps(remapping),
str(contracts_source_path_of_deployment_module(
source_module,
).joinpath(contract_name + '.sol')),
str(joined_file),
]
working_dir = Path(__file__).parent.parent
try:
subprocess.check_call(command, cwd=working_dir)
except subprocess.CalledProcessError as ex:
print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.')
raise ex
return joined_file.read_text()
|
Use join-contracts.py to concatenate all imported Solidity files.
Args:
source_module: a module name to look up contracts_source_path()
contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
|
juraj-google-style
|
def typical_or_extreme_period_name(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `typical_or_extreme_period_name`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `typical_or_extreme_period_name`')
self._typical_or_extreme_period_name = value
|
Corresponds to IDD Field `typical_or_extreme_period_name`
Args:
value (str): value for IDD Field `typical_or_extreme_period_name`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def __init__(self, method, stop_if_false=False):
self.method = method
self.stopped = threading.Event()
self.thread = None
self.stop_if_false = stop_if_false
|
Initializes the Interval.
Args:
method: A callable to execute, it should take no arguments.
stop_if_false: If True, the interval will exit if the method returns
False.
|
juraj-google-style
|
def check_media_service_name_availability(access_token, subscription_id, msname):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/microsoft.media/CheckNameAvailability?',
'api-version=', MEDIA_API])
ms_body = {'name': msname}
ms_body['type'] = 'mediaservices'
body = json.dumps(ms_body)
return do_post(endpoint, body, access_token)
|
Check media service name availability.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
msname (str): media service name.
Returns:
HTTP response.
|
juraj-google-style
|
def generate_identifier(sender, instance, **kwargs):
identifier = Concept.create_identifier(instance.query)
qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)
if instance.pk:
qs = qs.exclude(pk=instance.pk)
if qs.count() > 0:
raise ValueError("Concept identifier conflict")
instance.identifier = identifier
|
Generate and set identifier of concept before saving object to DB
Args:
sender (class): should be Concept
instance (Concept): saving concept
|
juraj-google-style
|
def _grad_fn(func_graph, grads):
assert len(func_graph.outputs) == len(grads)
ys = []
grad_ys = []
for y, grad_y in zip(func_graph.outputs, grads):
if not backprop_util.IsTrainable(y):
continue
ys.append(y)
grad_ys.append(grad_y)
result = gradients_util._GradientsHelper(ys, func_graph.inputs, grad_ys=grad_ys, src_graph=func_graph)
return result
|
The gradient function for each conditional branch.
This function builds the gradient graph of the corresponding forward-pass
conditional branch in `func_graph`. This is done by differentiating
func_graph's outputs w.r.t. its inputs.
Args:
func_graph: FuncGraph. The corresponding forward-pass function.
grads: The list of input gradient Tensors.
Returns:
The output gradient Tensors.
|
github-repos
|
def fetch_url(self, url):
url_path = urlparse.urlsplit(url).path
dst_path = os.path.basename(url_path)
dst_path = self.paths.prefixed(dst_path)
with LogTask(('Downloading %s' % url)):
urllib.urlretrieve(url=os.path.expandvars(url), filename=dst_path)
return dst_path
|
Retrieves the given url to the prefix
Args:
url(str): Url to retrieve
Returns:
str: path to the downloaded file
|
codesearchnet
|
def create(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO:
return self._path_open(path, 'wb', mime_type, compression_type)
|
Returns a write channel for the given file path.
Args:
path: string path of the file object to be written to the system
mime_type: MIME type to specify the type of content in the file object
compression_type: Type of compression to be used for this object
Returns: file handle with a close function for the user to use
|
github-repos
|
def short_repr(obj, max_len=40):
obj_repr = repr(obj)
if len(obj_repr) <= max_len:
return obj_repr
return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr))
|
Returns a short, term-friendly string representation of the object.
Args:
obj: An object for which to return a string representation.
max_len: Maximum length of the returned string. Longer reprs will be turned
into a brief descriptive string giving the type and length of obj.
|
juraj-google-style
|
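A quick illustration of `short_repr` above:

print(short_repr([1, 2, 3]))   # [1, 2, 3]  -- short enough, so the full repr is returned
print(short_repr('x' * 100))   # <str of length 102>  -- 100 characters plus the two quotes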
def search(self, query):
results = self.skype.conn('GET', SkypeConnection.API_DIRECTORY, auth=SkypeConnection.Auth.SkypeToken, params={'searchstring': query, 'requestId': '0'}).json().get('results', [])
return [SkypeUser.fromRaw(self.skype, json.get('nodeProfileData', {})) for json in results]
|
Search the Skype Directory for a user.
Args:
query (str): name to search for
Returns:
SkypeUser list: collection of possible results
|
codesearchnet
|
def _valid_dtypes(self):
return set([dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
|
Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
|
github-repos
|
def build_losses(self, logits_real, logits_fake):
with tf.name_scope('GAN_loss'):
score_real = tf.sigmoid(logits_real)
score_fake = tf.sigmoid(logits_fake)
tf.summary.histogram('score-real', score_real)
tf.summary.histogram('score-fake', score_fake)
with tf.name_scope('discrim'):
d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')
d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')
d_pos_acc = tf.reduce_mean(tf.cast((score_real > 0.5), tf.float32), name='accuracy_real')
d_neg_acc = tf.reduce_mean(tf.cast((score_fake < 0.5), tf.float32), name='accuracy_fake')
d_accuracy = tf.add((0.5 * d_pos_acc), (0.5 * d_neg_acc), name='accuracy')
self.d_loss = tf.add((0.5 * d_loss_pos), (0.5 * d_loss_neg), name='loss')
with tf.name_scope('gen'):
self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')
g_accuracy = tf.reduce_mean(tf.cast((score_fake > 0.5), tf.float32), name='accuracy')
add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)
|
Build standard GAN loss and set `self.g_loss` and `self.d_loss`.
D and G play two-player minimax game with value function V(G,D)
min_G max_D V(D, G) = IE_{x ~ p_data} [log D(x)] + IE_{z ~ p_fake} [log (1 - D(G(z)))]
Args:
logits_real (tf.Tensor): discrim logits from real samples
logits_fake (tf.Tensor): discrim logits from fake samples produced by generator
|
codesearchnet
|
def check_type(o, acceptable_types, may_be_none=True):
if (not isinstance(acceptable_types, tuple)):
acceptable_types = (acceptable_types,)
if (may_be_none and (o is None)):
pass
elif isinstance(o, acceptable_types):
pass
else:
error_message = 'We were expecting to receive an instance of one of the following types: {types}{none}; but instead we received {o} which is a {o_type}.'.format(types=', '.join([repr(t.__name__) for t in acceptable_types]), none=("or 'None'" if may_be_none else ''), o=o, o_type=repr(type(o).__name__))
raise TypeError(error_message)
|
Object is an instance of one of the acceptable types or None.
Args:
o: The object to be inspected.
acceptable_types: A type or tuple of acceptable types.
may_be_none(bool): Whether or not the object may be None.
Raises:
TypeError: If the object is None and may_be_none=False, or if the
object is not an instance of one of the acceptable types.
|
codesearchnet
|
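A few example calls for `check_type` above:

check_type(42, int)                                  # passes
check_type(None, int)                                # passes; may_be_none defaults to True
check_type(3.14, (int, float), may_be_none=False)    # passes
try:
    check_type('abc', (int, float), may_be_none=False)
except TypeError as exc:
    print(exc)   # explains which types were expected and what was actually received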
def mesh_axis_to_cumprod(self, tensor_shape):
tensor_layout = self.tensor_layout(tensor_shape)
ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)
ta2cumprod = tensor_shape.cumprod
return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]
|
For each mesh axis, give the product of previous tensor axes.
Args:
tensor_shape: Shape.
Returns:
list with length self.ndims where each element is an integer or None.
|
juraj-google-style
|
def convert_to_bq_name(name: str) -> str:
return BQ_REGEX.sub('_', name).lower()
|
Transform the given string into a valid BigQuery name -
convert non-alphanumeric characters to an underscore (_)
and lowercase the result for consistency.
Args:
* name: original name
Returns:
* Transformed valid name
|
github-repos
|
def orthorhombic(a: float, b: float, c: float):
return Lattice.from_parameters(a, b, c, 90, 90, 90)
|
Convenience constructor for an orthorhombic lattice.
Args:
a (float): *a* lattice parameter of the orthorhombic cell.
b (float): *b* lattice parameter of the orthorhombic cell.
c (float): *c* lattice parameter of the orthorhombic cell.
Returns:
Orthorhombic lattice of dimensions a x b x c.
|
codesearchnet
|
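A hedged usage sketch, assuming pymatgen's Lattice API:
```python
from pymatgen.core import Lattice

lat = Lattice.orthorhombic(4.2, 5.1, 6.3)
print(lat.lengths)   # (4.2, 5.1, 6.3)
print(lat.angles)    # (90.0, 90.0, 90.0)
```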
def get_service_name(self, service_id: str) -> str:
if (not self._manager):
raise RuntimeError('Only the Swarm manager node can retrieve all the services details.')
service = self._client.services.get(service_id)
return service.name
|
Get the name of the docker service.
Only manager nodes can retrieve the service name.
Args:
service_id (str): ID of the docker service.
Returns:
string, name of the docker service
|
codesearchnet
|
def SkipAhead(self, file_object, number_of_characters):
lines_size = len(self.lines)
while number_of_characters >= lines_size:
number_of_characters -= lines_size
self.lines = ''
self.ReadLines(file_object)
lines_size = len(self.lines)
if lines_size == 0:
return
self.lines = self.lines[number_of_characters:]
|
Skips ahead a number of characters.
Args:
file_object (dfvfs.FileIO): file-like object.
number_of_characters (int): number of characters.
|
juraj-google-style
|
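A standalone sketch of the same skip-ahead buffering idea over a plain file-like object, without the dfvfs dependency (class name and chunk size are illustrative):
```python
import io

class LineBuffer:
    """Toy buffer mirroring the SkipAhead logic above."""

    def __init__(self, chunk_size=16):
        self.lines = ''
        self._chunk_size = chunk_size

    def ReadLines(self, file_object):
        self.lines += file_object.read(self._chunk_size)

    def SkipAhead(self, file_object, number_of_characters):
        while number_of_characters >= len(self.lines):
            number_of_characters -= len(self.lines)
            self.lines = ''
            self.ReadLines(file_object)
            if not self.lines:
                return
        self.lines = self.lines[number_of_characters:]

source = io.StringIO('abcdefghijklmnopqrstuvwxyz')
buf = LineBuffer()
buf.ReadLines(source)      # buffer now holds 'abcdefghijklmnop'
buf.SkipAhead(source, 20)  # consumes the buffer, refills once, then trims
print(buf.lines)           # 'uvwxyz'
```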
def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
if self.config.normalize_before:
spectrogram = (spectrogram - self.mean) / self.scale
is_batched = spectrogram.dim() == 3
if not is_batched:
spectrogram = spectrogram.unsqueeze(0)
hidden_states = spectrogram.transpose(2, 1)
hidden_states = self.conv_pre(hidden_states)
for i in range(self.num_upsamples):
hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
hidden_states = self.upsampler[i](hidden_states)
res_state = self.resblocks[i * self.num_kernels](hidden_states)
for j in range(1, self.num_kernels):
res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
hidden_states = res_state / self.num_kernels
hidden_states = nn.functional.leaky_relu(hidden_states)
hidden_states = self.conv_post(hidden_states)
hidden_states = torch.tanh(hidden_states)
if not is_batched:
waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
else:
waveform = hidden_states.squeeze(1)
return waveform
|
spectrogram (`torch.FloatTensor`):
Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
Returns:
`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
|
github-repos
|
def sort_variants(vcf_handle):
logger.debug("Creating temp file")
temp_file = NamedTemporaryFile(delete=False)
temp_file.close()
logger.debug("Opening temp file with codecs")
temp_file_handle = codecs.open(
temp_file.name,
mode='w',
encoding='utf-8',
errors='replace'
)
try:
with codecs.open(temp_file.name,mode='w',encoding='utf-8',errors='replace') as f:
for line in vcf_handle:
                if not line.startswith('#'):
line = line.rstrip().split('\t')
chrom = line[0]
priority = get_chromosome_priority(chrom)
print_line = "{0}\t{1}\n".format(priority, '\t'.join(line))
f.write(print_line)
sort_variant_file(temp_file.name)
with codecs.open(temp_file.name,mode='r',encoding='utf-8',errors='replace') as f:
for line in f:
line = line.rstrip().split('\t')
yield '\t'.join(line[1:])
except Exception as err:
logger.error("Something went wrong")
logger.error(err)
finally:
logger.debug("Deleting temp file")
os.remove(temp_file.name)
logger.debug("Temp file deleted")
|
Sort the variants of a vcf file
Args:
vcf_handle (iterable): An iterable with vcf formatted lines
Returns:
sorted_variants (Iterable): An iterable with sorted variants
|
juraj-google-style
|
def similarity(self, track):
idx = index.Index()
i = 0
for (i, segment) in enumerate(self.segments):
idx.insert(i, segment.bounds(), obj=segment)
final_siml = []
final_diff = []
for (i, segment) in enumerate(track.segments):
query = idx.intersection(segment.bounds(), objects=True)
res_siml = []
res_diff = []
for result in query:
(siml, diff) = segment_similarity(segment, result.object)
res_siml.append(siml)
res_diff.append((result.id, i, diff))
if (len(res_siml) > 0):
final_siml.append(max(res_siml))
final_diff.append(res_diff[np.argmax(res_siml)])
else:
final_siml.append(0)
final_diff.append([])
return (np.mean(final_siml), final_diff)
|
Compares two tracks based on their topology
This method compares the given track against this
instance. It only verifies if the given track is close
to this one, not the other way around.
Args:
track (:obj:`Track`)
Returns:
Two-tuple with global similarity between tracks
and an array with the similarity between segments
|
codesearchnet
|
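A minimal rtree sketch of the spatial pre-filtering step used above, with track segments replaced by plain bounding boxes (illustrative only):
```python
from rtree import index

idx = index.Index()
idx.insert(0, (0.0, 0.0, 1.0, 1.0), obj='segment-0')
idx.insert(1, (5.0, 5.0, 6.0, 6.0), obj='segment-1')

# Only entries whose bounds overlap the query box are returned,
# so the expensive per-segment comparison runs on far fewer pairs.
hits = idx.intersection((0.5, 0.5, 2.0, 2.0), objects=True)
print([(hit.id, hit.object) for hit in hits])   # only segment-0 overlaps
```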
def _get_resource_params(self, resource, for_update=False):
if isinstance(resource, CollectionResource):
return self._get_collection_params(resource)
if isinstance(resource, ExperimentResource):
return self._get_experiment_params(resource, for_update)
if isinstance(resource, CoordinateFrameResource):
return self._get_coordinate_params(resource, for_update)
if isinstance(resource, ChannelResource):
return self._get_channel_params(resource, for_update)
raise TypeError('resource is not supported type.')
|
Get dictionary containing all parameters for the given resource.
When getting params for a coordinate frame update, only name and
description are returned because they are the only fields that can
be updated.
Args:
resource (intern.resource.boss.resource.BossResource): A sub-class
whose parameters will be extracted into a dictionary.
for_update (bool): True if params will be used for an update.
Returns:
(dictionary): A dictionary containing the resource's parameters as
required by the Boss API.
Raises:
TypeError if resource is not a supported class.
|
codesearchnet
|
def _WsdlHasMethod(self, method_name):
return (method_name in self.suds_client.wsdl.services[0].ports[0].methods)
|
Determine if the wsdl contains a method.
Args:
method_name: The name of the method to search.
Returns:
True if the method is in the WSDL, otherwise False.
|
codesearchnet
|
def FromTrimmedData(byts):
block = Block()
block.__is_trimmed = True
ms = StreamManager.GetStream(byts)
reader = BinaryReader(ms)
block.DeserializeUnsigned(reader)
reader.ReadByte()
witness = Witness()
witness.Deserialize(reader)
block.Script = witness
bc = GetBlockchain()
tx_list = []
for tx_hash in reader.ReadHashes():
tx = bc.GetTransaction(tx_hash)[0]
if (not tx):
raise Exception('Could not find transaction!\n Are you running code against a valid Blockchain instance?\n Tests that accesses transactions or size of a block but inherit from NeoTestCase instead of BlockchainFixtureTestCase will not work.')
tx_list.append(tx)
if (len(tx_list) < 1):
raise Exception(('Invalid block, no transactions found for block %s ' % block.Index))
block.Transactions = tx_list
StreamManager.ReleaseStream(ms)
return block
|
Deserialize a block from raw bytes.
Args:
byts (bytes): raw byte data of the trimmed block.
Returns:
Block: the deserialized block.
|
codesearchnet
|
def get_rel_timestamps(self, node_name, output_slot, debug_op, device_name=None):
device_name = self._infer_device_name(device_name, node_name)
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum[device_name]:
raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key "%s" does not exist in the debug dump' % watch_key)
return self._watch_key_to_rel_time[device_name][watch_key]
|
Get the relative timestamps for a debug-dumped tensor.
Relative timestamp means (absolute timestamp - `t0`), where `t0` is the
absolute timestamp of the first dumped tensor in the dump root. The tensor
may be dumped multiple times in the dump root directory, so a list of
relative timestamps (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
device_name: (`str`) name of the device. If there is only one device or if
the specified debug_watch_key exists on only one device, this argument
is optional.
Returns:
(`list` of `int`) list of relative timestamps.
Raises:
WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not
exist in the debug dump data.
|
github-repos
|
def keep_artifacts(self, **kwargs):
path = '%s/%s/artifacts/keep' % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path)
|
Prevent artifacts from being deleted when expiration is set.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the request could not be performed
|
juraj-google-style
|
def get_issue(self, issue_id, params=None):
return self._get((self.API_URL + 'issue/{}'.format(issue_id)), params=params)
|
Returns a full representation of the issue for the given issue key.
The issue JSON consists of the issue key and a collection of fields. Additional information like links to
workflow transition sub-resources, or HTML rendered values of the fields supporting HTML rendering can be
retrieved with expand request parameter specified.
The fields request parameter accepts a comma-separated list of fields to include in the response. It can be used
to retrieve a subset of fields. By default all fields are returned in the response. A particular field can be
excluded from the response if prefixed with a "-" (minus) sign. Parameter can be provided multiple times on a
single request.
By default, all fields are returned in the response. Note: this is different from a JQL search - only navigable
fields are returned by default (*navigable).
Args:
issue_id: The issue id or key.
params: Optional query parameters to pass to the request.
Returns:
The full JSON representation of the issue.
|
codesearchnet
|
class Softmax(Layer):
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self._build_at_init()
def call(self, inputs, mask=None):
if mask is not None:
adder = (1.0 - backend.cast(mask, inputs.dtype)) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
outputs = backend.numpy.exp(inputs - backend.math.logsumexp(inputs, axis=self.axis, keepdims=True))
else:
outputs = activations.softmax(inputs, axis=self.axis[0])
else:
outputs = activations.softmax(inputs, axis=self.axis)
if mask is not None:
outputs = backend.numpy.multiply(outputs, backend.cast(mask, outputs.dtype))
return outputs
def get_config(self):
config = super().get_config()
config.update({'axis': self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
>>> softmax_layer = keras.layers.Softmax()
>>> input = np.array([1.0, 2.0, 1.0])
>>> result = softmax_layer(input)
>>> result
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
|
github-repos
|
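A hedged sketch of the masked call path described above (Keras 3 imports assumed):
```python
import numpy as np
import keras

layer = keras.layers.Softmax()
logits = np.array([[1.0, 2.0, 3.0]])
mask = np.array([[True, True, False]])   # third position is excluded

out = layer(logits, mask=mask)
print(out)   # the masked position receives (near-)zero probability
```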
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
raise NotImplementedError
|
Calls the wrapped cell and performs the wrapping logic.
This method is called from the wrapper's `call` or `__call__` methods.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
|
github-repos
|
def upsert(self):
required_parameters = []
self._stackParameters = []
try:
self._initialize_upsert()
except Exception:
return False
try:
available_parameters = self._parameters.keys()
for parameter_name in self._template.get('Parameters', {}):
required_parameters.append(str(parameter_name))
logging.info(' required parameters: ' + str(required_parameters))
logging.info('available parameters: ' + str(available_parameters))
parameters = []
for required_parameter in required_parameters:
parameter = {}
parameter['ParameterKey'] = str(required_parameter)
required_parameter = str(required_parameter)
if required_parameter in self._parameters:
parameter['ParameterValue'] = self._parameters[required_parameter]
else:
parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
parameters.append(parameter)
if not self._analyze_stuff():
sys.exit(1)
if self._config.get('dryrun', False):
logging.info('Generating change set')
set_id = self._generate_change_set(parameters)
if set_id:
self._describe_change_set(set_id)
logging.info('This was a dryrun')
sys.exit(0)
self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
self._tags.append({"Key": "ANSWER", "Value": str(42)})
if self._updateStack:
stack = self._cloudFormation.update_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
else:
stack = self._cloudFormation.create_stack(
StackName=self._config.get('environment', {}).get('stack_name', None),
TemplateURL=self._templateUrl,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
Tags=self._tags,
ClientRequestToken=str(uuid.uuid4())
)
logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
except Exception as x:
if self._verbose:
logging.error(x, exc_info=True)
else:
logging.error(x, exc_info=False)
return False
return True
|
The main event of the utility. Create or update a CloudFormation
stack, injecting properties where needed.
Args:
None
Returns:
True if the stack create/update is started successfully else
False if the start goes off in the weeds.
Exits:
If the user asked for a dryrun, exit here with code 0. There is no
point continuing after that.
|
juraj-google-style
|
def fillCreate(self, qry_str):
count = 0
for fld in self.m_all_fields:
fld_type = self.m_all_fields[fld][MeterData.TypeValue]
fld_len = self.m_all_fields[fld][MeterData.SizeValue]
qry_spec = self.mapTypeToSql(fld_type, fld_len)
if count > 0:
qry_str += ", \n"
qry_str = qry_str + ' ' + fld + ' ' + qry_spec
count += 1
qry_str += (",\n\t" + Field.Time_Stamp + " BIGINT,\n\t" +
"Raw_A VARCHAR(512),\n\t" +
"Raw_B VARCHAR(512)\n)")
return qry_str
|
Return query portion below CREATE.
Args:
qry_str (str): String as built.
Returns:
string: Passed string with fields appended.
|
juraj-google-style
|
async def check_record(self, record, timeout=60):
start_time = time.time()
(name, rr_data, r_type, ttl) = self._extract_record_data(record)
r_type_code = async_dns.types.get_code(r_type)
resolvable_record = False
retries = 0
sleep_time = 5
while ((not resolvable_record) and (timeout > (retries * sleep_time))):
retries += 1
resolver_res = (await self._resolver.query(name, r_type_code))
possible_ans = resolver_res.an
resolvable_record = (await self._check_resolver_ans(possible_ans, name, rr_data, ttl, r_type_code))
if (not resolvable_record):
(await asyncio.sleep(sleep_time))
if (not resolvable_record):
logging.info(f'Sending metric record-checker-failed: {record}.')
else:
final_time = float((time.time() - start_time))
success_msg = f'This record: {record} took {final_time} to register.'
logging.info(success_msg)
|
Measures the time for a DNS record to become available.
Query a provided DNS server multiple times until the reply matches the
information in the record or until timeout is reached.
Args:
record (dict): DNS record as a dict with record properties.
timeout (int): Time threshold to query the DNS server.
|
codesearchnet
|
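The row above is a poll-until-resolvable pattern; a generic asyncio sketch of the same idea (names and the fake check are illustrative, not the class's real API):
```python
import asyncio
import time

async def poll_until(predicate, timeout=60, sleep_time=5):
    """Await predicate() until it returns True or the timeout elapses."""
    start = time.time()
    while time.time() - start < timeout:
        if await predicate():
            return time.time() - start   # seconds until success
        await asyncio.sleep(sleep_time)
    return None                          # timed out

async def main():
    attempts = {'n': 0}

    async def fake_dns_check():
        attempts['n'] += 1
        return attempts['n'] >= 3        # "resolves" on the third try

    elapsed = await poll_until(fake_dns_check, timeout=10, sleep_time=0.1)
    print(elapsed)

asyncio.run(main())
```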
def _update(self, item, feed_item):
pass
|
Performs an update in DCM.
Creative assets cannot be updated, so this method overrides _update from
BaseDAO with a no-op to prevent an error.
Args:
item: The item to update in DCM.
feed_item: The feed item representing the creative asset in the Bulkdozer
feed.
|
github-repos
|
def delete(self, webhookId):
check_type(webhookId, basestring, may_be_none=False)
self._session.delete(API_ENDPOINT + '/' + webhookId)
|
Delete a webhook, by ID.
Args:
webhookId(basestring): The ID of the webhook to be deleted.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
|
juraj-google-style
|
def find_unique(self, product_type, short_name, include_hidden=False):
prods = self.find_all(product_type, short_name, include_hidden)
if (len(prods) == 0):
raise BuildError('Could not find product by name in find_unique', name=short_name, type=product_type)
if (len(prods) > 1):
raise BuildError('Multiple providers of the same product in find_unique', name=short_name, type=product_type, products=prods)
if self._tracking:
self._resolved_products.append(prods[0])
return prods[0]
|
Find the unique provider of a given product by its short name.
This function will ensure that the product is only provided by exactly
one tile (either this tile or one of its dependencies) and raise a
BuildError if not.
Args:
product_type (str): The type of product that we are looking for, like
firmware_image, library etc.
short_name (str): The short name of the product that we wish to find,
usually its os.path.basename()
include_hidden (bool): Return products that are hidden and not selected
as visible in the depends section of this tile's module settings.
This defaults to False.
Returns:
ProductInfo: The information of the one unique provider of this product.
|
codesearchnet
|
def _load_attributes(self, mft_config, attrs_view):
offset = 0
load_attrs = mft_config.attribute_load_list
while (attrs_view[offset:offset+4] != b'\xff\xff\xff\xff'):
attr_type, attr_len, non_resident = _get_attr_info(attrs_view[offset:])
if attr_type in load_attrs:
attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:])
            if attr.header.attr_type_id is not AttrTypes.DATA:
self.attrs[attr.header.attr_type_id].append(attr)
else:
self._add_data_attribute(attr)
offset += attr_len
|
Loads all the attributes of an entry.
Once executed, all the attributes should have been loaded in the
attribute *attrs* instance attribute.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
attrs_view (memoryview(bytearray)) - A binary stream that starts at
the first attribute until the end of the entry
|
juraj-google-style
|
def contact(self, id):
try:
json = self.skype.conn('POST', '{0}/users/batch/profiles'.format(SkypeConnection.API_USER), json={'usernames': [id]}, auth=SkypeConnection.Auth.SkypeToken).json()
contact = SkypeContact.fromRaw(self.skype, json[0])
if (contact.id not in self.contactIds):
self.contactIds.append(contact.id)
return self.merge(contact)
except SkypeApiException as e:
if ((len(e.args) >= 2) and (getattr(e.args[1], 'status_code', None) == 403)):
return None
raise
|
Retrieve all details for a specific contact, including fields such as birthday and mood.
Args:
id (str): user identifier to lookup
Returns:
SkypeContact: resulting contact object
|
codesearchnet
|
def __init__(self, instrumentation_key, *args, **kwargs):
if not instrumentation_key:
raise Exception('Instrumentation key was required but not provided')
telemetry_channel = kwargs.get('telemetry_channel')
if 'telemetry_channel' in kwargs:
del kwargs['telemetry_channel']
self.client = applicationinsights.TelemetryClient(instrumentation_key, telemetry_channel)
super(LoggingHandler, self).__init__(*args, **kwargs)
|
Initialize a new instance of the class.
Args:
instrumentation_key (str): the instrumentation key to use while sending telemetry to the service.
|
juraj-google-style
|
def to_api_repr(self):
answer = {'mode': self.mode.upper(), 'name': self.name, 'type': self.field_type.upper(), 'description': self.description}
if (self.field_type.upper() == 'RECORD'):
answer['fields'] = [f.to_api_repr() for f in self.fields]
return answer
|
Return a dictionary representing this schema field.
Returns:
dict: A dictionary representing the SchemaField in a serialized
form.
|
codesearchnet
|
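A hedged usage sketch with google-cloud-bigquery's SchemaField, exercising the nested-RECORD branch (the exact keys in the output may vary by library version):
```python
from google.cloud.bigquery import SchemaField

field = SchemaField(
    'address', 'RECORD', mode='REPEATED',
    fields=[SchemaField('city', 'STRING'), SchemaField('zip', 'STRING')],
)
print(field.to_api_repr())
# e.g. {'name': 'address', 'type': 'RECORD', 'mode': 'REPEATED',
#       'description': None, 'fields': [{...}, {...}]}
```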