code | docstring | source |
---|---|---|
def __init__(self, context=None):
if context is None:
context = google.datalab.Context.default()
self._context = context
self._api = _api.Api(context)
self._project_id = context.project_id if context else self._api.project_id
|
Initializes an instance of a BucketList.
Args:
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
|
juraj-google-style
|
def _MergeOptional(self, a, b):
if a and b:
if a != b:
raise MergeError("values must be identical if both specified "
"('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
transitfeed.EncodeUnicode(b)))
return a or b
|
Tries to merge two values which may be None.
If both values are not None, they are required to be the same and the
merge is trivial. If one of the values is None and the other is not None,
the merge results in the one which is not None. If both are None, the merge
results in None.
Args:
a: The first value.
b: The second value.
Returns:
The merged value.
Raises:
MergeError: If both values are not None and are not the same.
|
juraj-google-style
|
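A minimal standalone sketch of the merge semantics described above; `MergeError` and the formatting are stand-ins rather than the transitfeed library's own helpers:

class MergeError(ValueError):
    pass

def merge_optional(a, b):
    # Same rule as _MergeOptional: if both are set they must match, otherwise keep the non-None one.
    if a and b and a != b:
        raise MergeError("values must be identical if both specified ('%s' vs '%s')" % (a, b))
    return a or b

assert merge_optional(None, 'x') == 'x'
assert merge_optional('x', 'x') == 'x'
assert merge_optional(None, None) is None
# merge_optional('x', 'y') raises MergeError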
def list_events(self):
event_dir_dict = collections.defaultdict(set)
for event_file in self._glob_events_files(self._paths, recursive=True):
dir = os.path.dirname(event_file)
try:
for record in tf_record.tf_record_iterator(event_file):
event = event_pb2.Event.FromString(record)
if ((event.summary is None) or (event.summary.value is None)):
continue
for value in event.summary.value:
if ((value.simple_value is None) or (value.tag is None)):
continue
event_dir_dict[value.tag].add(dir)
except tf.errors.DataLossError:
continue
return dict(event_dir_dict)
|
List all scalar events in the directory.
Returns:
A dictionary. Key is the name of an event. Value is a set of dirs that contain that event.
|
codesearchnet
|
def get_options_from_str(obj_str, **kwargs):
if isinstance(obj_str, list):
return obj_str
try:
obj = get_obj_frm_str(obj_str, **kwargs)
if obj:
return list(obj)
except AttributeError:
pass
return []
|
Returns a list of options from a python object string
args:
obj_str: python list of options or a python object path
Example: "rdfframework.connections.ConnManager[{param1}]"
kwargs:
* kwargs used to format the 'obj_str'
|
juraj-google-style
|
def _set_subject(self, subject):
def test_uri(value):
' test to see if the value is a uri or bnode\n\n Returns: Uri or Bnode '
if (not isinstance(value, (Uri, BlankNode))):
try:
if value.startswith('_:'):
return BlankNode(value)
else:
return Uri(value)
except:
return BlankNode()
else:
return value
if isinstance(subject, dict):
self.subject = test_uri(subject['s'])
if isinstance(subject['o'], list):
for item in subject['o']:
self.add_property(subject['p'], item)
else:
self.add_property(subject['p'], subject['o'])
else:
self.subject = test_uri(subject)
|
sets the subject value for the class instance
Args:
subject(dict, Uri, str): the subject for the class instance
|
codesearchnet
|
def is_subtype_of(self, other):
try:
self.sanity_check_type(other)
nest.assert_same_structure(self._element_spec, other._element_spec)
except (TypeError, ValueError):
return False
self_elements = nest.flatten(self._element_spec)
other_elements = nest.flatten(other._element_spec)
return all((self_element.is_subtype_of(other_element) for self_element, other_element in zip(self_elements, other_elements)))
|
Returns True if `self` is subtype of `other`.
Args:
other: A `TypeSpec`.
|
github-repos
|
class InstructBlipEncoder(nn.Module):
def __init__(self, config: InstructBlipConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`InstructBlipEncoderLayer`].
Args:
config (`InstructBlipConfig`):
The corresponding vision configuration for the `InstructBlipEncoder`.
|
github-repos
|
def CheckKeyCompatibility(cls, key_path):
key_path_upper = key_path.upper()
for key_path_prefix in cls._COMPATIBLE_REGISTRY_KEY_PATH_PREFIXES:
if key_path_upper.startswith(key_path_prefix):
return True
logger.warning('Key path: "{0:s}" is currently not supported'.format(key_path))
return False
|
Checks if a Windows Registry key path is supported by dfWinReg.
Args:
key_path (str): path of the Windows Registry key.
Returns:
bool: True if key is compatible or False if not.
|
codesearchnet
|
def addFixedEffect(self, F=None, A=None, Ftest=None):
if A is None:
A = sp.eye(self.P)
if F is None:
F = sp.ones((self.N,1))
if self.Ntest is not None:
Ftest = sp.ones((self.Ntest,1))
assert A.shape[1]==self.P, 'VarianceDecomposition:: A has incompatible shape'
assert F.shape[0]==self.N, 'VarianceDecomposition:: F has incompatible shape'
if Ftest is not None:
assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'
assert Ftest.shape[0]==self.Ntest, 'VarianceDecomposition:: Ftest has incompatible shape'
assert Ftest.shape[1]==F.shape[1], 'VarianceDecomposition:: Ftest has incompatible shape'
self.sample_designs.append(F)
self.sample_test_designs.append(Ftest)
self.trait_designs.append(A)
self._desync()
|
Add a fixed effect term to the model.
Args:
F: sample design matrix for the fixed effect [N,K]
A: trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P]
Ftest: sample design matrix for test samples [Ntest,K]
|
juraj-google-style
|
def __init__(self, _args):
super(TcExProfile, self).__init__(_args)
self._input_permutations = []
self._output_permutations = []
self.data_dir = os.path.join(self.args.outdir, 'data')
self.profile_dir = os.path.join(self.args.outdir, 'profiles')
self.profiles = {}
|
Initialize Class properties.
Args:
_args (namespace): The argparser args Namespace.
|
juraj-google-style
|
def get(self, uid: int) -> FrozenSet[Flag]:
recent = (_recent_set if (uid in self._recent) else frozenset())
flags = self._flags.get(uid)
return (recent if (flags is None) else (flags | recent))
|
Return the session flags for the mailbox session.
Args:
uid: The message UID value.
|
codesearchnet
|
def filter(self, limit=None, to=None, category=None):
if category and not to:
msg_slice = itertools.islice((x for x in self.store if x[2] == category), limit)
elif to and not category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)
elif to and category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1]) and x[2] == category), limit)
else:
msg_slice = self.all(limit=limit)
return msg_slice
return list(msg_slice)[::-1]
|
Returns the events that match the filters
Args:
limit (int, optional): the max length of the events to return (Default value = None)
to (str, optional): only events that have been sent or received by 'to' (Default value = None)
category (str, optional): only events belonging to the category (Default value = None)
Returns:
list: a list of filtered events
|
juraj-google-style
|
def _save_tensor_value_to_cache_op(self, cache_idx, updates, graph):
updates = self._merge_tensor_signatures(updates)
updates = array_ops.reshape(updates, [1, self._num_signature_dimensions()])
indices = constant_op.constant([cache_idx])
cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph)
return state_ops.scatter_update(cache, indices, updates).op
|
Returns an op that will save the given updates to an entry in the cache.
Args:
cache_idx: The cache index of the tensor within the cache.
updates: A dictionary of the signature updates.
graph: A TensorFlow graph.
Returns:
Cache update operation.
|
github-repos
|
def forward_loss(self, pixel_values, pred, mask, interpolate_pos_encoding: bool=False):
target = self.patchify(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
if self.config.norm_pix_loss:
mean = tf.reduce_mean(target, axis=-1, keepdims=True)
var = tf.math.reduce_variance(target, axis=-1, keepdims=True)
target = (target - mean) / (var + 1e-06) ** 0.5
loss = (pred - target) ** 2
loss = tf.reduce_mean(loss, axis=-1)
loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)
loss = tf.reshape(loss, (1,))
return loss
|
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, height, width, num_channels)`):
Pixel values.
pred (`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
Predicted pixel values.
mask (`tf.Tensor` of shape `(batch_size, sequence_length)`):
Tensor indicating which patches are masked (1) and which are not (0).
interpolate_pos_encoding (`bool`, *optional*, default `False`):
interpolation flag passed during the forward pass.
Returns:
`tf.Tensor`: Pixel reconstruction loss.
|
github-repos
|
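A hedged NumPy sketch of the `norm_pix_loss` normalization and masked averaging used above; shapes, values, and the mask are made up and this is not the model's own code:

import numpy as np

target = np.random.rand(2, 4, 16)                     # (batch, num_patches, patch_dim)
pred = np.random.rand(2, 4, 16)
mask = np.array([[1, 0, 1, 1], [0, 1, 1, 0]], float)  # 1 = masked patch contributes to the loss

mean = target.mean(axis=-1, keepdims=True)
var = target.var(axis=-1, keepdims=True)
target = (target - mean) / (var + 1e-6) ** 0.5        # per-patch normalization

loss = ((pred - target) ** 2).mean(axis=-1)           # per-patch MSE, shape (batch, num_patches)
loss = (loss * mask).sum() / mask.sum()               # average over masked patches only
print(float(loss))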
def add_vlan_int(self, vlan_id):
config = ET.Element('config')
vlinterface = ET.SubElement(config, 'interface-vlan', xmlns='urn:brocade.com:mgmt:brocade-interface')
interface = ET.SubElement(vlinterface, 'interface')
vlan = ET.SubElement(interface, 'vlan')
name = ET.SubElement(vlan, 'name')
name.text = vlan_id
try:
self._callback(config)
return True
except Exception as error:
logging.error(error)
return False
|
Add VLAN Interface. VLAN interfaces are required for VLANs even when
not wanting to use the interface for any L3 features.
Args:
vlan_id: ID for the VLAN interface being created. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
|
codesearchnet
|
def get_num_bytes(self, batch: Sequence[scipy.sparse.csr_matrix]) -> int:
return sum((sys.getsizeof(element) for element in batch))
|
Returns:
The number of bytes of data for a batch.
|
github-repos
|
def change_wavelength(self, wavelength):
for axis in self.axes:
if issubclass(type(axis), Slabs):
axis.change_wavelength(wavelength)
self.xx, self.xy, self.yx, self.yy, self.zz = self.axes
self._wl = wavelength
|
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
|
juraj-google-style
|
def setup_components_and_tf_funcs(self, custom_getter=None):
if custom_getter is None:
def custom_getter(getter, name, registered=False, **kwargs):
if registered:
self.registered_variables.add(name)
elif name in self.registered_variables:
registered = True
variable = getter(name=name, **kwargs)
if registered:
pass
elif name in self.all_variables:
assert variable is self.all_variables[name]
if kwargs.get('trainable', True):
assert variable is self.variables[name]
if 'variables' in self.summary_labels:
tf.contrib.summary.histogram(name=name, tensor=variable)
else:
self.all_variables[name] = variable
if kwargs.get('trainable', True):
self.variables[name] = variable
if 'variables' in self.summary_labels:
tf.contrib.summary.histogram(name=name, tensor=variable)
return variable
self.fn_initialize = tf.make_template(
name_='initialize',
func_=self.tf_initialize,
custom_getter_=custom_getter
)
self.fn_preprocess = tf.make_template(
name_='preprocess',
func_=self.tf_preprocess,
custom_getter_=custom_getter
)
self.fn_actions_and_internals = tf.make_template(
name_='actions-and-internals',
func_=self.tf_actions_and_internals,
custom_getter_=custom_getter
)
self.fn_observe_timestep = tf.make_template(
name_='observe-timestep',
func_=self.tf_observe_timestep,
custom_getter_=custom_getter
)
self.fn_action_exploration = tf.make_template(
name_='action-exploration',
func_=self.tf_action_exploration,
custom_getter_=custom_getter
)
return custom_getter
|
Allows child models to create the model's component objects, such as optimizer(s), memory(s), etc.
Creates all tensorflow functions via tf.make_template calls on all the class' "tf_"-methods.
Args:
custom_getter: The `custom_getter_` object to use for `tf.make_template` when creating TensorFlow functions.
If None, use a default custom_getter_.
Returns: The custom_getter passed in (or a default one if custom_getter was None).
|
juraj-google-style
|
def onTagDel(self, name, func):
if '*' in name:
self.ontagdelglobs.add(name, func)
else:
self.ontagdels[name].append(func)
|
Register a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
|
juraj-google-style
|
def get_course_and_course_run(self, course_run_id):
course_id = parse_course_key(course_run_id)
course = self.get_course_details(course_id)
course_run = None
if course:
course_run = None
course_runs = [course_run for course_run in course['course_runs'] if course_run['key'] == course_run_id]
if course_runs:
course_run = course_runs[0]
return course, course_run
|
Return the course and course run metadata for the given course run ID.
Arguments:
course_run_id (str): The course run ID.
Returns:
tuple: The course metadata and the course run metadata.
|
juraj-google-style
|
def _find_penultimate_layer(model, layer_idx, penultimate_layer_idx):
if penultimate_layer_idx is None:
for idx, layer in utils.reverse_enumerate(model.layers[:layer_idx - 1]):
if isinstance(layer, Wrapper):
layer = layer.layer
if isinstance(layer, (_Conv, _Pooling1D, _Pooling2D, _Pooling3D)):
penultimate_layer_idx = idx
break
if penultimate_layer_idx is None:
raise ValueError('Unable to determine penultimate `Conv` or `Pooling` '
'layer for layer_idx: {}'.format(layer_idx))
if layer_idx < 0:
layer_idx = len(model.layers) + layer_idx
if penultimate_layer_idx > layer_idx:
raise ValueError('`penultimate_layer_idx` needs to be before `layer_idx`')
return model.layers[penultimate_layer_idx]
|
Searches for the nearest penultimate `Conv` or `Pooling` layer.
Args:
model: The `keras.models.Model` instance.
layer_idx: The layer index within `model.layers`.
penultimate_layer_idx: The pre-layer to `layer_idx`. If set to None, the nearest penultimate
`Conv` or `Pooling` layer is used.
Returns:
The penultimate layer.
|
juraj-google-style
|
def from_text_vision_configs(cls, text_config: Siglip2TextConfig, vision_config: Siglip2VisionConfig, **kwargs):
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
|
Instantiate a [`Siglip2Config`] (or a derived class) from siglip2 text model configuration and siglip2 vision
model configuration.
Returns:
[`Siglip2Config`]: An instance of a configuration object
|
github-repos
|
def grab_data(self, f_start=None, f_stop=None, t_start=None, t_stop=None, if_id=0):
self.freqs = self.populate_freqs()
self.timestamps = self.populate_timestamps()
if (f_start is None):
f_start = self.freqs[0]
if (f_stop is None):
f_stop = self.freqs[(- 1)]
i0 = np.argmin(np.abs((self.freqs - f_start)))
i1 = np.argmin(np.abs((self.freqs - f_stop)))
if (i0 < i1):
plot_f = self.freqs[i0:(i1 + 1)]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:(i1 + 1)])
else:
plot_f = self.freqs[i1:(i0 + 1)]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:(i0 + 1)])
return (plot_f, plot_data)
|
Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
|
codesearchnet
|
def _ConvertAttributeValueToDict(cls, attribute_value):
if isinstance(attribute_value, py2to3.BYTES_TYPE):
encoded_value = binascii.b2a_qp(attribute_value)
encoded_value = codecs.decode(encoded_value, 'ascii')
attribute_value = {'__type__': 'bytes', 'stream': '{0:s}'.format(encoded_value)}
elif isinstance(attribute_value, (list, tuple)):
json_list = []
for list_element in attribute_value:
json_dict = cls._ConvertAttributeValueToDict(list_element)
json_list.append(json_dict)
if isinstance(attribute_value, list):
attribute_value = json_list
else:
attribute_value = {'__type__': 'tuple', 'values': json_list}
elif isinstance(attribute_value, collections.Counter):
attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value)
elif isinstance(attribute_value, dfvfs_path_spec.PathSpec):
attribute_value = cls._ConvertPathSpecToDict(attribute_value)
elif isinstance(attribute_value, containers_interface.AttributeContainer):
attribute_value = cls._ConvertAttributeContainerToDict(attribute_value)
return attribute_value
|
Converts an attribute value into a JSON dictionary.
Args:
attribute_value (object): an attribute value.
Returns:
dict|list: The JSON serialized object which can be a dictionary or a list.
|
codesearchnet
|
def AddClass(self, class_name, gtfs_class):
if (class_name in self._class_mapping):
raise problems.DuplicateMapping(class_name)
self._class_mapping[class_name] = gtfs_class
|
Adds an entry to the list of known classes.
Args:
class_name: A string with name through which gtfs_class is to be made
accessible.
gtfs_class: The class to be added.
Raises:
DuplicateMapping if class_name is already present in the class mapping.
|
codesearchnet
|
def argument_parser(args):
parser = argparse.ArgumentParser(prog='nagaram', description='Finds Scrabble anagrams.', formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
parser.add_argument('-h', '--help', dest='help', action='store_true', default=False)
parser.add_argument('--sowpods', dest='sowpods', action='store_true', default=False)
parser.add_argument('--length', '-l', dest='length', action='store_true', default=False)
parser.add_argument('--starts-with', '-s', dest='starts_with', metavar='chars', default='', nargs=1, type=str)
parser.add_argument('--ends-with', '-e', dest='ends_with', metavar='chars', default='', nargs=1, type=str)
parser.add_argument('--version', '-v', action='version', version='Nagaram {0} (Released: {1})'.format(nagaram.__version__, nagaram.__release_date__))
parser.add_argument(dest='wordlist', metavar='letters to find anagrams with (? for anything, _ for blanks)', nargs=argparse.REMAINDER)
settings = parser.parse_args(args)
if settings.help:
raise SystemExit(nagaram.__doc__.strip())
if (not settings.wordlist):
raise SystemExit(parser.print_usage())
if settings.starts_with:
settings.starts_with = settings.starts_with[0]
if settings.ends_with:
settings.ends_with = settings.ends_with[0]
return (settings.wordlist, settings.sowpods, settings.length, settings.starts_with, settings.ends_with)
|
Argparse logic, command line options.
Args:
args: sys.argv[1:], everything passed to the program after its name
Returns:
A tuple of:
a list of words/letters to search
a boolean to declare if we want to use the sowpods words file
a boolean to declare if we want to output anagrams by length
a string of starting characters to find anagrams based on
a string of ending characters to find anagrams based on
Raises:
SystemExit if the user passes invalid arguments, --version or --help
|
codesearchnet
|
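A small self-contained sketch of the `nargs=1` plus `argparse.REMAINDER` pattern used above; the option names mirror the code, the input letters are invented:

import argparse

parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--starts-with', '-s', dest='starts_with', default='', nargs=1, type=str)
parser.add_argument(dest='wordlist', nargs=argparse.REMAINDER)

ns = parser.parse_args(['-s', 'qu', 'aeinrst'])
# nargs=1 wraps the value in a list, which is why the caller unpacks settings.starts_with[0]
print(ns.starts_with[0], ns.wordlist)   # qu ['aeinrst']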
def get_weights(model_hparams, vocab_size, hidden_dim=None):
if hidden_dim is None:
hidden_dim = model_hparams.hidden_size
num_shards = model_hparams.symbol_modality_num_shards
shards = []
for i in range(num_shards):
shard_size = (vocab_size // num_shards) + (1 if i < vocab_size % num_shards else 0)
var_name = "weights_%d" % i
shards.append(
tf.get_variable(
var_name, [shard_size, hidden_dim],
initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
if num_shards == 1:
ret = shards[0]
else:
ret = tf.concat(shards, 0)
if not tf.executing_eagerly():
ret = common_layers.convert_gradient_to_tensor(ret)
return ret
|
Create or get concatenated embedding or softmax variable.
Args:
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
hidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size
Returns:
a list of num_shards Tensors.
|
juraj-google-style
|
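A quick worked example of the shard-size arithmetic above, with made-up numbers (a vocabulary of 10 split over 3 shards):

vocab_size, num_shards = 10, 3
sizes = [vocab_size // num_shards + (1 if i < vocab_size % num_shards else 0)
         for i in range(num_shards)]
print(sizes, sum(sizes))   # [4, 3, 3] 10 -- the remainder goes to the first shards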
def _check_job_status(self, job, desc, status_key_name):
status = desc[status_key_name]
status = _STATUS_CODE_TABLE.get(status, status)
if ((status != 'Completed') and (status != 'Stopped')):
reason = desc.get('FailureReason', '(No reason provided)')
job_type = status_key_name.replace('JobStatus', ' job')
raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason))
|
Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails.
|
codesearchnet
|
def detect_extracellular_compartment(model):
extracellular_key = Counter()
for reaction in model.reactions:
equation = reaction.equation
if (equation is None):
continue
if (len(equation.compounds) == 1):
(compound, _) = equation.compounds[0]
compartment = compound.compartment
extracellular_key[compartment] += 1
if (len(extracellular_key) == 0):
return None
else:
(best_key, _) = extracellular_key.most_common(1)[0]
logger.info('{} is extracellular compartment'.format(best_key))
return best_key
|
Detect the identifier for equations with extracellular compartments.
Args:
model: :class:`NativeModel`.
|
codesearchnet
|
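A minimal sketch of the counting heuristic above using only `collections.Counter`; the compartment labels are invented:

from collections import Counter

# Compartments seen on single-compound (exchange) reactions, one label per reaction.
extracellular_key = Counter(['e', 'e', 'e', 'c'])
best_key, _ = extracellular_key.most_common(1)[0]
print(best_key)   # 'e' -- the most frequent compartment wins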
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
match = Match('^(.*)\\[\\s*(?:=|&[^\\w])', line)
if match:
(line, _, pos) = CloseExpression(clean_lines, linenum, len(match.group(1)))
if ((pos >= 0) and Match('^\\s*[{(]', line[pos:])):
error(filename, linenum, 'build/c++11', 4, 'Default lambda captures are an unapproved C++ feature.')
|
Check that default lambda captures are not used.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
codesearchnet
|
def GetNames(cls):
names = []
for plugin_class in cls._plugins.values():
name = getattr(plugin_class, 'ARTIFACT_DEFINITION_NAME', None)
if name:
names.append(name)
return names
|
Retrieves the names of the registered artifact definitions.
Returns:
list[str]: registered artifact definitions names.
|
codesearchnet
|
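A standalone sketch of the `getattr`-based name collection above, with stand-in plugin classes rather than dfWinReg's own registry:

class _PluginA:
    ARTIFACT_DEFINITION_NAME = 'WindowsRunKeys'

class _PluginB:
    pass   # no ARTIFACT_DEFINITION_NAME attribute, so it is skipped

_plugins = {'a': _PluginA, 'b': _PluginB}
names = [n for n in (getattr(p, 'ARTIFACT_DEFINITION_NAME', None) for p in _plugins.values()) if n]
print(names)   # ['WindowsRunKeys']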
def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):
source_file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
if (not source_file_object):
return
try:
with open(destination_file, 'wb') as destination_file_object:
source_file_object.seek(0, os.SEEK_SET)
data = source_file_object.read(self._COPY_BUFFER_SIZE)
while data:
destination_file_object.write(data)
data = source_file_object.read(self._COPY_BUFFER_SIZE)
finally:
source_file_object.close()
|
Writes the contents of the source file entry to a destination file.
Note that this function will overwrite an existing file.
Args:
file_entry (dfvfs.FileEntry): file entry whose content is to be written.
data_stream_name (str): name of the data stream whose content is to be
written.
destination_file (str): path of the destination file.
|
codesearchnet
|
def pop_callback(self, callback):
return self.callback_handler.pop_callback(callback)
|
Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.
If the callback is not found, returns `None` (and no error is raised).
Args:
callback (`type` or [`~transformers.TrainerCallback]`):
A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
first case, will pop the first member of that class found in the list of callbacks.
Returns:
[`~transformers.TrainerCallback`]: The callback removed, if found.
|
github-repos
|
def log2(value: types.FloatTensor) -> types.FloatTensor:
return tf.math.log(value) / tf.math.log(tf.constant(2, dtype=value.dtype))
|
Returns the point-wise base-2 logarithm a given `Tensor`.
```python
import tensorflow as tf
import tf_quant_finance as tff
# Example: Computing the base-2 logarithm of a given vector.
tff.math.qmc.utils.log2(tf.constant([1, 2, 4, 8, 16], dtype=tf.float32))
# ==> tf.Tensor([0., 1., 2., 3., 4.], shape=(5,), dtype=float32)
```
Args:
value: Positive scalar `Tensor` of real values.
Returns:
`Tensor` with the same `shape` and `dtype` as `value` equal to `ln(value) /
ln(2)`.
|
github-repos
|
def _build_job_meta(cls, job_dir):
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if not meta:
job_name = job_dir.split("/")[-1]
user = os.environ.get("USER", None)
meta = {
"job_id": job_name,
"job_name": job_name,
"user": user,
"type": "ray",
"start_time": os.path.getctime(job_dir),
"end_time": None,
"best_trial_id": None,
}
if meta.get("start_time", None):
meta["start_time"] = timestamp2date(meta["start_time"])
return meta
|
Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info.
|
juraj-google-style
|
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
agg_grads = []
num_devices = len(avail_devices)
group_size = num_devices
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
group_0_device_grads = single_grads[group_0_begin:group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(group_0_device_grads, False, False)
group_1_device_grads = single_grads[group_1_begin:group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(group_1_device_grads, False, False)
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy([group_0_agg_grads, group_1_agg_grads], False, False)
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append([(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
|
Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
|
github-repos
|
def attachment_to_multidim_measurement(attachment, name=None):
data = json.loads(attachment.data)
name = (name or data.get('name'))
attachment_dims = data.get('dimensions', [])
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if (attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME):
try:
attachment_outcome_str = test_runs_pb2.Status.Name(int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(attachment_outcome_str)
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
if (attachment_values and (len(dims) == len(attachment_values[0]))):
units_ = dims[(- 1)].unit
dimensions = dims[:(- 1)]
else:
units_ = None
dimensions = dims
measured_value = measurements.DimensionedMeasuredValue(name=name, num_dimensions=len(dimensions))
for row in attachment_values:
coordinates = tuple(row[:(- 1)])
val = row[(- 1)]
measured_value[coordinates] = val
measurement = measurements.Measurement(name=name, units=units_, dimensions=tuple(dimensions), measured_value=measured_value, outcome=outcome)
return measurement
|
Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
|
codesearchnet
|
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf",
sigma=0.05, step=0.01):
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
|
Save matplotlib plot of the spectre to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to pdf.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
|
juraj-google-style
|
def _remove_squeezable_dimensions(predictions, labels, weights):
predictions = ops.convert_to_tensor(predictions)
if labels is not None:
labels, predictions = confusion_matrix.remove_squeezable_dimensions(labels, predictions)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if weights is None:
return (predictions, labels, None)
weights = ops.convert_to_tensor(weights)
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if weights_rank == 0:
return (predictions, labels, weights)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
if predictions_rank is not None and weights_rank is not None:
if weights_rank - predictions_rank == 1:
weights = array_ops.squeeze(weights, [-1])
elif predictions_rank - weights_rank == 1:
weights = array_ops.expand_dims(weights, [-1])
else:
weights_rank_tensor = array_ops.rank(weights)
rank_diff = weights_rank_tensor - array_ops.rank(predictions)
def _maybe_expand_weights():
return cond.cond(math_ops.equal(rank_diff, -1), lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)
if weights_rank is not None and (not weights_shape.dims[-1].is_compatible_with(1)):
maybe_squeeze_weights = lambda: weights
else:
maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])
def _maybe_adjust_weights():
return cond.cond(math_ops.equal(rank_diff, 1), maybe_squeeze_weights, _maybe_expand_weights)
weights = cond.cond(math_ops.equal(weights_rank_tensor, 0), lambda: weights, _maybe_adjust_weights)
return (predictions, labels, weights)
|
Squeeze or expand last dim if needed.
Squeezes last dim of `predictions` or `labels` if their rank differs by 1
(using confusion_matrix.remove_squeezable_dimensions).
Squeezes or expands last dim of `weights` if its rank differs by 1 from the
new rank of `predictions`.
If `weights` is scalar, it is kept scalar.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Optional label `Tensor` whose dimensions match `predictions`.
weights: Optional weight scalar or `Tensor` whose dimensions match
`predictions`.
Returns:
Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
the last dimension squeezed, `weights` could be extended by one dimension.
|
github-repos
|
def _make_request(self, method, path, data=None, **kwargs):
_logger.debug("Method for request is %s" % method)
url = self._construct_full_url(path)
_logger.debug("URL for request is %s" % url)
self._auth_info.populate_request_data(kwargs)
_logger.debug("The arguments are %s" % kwargs)
if self._auth_info._headers:
kwargs.setdefault('headers', {}).update(self._auth_info._headers)
res = requests.request(method, url, data=data, **kwargs)
if res.ok:
_logger.debug("Request was successful.")
return res.content.decode('utf-8')
if hasattr(res, 'content'):
_logger.debug("Response was %s:%s", res.status_code, res.content)
raise self._exception_for(res.status_code)(
res.content, http_code=res.status_code
)
else:
msg = "No response from URL: %s" % res.request.url
_logger.error(msg)
raise NoResponseError(msg)
|
Make a request.
Use the `requests` module to actually perform the request.
Args:
`method`: The method to use.
`path`: The path to the resource.
`data`: Any data to send (for POST and PUT requests).
`kwargs`: Other parameters for `requests`.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
|
juraj-google-style
|
class AverageScore(ScoreAggregation):
def __init__(self, **kwargs):
super().__init__(agg_func=statistics.mean, **kwargs)
|
Aggregates anomaly scores by calculating their average.
This `AggregationFn` computes the average of the anomaly scores from a
collection of `AnomalyPrediction` objects.
Args:
**kwargs: Additional keyword arguments to pass to the base
`ScoreAggregation` class.
|
github-repos
|
def decode(byte_str, allow_none=False):
if ((byte_str is None) and allow_none):
return ''
if (not isinstance(byte_str, bytes)):
raise ValueError('The argument {} must be a bytes object.'.format(byte_str))
if (sys.version_info >= (3, 0)):
return byte_str.decode('ascii')
else:
return byte_str
|
Make this unicode in Python 3, otherwise leave it as bytes.
Args:
byte_str: The byte string to decode.
allow_none: If true, then we will allow byte_str to be None in which
case we will return an empty string. TODO(rkn): Remove this flag.
This is only here to simplify upgrading to flatbuffers 1.10.0.
Returns:
A byte string in Python 2 and a unicode string in Python 3.
|
codesearchnet
|
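A minimal standalone sketch of the Python 3 branch of `decode` above, with the same behavior:

def decode_sketch(byte_str, allow_none=False):
    if byte_str is None and allow_none:
        return ''
    if not isinstance(byte_str, bytes):
        raise ValueError('The argument {} must be a bytes object.'.format(byte_str))
    return byte_str.decode('ascii')   # Python 3: bytes -> str

assert decode_sketch(b'hello') == 'hello'
assert decode_sketch(None, allow_none=True) == ''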
class StableDropout(nn.Module):
def __init__(self, drop_prob):
super().__init__()
self.drop_prob = drop_prob
self.count = 0
self.context_stack = None
def forward(self, x):
if self.training and self.drop_prob > 0:
return XDropout.apply(x, self.get_context())
return x
def clear_context(self):
self.count = 0
self.context_stack = None
def init_context(self, reuse_mask=True, scale=1):
if self.context_stack is None:
self.context_stack = []
self.count = 0
for c in self.context_stack:
c.reuse_mask = reuse_mask
c.scale = scale
def get_context(self):
if self.context_stack is not None:
if self.count >= len(self.context_stack):
self.context_stack.append(DropoutContext())
ctx = self.context_stack[self.count]
ctx.dropout = self.drop_prob
self.count += 1
return ctx
else:
return self.drop_prob
|
Optimized dropout module for stabilizing the training
Args:
drop_prob (float): the dropout probability
|
github-repos
|
def persist_perf(run, session, svg_path):
from benchbuild.utils import schema as s
with open(svg_path, 'r') as svg_file:
svg_data = svg_file.read()
session.add(s.Metadata(name='perf.flamegraph', value=svg_data, run_id=run.id))
|
Persist the flamegraph in the database.
The flamegraph exists as an SVG image on disk until we persist it in the
database.
Args:
run: The run we attach these perf measurements to.
session: The db transaction we belong to.
svg_path: The path to the SVG file we want to store.
|
codesearchnet
|
def parse(self, content):
declarations = self.REGEX_IMPORT_RULE.findall(
self.remove_comments(content)
)
return self.flatten_rules(declarations)
|
Parse a stylesheet document with a regex (``REGEX_IMPORT_RULE``)
to extract all import rules and return them.
Args:
content (str): A SCSS source.
Returns:
list: Found paths in import rules.
|
juraj-google-style
|
def round(x, decimals=0):
if any_symbolic_tensors((x,)):
return Round(decimals).symbolic_call(x)
return backend.numpy.round(x, decimals)
|
Evenly round to the given number of decimals.
Args:
x: Input tensor.
decimals: Number of decimal places to round to. Defaults to `0`.
Returns:
Output tensor.
|
github-repos
|
def _encode_queries(self, program: cfg.Program) -> list[dict[str, Any]]:
metrics = program.calculate_metrics()
solvers = metrics.solver_metrics
enc_queries = []
query_id = -1
for solver_idx, solver in enumerate(solvers):
for query in solver.query_metrics:
query_id += 1
steps = []
for step in query.steps:
steps.append({'_type': 'QueryStep', 'node': step.node, 'depth': step.depth, 'bindings': step.bindings})
enc_queries.append({'_type': 'Query', 'solver_idx': solver_idx, 'start_node': query.start_node, 'end_node': query.end_node, 'initial_binding_count': query.initial_binding_count, 'shortcircuited': query.shortcircuited, 'from_cache': query.from_cache, 'steps': steps})
return enc_queries
|
Encodes information about solver queries from a Program's metrics.
The queries are numbered in the order they were recorded.
Args:
program: a cfg.Program.
Returns:
A list of dictionaries that correspond to SerializedQuery.
|
github-repos
|
async def _handle_watermark_notification(self, watermark_notification):
conv_id = watermark_notification.conversation_id.id
res = parsers.parse_watermark_notification(watermark_notification)
await self.on_watermark_notification.fire(res)
try:
conv = await self._get_or_fetch_conversation(conv_id)
except exceptions.NetworkError:
logger.warning(
'Failed to fetch conversation for watermark notification: %s',
conv_id
)
else:
await conv.on_watermark_notification.fire(res)
|
Receive WatermarkNotification and update the conversation.
Args:
watermark_notification: hangouts_pb2.WatermarkNotification instance
|
juraj-google-style
|
def _create_m_objective(w, X):
(clusters, cells) = w.shape
genes = X.shape[0]
w_sum = w.sum(1)
def objective(m):
m = m.reshape((X.shape[0], w.shape[0]))
d = (m.dot(w) + eps)
temp = (X / d)
w2 = w.dot(temp.T)
deriv = (w_sum - w2.T)
return ((np.sum((d - (X * np.log(d)))) / genes), (deriv.flatten() / genes))
return objective
|
Creates an objective function and its derivative for M, given W and X
Args:
w (array): clusters x cells
X (array): genes x cells
|
codesearchnet
|
def update(self, data=None, timeout=-1, force=True):
uri = self.data['uri']
resource = deepcopy(self.data)
resource.update(data)
self.data = self._helper.update(resource, uri, force, timeout)
return self
|
Updates server profile template.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
force: Force the update operation.
Returns:
A dict with the updated resource data.
|
juraj-google-style
|
def parse_report_file(input_, nameservers=None, dns_timeout=2.0, strip_attachment_payloads=False, parallel=False):
if (type(input_) == str):
file_object = open(input_, 'rb')
elif (type(input_) == bytes):
file_object = BytesIO(input_)
else:
file_object = input_
content = file_object.read()
try:
report = parse_aggregate_report_file(content, nameservers=nameservers, dns_timeout=dns_timeout, parallel=parallel)
results = OrderedDict([('report_type', 'aggregate'), ('report', report)])
except InvalidAggregateReport:
try:
sa = strip_attachment_payloads
results = parse_report_email(content, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=sa, parallel=parallel)
except InvalidDMARCReport:
raise InvalidDMARCReport('Not a valid aggregate or forensic report')
return results
|
Parses a DMARC aggregate or forensic report from a file path, a
file-like object, or bytes.
Args:
input_: A path to a file, a file like object, or bytes
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
dns_timeout (float): Sets the DNS timeout in seconds
strip_attachment_payloads (bool): Remove attachment payloads from
forensic report results
parallel (bool): Parallel processing
Returns:
OrderedDict: The parsed DMARC report
|
codesearchnet
|
def read(self):
try:
buf = os.read(self._fd, 8)
except OSError as e:
raise LEDError(e.errno, ('Reading LED brightness: ' + e.strerror))
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, ('Rewinding LED brightness: ' + e.strerror))
return int(buf)
|
Read the brightness of the LED.
Returns:
int: Current brightness.
Raises:
LEDError: if an I/O or OS error occurs.
|
codesearchnet
|
def call(self, x, y, bias, cache=None):
q = self.q_dense_layer(x)
k = self.k_dense_layer(y)
v = self.v_dense_layer(y)
if (cache is not None):
k = tf.concat([cache['k'], k], axis=1)
v = tf.concat([cache['v'], v], axis=1)
cache['k'] = k
cache['v'] = v
q = self.split_heads(q)
k = self.split_heads(k)
v = self.split_heads(v)
depth = (self.hidden_size // self.num_heads)
q *= (depth ** (- 0.5))
logits = tf.matmul(q, k, transpose_b=True)
logits += bias
weights = tf.nn.softmax(logits, name='attention_weights')
if self.train:
mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT, value=self.attention_dropout)
weights = tf.nn.dropout(weights, (1.0 - self.attention_dropout))
attention_output = tf.matmul(weights, v)
attention_output = self.combine_heads(attention_output)
attention_output = self.output_dense_layer(attention_output)
return attention_output
|
Apply attention mechanism to x and y.
Args:
x: a tensor with shape [batch_size, length_x, hidden_size]
y: a tensor with shape [batch_size, length_y, hidden_size]
bias: attention bias that will be added to the result of the dot product.
cache: (Used during prediction) dictionary with tensors containing results
of previous attentions. The dictionary must have the items:
{"k": tensor with shape [batch_size, i, key_channels],
"v": tensor with shape [batch_size, i, value_channels]}
where i is the current decoded length.
Returns:
Attention layer output with shape [batch_size, length_x, hidden_size]
|
codesearchnet
|
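A hedged NumPy sketch of the scaled dot-product step in `call` above, with a single head and no bias, dropout, or cache; shapes are made up:

import numpy as np

batch, len_x, len_y, depth = 2, 4, 5, 8
q = np.random.rand(batch, len_x, depth)
k = np.random.rand(batch, len_y, depth)
v = np.random.rand(batch, len_y, depth)

q = q * depth ** -0.5                                               # scale queries by 1/sqrt(depth)
logits = q @ k.transpose(0, 2, 1)                                   # (batch, len_x, len_y)
weights = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)    # softmax over len_y
out = weights @ v
print(out.shape)                                                    # (2, 4, 8)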
def parse_value(self, text: str) -> Optional[bool]:
if (text == 'true'):
return True
if (text == 'false'):
return False
|
Parse boolean value.
Args:
text: String representation of the value.
|
codesearchnet
|
def mach53(msg):
d = hex2bin(data(msg))
if d[23] == '0':
return None
mach = bin2int(d[24:33]) * 0.008
return round(mach, 3)
|
MACH number, BDS 5,3 message
Args:
msg (String): 28 bytes hexadecimal message
Returns:
float: MACH number
|
juraj-google-style
|
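A tiny worked example of the 9-bit Mach decoding arithmetic in `mach53`; the bit string is invented, the 0.008 resolution comes from the code above:

bits = '001111101'            # hypothetical d[24:33] field, i.e. 125 in binary
mach = int(bits, 2) * 0.008   # 9-bit value times the 0.008 Mach resolution
print(round(mach, 3))         # 1.0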
def synchronize_clock(self, offset):
self.time_offset = offset - self.uptime
self.is_utc = True
if self.has_rtc:
self.stored_offset = self.time_offset
|
Persistently synchronize the clock to UTC time.
Args:
offset (int): The number of seconds since 1/1/2000 00:00Z
|
juraj-google-style
|
def add_reader(self, fd: IFileLike, callback: typing.Callable[([IFileLike], typing.Any)]) -> None:
raise NotImplementedError()
|
Add a file descriptor to the processor and wait for READ.
Args:
fd (IFileLike): Any obect that exposes a 'fileno' method that
returns a valid file descriptor integer.
callback (typing.Callable[[IFileLike], typing.Any]): A function
that consumes the IFileLike object whenever the READ event is
fired.
|
codesearchnet
|
def handle_enterprise_logistration(backend, user, **kwargs):
request = backend.strategy.request
enterprise_customer = get_enterprise_customer_for_running_pipeline(
request,
{
'backend': backend.name,
'kwargs': kwargs
}
)
if enterprise_customer is None:
return
enterprise_customer_user, _ = EnterpriseCustomerUser.objects.update_or_create(
enterprise_customer=enterprise_customer,
user_id=user.id
)
enterprise_customer_user.update_session(request)
|
Perform the linking of user in the process of logging to the Enterprise Customer.
Args:
backend: The class handling the SSO interaction (SAML, OAuth, etc)
user: The user object in the process of being logged in with
**kwargs: Any remaining pipeline variables
|
juraj-google-style
|
def _GetResponseClass(self, method_descriptor):
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class
|
Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
|
juraj-google-style
|
def __init__(self, finders=None):
if finders is None:
self.finders = [LocalPackageFinder(), InstalledPackageFinder()]
else:
self.finders = [f() for f in finders]
|
Initialization method.
Args:
finders (list of classes):
list of package finder classes (not instances) in a specific
order. Default: [LocalPackageFinder, InstalledPackageFinder].
|
juraj-google-style
|
def set_button_map(self, button_map):
assert (self.finger_count > 0), 'This device does not support tapping'
return self._libinput.libinput_device_config_tap_set_button_map(self._handle, button_map)
|
Set the finger number to button number mapping for tap-to-click.
The default mapping on most devices is to have a 1, 2 and 3 finger tap
to map to the left, right and middle button, respectively. A device may
permit changing the button mapping but disallow specific maps. In this
case :attr:`~libinput.constant.ConfigStatus.UNSUPPORTED` is returned,
the caller is expected to handle this case correctly.
Changing the button mapping may not take effect immediately, the device
may wait until it is in a neutral state before applying any changes.
The mapping may be changed when tap-to-click is disabled. The new
mapping takes effect when tap-to-click is enabled in the future.
If :attr:`finger_count` is 0, this method raises :exc:`AssertionError`.
Args:
button_map (~libinput.constant.TapButtonMap): The new
finger-to-button number mapping.
Returns:
~libinput.constant.ConfigStatus: A config status code.
Raises:
AssertionError
|
codesearchnet
|
def get_public_datasets_and_tokens(self):
datasets = {}
tokens = self.get_public_tokens()
for t in tokens:
dataset = self.get_token_dataset(t)
if dataset in datasets:
datasets[dataset].append(t)
else:
datasets[dataset] = [t]
return datasets
|
NOTE: VERY SLOW!
Get a dictionary relating key:dataset to value:[tokens] that rely
on that dataset.
Arguments:
None
Returns:
dict: relating key:dataset to value:[tokens]
|
juraj-google-style
|
def _escaped_token_to_subtoken_ids(self, escaped_token):
return [
self._subtoken_string_to_id[subtoken]
for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
]
|
Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
|
juraj-google-style
|
def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
with open(client_secrets_file, 'r') as json_file:
client_config = json.load(json_file)
return cls.from_client_config(client_config, scopes=scopes, **kwargs)
|
Creates a :class:`Flow` instance from a Google client secrets file.
Args:
client_secrets_file (str): The path to the client secrets .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
|
juraj-google-style
|
def __init__(self, function_name):
params = {
'FUNCTION_NAME': function_name
}
super().__init__(
'int', 'lib_nmsimplex', [],
resource_filename('mot', 'data/opencl/lib_nmsimplex.cl'),
var_replace_dict=params)
|
The NMSimplex algorithm as a reusable library component.
Args:
function_name (str): the name of the evaluation function to call, defaults to 'evaluate'.
This should point to a function with signature:
``double evaluate(local mot_float_type* x, void* data_void);``
|
juraj-google-style
|
def do_decode(cls, obj, obj_type):
if inspect.isclass(obj_type) and issubclass(
obj_type, ConjureBeanType
):
return cls.decode_conjure_bean_type(obj, obj_type)
elif inspect.isclass(obj_type) and issubclass(
obj_type, ConjureUnionType
):
return cls.decode_conjure_union_type(obj, obj_type)
elif inspect.isclass(obj_type) and issubclass(
obj_type, ConjureEnumType
):
return cls.decode_conjure_enum_type(obj, obj_type)
elif isinstance(obj_type, DictType):
return cls.decode_dict(obj, obj_type.key_type, obj_type.value_type)
elif isinstance(obj_type, ListType):
return cls.decode_list(obj, obj_type.item_type)
elif isinstance(obj_type, OptionalType):
return cls.decode_optional(obj, obj_type.item_type)
return cls.decode_primitive(obj, obj_type)
|
Decodes json into the specified type
Args:
obj: the json object to decode
obj_type: a class object which is the type we're decoding into.
|
juraj-google-style
|
def docx_process_simple_text(text: str, width: int) -> str:
if width:
return '\n'.join(textwrap.wrap(text, width=width))
else:
return text
|
Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text
|
juraj-google-style
|
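A quick usage sketch of the word-wrapping behavior above, using only the standard library; the sample text is invented:

import textwrap

def wrap_sketch(text, width):
    # width == 0 means "skip wrapping", mirroring the falsy check above
    return '\n'.join(textwrap.wrap(text, width=width)) if width else text

print(wrap_sketch('the quick brown fox jumps over the lazy dog', 15))
print(wrap_sketch('left untouched', 0))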
def ParseOptions(cls, options, analysis_plugin):
if not isinstance(analysis_plugin, tagging.TaggingAnalysisPlugin):
raise errors.BadConfigObject(
'Analysis plugin is not an instance of TaggingAnalysisPlugin')
tagging_file = cls._ParseStringOption(options, 'tagging_file')
if not tagging_file:
raise errors.BadConfigOption(
'Tagging analysis plugin requires a tagging file.')
tagging_file_path = tagging_file
if not os.path.isfile(tagging_file_path):
data_location = getattr(options, 'data_location', None)
if data_location:
tagging_file_path = os.path.join(data_location, tagging_file)
if not os.path.isfile(tagging_file_path):
raise errors.BadConfigOption(
'No such tagging file: {0:s}.'.format(tagging_file))
try:
analysis_plugin.SetAndLoadTagFile(tagging_file_path)
except UnicodeDecodeError:
raise errors.BadConfigOption(
'Invalid tagging file: {0:s} encoding must be UTF-8.'.format(
tagging_file))
except errors.TaggingFileError as exception:
raise errors.BadConfigOption(
'Unable to read tagging file: {0:s} with error: {1!s}'.format(
tagging_file, exception))
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (AnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
|
juraj-google-style
|
def has_arg(fn, arg_name):
if (sys.version_info < (3,)):
if (isinstance(fn, types.FunctionType) or isinstance(fn, types.MethodType)):
arg_spec = inspect.getargspec(fn)
else:
try:
arg_spec = inspect.getargspec(fn.__call__)
except AttributeError:
return False
return (arg_name in arg_spec.args)
elif (sys.version_info < (3, 6)):
arg_spec = inspect.getfullargspec(fn)
return ((arg_name in arg_spec.args) or (arg_name in arg_spec.kwonlyargs))
else:
try:
signature = inspect.signature(fn)
except ValueError:
signature = inspect.signature(fn.__call__)
parameter = signature.parameters.get(arg_name)
if (parameter is None):
return False
return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY))
|
Checks if a callable accepts a given keyword argument.
Args:
fn: callable to inspect
arg_name: string, keyword argument name to check
Returns:
bool, whether `fn` accepts an `arg_name` keyword argument.
|
codesearchnet
|
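A standalone sketch of the Python 3.6+ branch of `has_arg`, plus a quick check; the sample function is invented:

import inspect

def has_kwarg(fn, arg_name):
    try:
        signature = inspect.signature(fn)
    except ValueError:
        signature = inspect.signature(fn.__call__)
    parameter = signature.parameters.get(arg_name)
    return parameter is not None and parameter.kind in (
        inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)

def sample(a, *, b=1):
    return a + b

assert has_kwarg(sample, 'b')       # keyword-only argument is accepted
assert not has_kwarg(sample, 'c')   # unknown name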
def _read_file(filename):
graph_def = graph_pb2.GraphDef()
if not file_io.file_exists(filename):
raise IOError(f'File {filename} does not exist.')
with file_io.FileIO(filename, 'rb') as f:
file_content = f.read()
try:
graph_def.ParseFromString(file_content)
return graph_def
except Exception:
pass
try:
text_format.Merge(file_content, graph_def)
except text_format.ParseError as e:
raise IOError(f'Cannot parse file {filename}: {str(e)}.')
return graph_def
|
Reads a file containing `GraphDef` and returns the protocol buffer.
Args:
filename: `graph_def` filename including the path.
Returns:
A `GraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
|
github-repos
|
def is_locator(self, path, relative=False):
if (not relative):
path = self.relpath(path)
return (path and ('/' not in path.rstrip('/')))
|
Returns True if path refer to a locator.
Depending the storage, locator may be a bucket or container name,
a hostname, ...
args:
path (str): path or URL.
relative (bool): Path is relative to current root.
Returns:
bool: True if locator.
|
codesearchnet
|
def build(cls, local_scheduler=True, **task_params):
luigi.build([cls(**task_params)], local_scheduler=local_scheduler)
|
Instantiate the task and build it with luigi
Args:
local_scheduler (bool): use a local scheduler (True, default) or a remote scheduler
task_params: parameters to pass to task for instantiation
|
codesearchnet
|
def FullUpdateFromMap(self, cache, new_map, force_write=False):
error_count = 0
if len(new_map) == 0 and (not force_write):
raise error.EmptyMap('Source map empty during full update, aborting. Use --force-write to override.')
error_count = cache.WriteMap(map_data=new_map, force_write=force_write)
if error_count == 0:
self.WriteModifyTimestamp(new_map.GetModifyTimestamp())
self.WriteUpdateTimestamp()
return error_count
|
Write a new map into the provided cache (overwrites).
Args:
cache: A nss_cache.caches.Cache object.
new_map: A nss_cache.maps.Map object.
force_write: A boolean indicating empty maps are okay to write, defaults
to False which means do not write them.
Returns:
0 if successful, non-zero indicating number of failures otherwise.
Raises:
EmptyMap: Update is an empty map, not raised if force_write=True.
|
github-repos
|
def key_for_entity_group(cls, key):
return model.Key(cls.KIND_NAME, cls.ID, parent=key.root())
|
Return the key for the entity group containing key.
Args:
key: a key for an entity group whose __entity_group__ key you want.
Returns:
The __entity_group__ key for the entity group containing key.
|
codesearchnet
|
def build_machine(network=None, machine_type=None, preemptible=None, service_account=None, boot_disk_size_gb=None, disks=None, accelerators=None, labels=None, cpu_platform=None, nvidia_driver_version=None):
return {'network': network, 'machineType': machine_type, 'preemptible': preemptible, 'serviceAccount': service_account, 'bootDiskSizeGb': boot_disk_size_gb, 'disks': disks, 'accelerators': accelerators, 'labels': labels, 'cpuPlatform': cpu_platform, 'nvidiaDriverVersion': nvidia_driver_version}
|
Build a VirtualMachine object for a Pipeline request.
Args:
network (dict): Network details for the pipeline to run in.
machine_type (str): GCE Machine Type string for the pipeline.
preemptible (bool): Use a preemptible VM for the job.
service_account (dict): Service account configuration for the VM.
boot_disk_size_gb (int): Boot disk size in GB.
disks (list[dict]): List of disks to mount.
accelerators (list[dict]): List of accelerators to attach to the VM.
labels (dict[string, string]): Labels for the VM.
cpu_platform (str): The CPU platform to request.
nvidia_driver_version (str): The NVIDIA driver version to use when attaching
an NVIDIA GPU accelerator.
Returns:
An object representing a VirtualMachine.
|
codesearchnet
|
def _create_dir_path(self, file_hash, path=None, hash_list=None):
if (hash_list is None):
hash_list = list(file_hash)
if (not hash_list):
raise IOError('Directory structure is too full!')
if (not path):
path = os.path.join(self.path, hash_list.pop(0))
if (not os.path.exists(path)):
os.mkdir(path)
return self._create_dir_path(file_hash=file_hash, path=path, hash_list=hash_list)
files = os.listdir(path)
if (file_hash in files):
return path
if (len(files) < self.dir_limit):
return path
return self._create_dir_path(file_hash=file_hash, path=os.path.join(path, hash_list.pop(0)), hash_list=hash_list)
|
Create proper filesystem paths for given `file_hash`.
Args:
file_hash (str): Hash of the file for which the path should be
created.
path (str, default None): Recursion argument, don't set this.
hash_list (list, default None): Recursion argument, don't set this.
Returns:
str: Created path.
|
codesearchnet
|
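A standalone sketch of the hash-sharded directory layout that the recursive method above maintains; `shard_path` is a made-up helper, not part of the original class.
import os

def shard_path(root, file_hash, depth=2):
    # Nest one directory level per leading character of the hash.
    path = root
    for ch in file_hash[:depth]:
        path = os.path.join(path, ch)
        os.makedirs(path, exist_ok=True)
    return path

# shard_path('/tmp/storage', 'ab12cd') -> '/tmp/storage/a/b'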
def prune_neighbors(self):
def _neighbor_check(neighbors, valid):
if (not (neighbors == neighbors)):
return np.nan
valid_keys = (set(valid) & set(neighbors.keys()))
d = dict([(k, v) for (k, v) in neighbors.items() if (k in valid_keys)])
return d
fixed = self.copy()
valid = self.get_valid_cell_indecies()
valid = pd.DataFrame(self).merge(valid, on=self.frame_columns).set_index((self.frame_columns + ['cell_index']))
valid = valid.apply((lambda x: _neighbor_check(x['neighbors'], x['valid'])), 1).reset_index().rename(columns={0: 'new_neighbors'})
fixed = fixed.merge(valid, on=(self.frame_columns + ['cell_index'])).drop(columns='neighbors').rename(columns={'new_neighbors': 'neighbors'})
fixed.microns_per_pixel = self.microns_per_pixel
fixed.db = self.db
return fixed
|
If the CellDataFrame has been subsetted, some of the cell-cell contacts may no longer be part of the dataset. This prunes those no-longer existent connections.
Returns:
CellDataFrame: A CellDataFrame with only valid cell-cell contacts
|
codesearchnet
|
def __init__(self, couplinglist=None):
self.graph = nx.DiGraph()
self._dist_matrix = None
self._qubit_list = None
if couplinglist is not None:
for source, target in couplinglist:
self.add_edge(source, target)
|
Create coupling graph. By default, the generated coupling has no nodes.
Args:
couplinglist (list or None): An initial coupling graph, specified as
an adjacency list containing couplings, e.g. [[0,1], [0,2], [1,2]].
|
juraj-google-style
|
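What the constructor above does with an adjacency list, shown directly with networkx; the name of the wrapping class is not given in the row, so only the graph-building step is reproduced.
import networkx as nx

graph = nx.DiGraph()
for source, target in [[0, 1], [0, 2], [1, 2]]:
    graph.add_edge(source, target)
print(list(graph.edges()))  # [(0, 1), (0, 2), (1, 2)]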
def add_note(self, note):
if (type(note) == str):
return self.update_note({'content': note})
elif ((type(note) == dict) and ('content' in note)):
return self.update_note(note)
else:
return ('No string or valid note.', (- 1))
|
Wrapper method to add a note
The method can be passed the note as a dict with the `content`
property set, which is then directly sent to the web service for
creation. Alternatively, the body alone can be passed as a string. In
this case the parameter is used as `content` for the new note.
Arguments:
- note (dict or string): the note to add
Returns:
A tuple `(note, status)`
- note (dict): the newly created note
- status (int): 0 on success and -1 otherwise
|
codesearchnet
|
def _GetUrl(self, url_id, cache, database):
url_cache_results = cache.GetResults('url')
if not url_cache_results:
result_set = database.Query(self.URL_CACHE_QUERY)
cache.CacheQueryResults(
result_set, 'url', 'id', ('url', 'rev_host'))
url_cache_results = cache.GetResults('url')
url, reverse_host = url_cache_results.get(url_id, ['', ''])
if not url:
return ''
hostname = self._ReverseHostname(reverse_host)
return '{0:s} ({1:s})'.format(url, hostname)
|
Retrieves a URL from a reference to an entry in the from_visit table.
Args:
url_id (str): identifier of the visited URL.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: URL and hostname.
|
juraj-google-style
|
def map(self, map_fn, desc=None):
if (desc is None):
desc = getattr(map_fn, '__name__', '')
desc = u'map({})'.format(desc)
return self.transform((lambda xs: (map_fn(x) for x in xs)), desc=desc)
|
Return a copy of this query, with the values mapped through `map_fn`.
Args:
map_fn (callable): A callable that takes a single argument and returns a new value.
Keyword Args:
desc (str): A description of the mapping transform, for use in log messages.
Defaults to the name of the map function.
Returns:
Query
|
codesearchnet
|
def add_alias(self, alias, name, op=None):
alias = self.aliases.find_by_name(alias)
name = self.aliases.find_by_name(name)
if alias == name:
return
elif alias in self and name in self:
self._merge(alias, name, op)
elif alias not in self and name not in self:
self.aliases.merge(alias, name)
elif alias in self:
root = self.aliases.merge(alias, name)
self._copy_item(alias, root)
elif name in self:
root = self.aliases.merge(alias, name)
self._copy_item(name, root)
|
Alias 'alias' to 'name'.
After aliasing, `alias` and `name` are treated as representing the same
name. The values are merged if `op` is provided.
Args:
alias: A string.
name: A string.
op: The function used to merge the values.
|
github-repos
|
def pixel_image(shape, sd=None, init_val=None):
if ((sd is not None) and (init_val is not None)):
warnings.warn('`pixel_image` received both an initial value and a sd argument. Ignoring sd in favor of the supplied initial value.')
sd = (sd or 0.01)
init_val = (init_val or np.random.normal(size=shape, scale=sd).astype(np.float32))
return tf.Variable(init_val)
|
A naive, pixel-based image parameterization.
Defaults to a random initialization, but can take a supplied init_val argument
instead.
Args:
shape: shape of resulting image, [batch, width, height, channels].
sd: standard deviation of param initialization noise.
init_val: an initial value to use instead of a random initialization. Needs
to have the same shape as the supplied shape argument.
Returns:
tensor with shape from first argument.
|
codesearchnet
|
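A usage sketch for `pixel_image`, assuming TensorFlow 2 eager execution and the same imports as the row above; the shape and noise scale are arbitrary.
import numpy as np
import tensorflow as tf

img = pixel_image(shape=[1, 64, 64, 3], sd=0.05)
print(img.shape)  # (1, 64, 64, 3), a trainable tf.Variable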
def fetch(self, order_id, data={}, **kwargs):
return super(Order, self).fetch(order_id, data, **kwargs)
|
Fetch Order for given Id
Args:
order_id : Id for which order object has to be retrieved
Returns:
Order dict for given order Id
|
codesearchnet
|
def print_info(self, capture):
self.frame_offset += 1
(ret, frame) = capture.read()
if ret:
print('Capture Information')
print('\tDimensions (HxW): {}x{}'.format(*frame.shape[0:2]))
print('\tColor channels: {}'.format((frame.shape[2] if (len(frame.shape) > 2) else 1)))
print('\tColor range: {}-{}'.format(np.min(frame), np.max(frame)))
print('\tdtype: {}'.format(frame.dtype))
else:
print('No source found.')
|
Prints information about the unprocessed image.
Reads one frame from the source to determine image colors, dimensions
and data types.
Args:
capture: the source to read from.
|
codesearchnet
|
def sym_descendants(self, where: Optional[Callable[[Any], bool]]=None, option: DescendantQueryOption=DescendantQueryOption.ALL, include_self: bool=False) -> List[Any]:
descendants = []
where = where or (lambda x: True)
def visit(k, v, p):
del k, p
if not where(v):
return TraverseAction.ENTER
if not include_self and self is v:
return TraverseAction.ENTER
if option == DescendantQueryOption.IMMEDIATE:
descendants.append(v)
return TraverseAction.CONTINUE
leaf_descendants = []
if isinstance(v, Symbolic):
leaf_descendants = v.sym_descendants(where, option)
if option is DescendantQueryOption.ALL or not leaf_descendants:
descendants.append(v)
descendants.extend(leaf_descendants)
return TraverseAction.CONTINUE
traverse(self, visit)
return descendants
|
Returns all descendants that match the query.
Args:
where: Optional callable object as the filter of descendants to return.
option: Descendant query options, indicating whether all matched,
immediate matched or only the matched leaf nodes will be returned.
include_self: If True, `self` will be included in the query, otherwise
only strict descendants are included.
Returns:
A list of descendant objects that match the query.
|
github-repos
|
def to_dict(self) -> dict[str, Any]:
output = copy.deepcopy(self.__dict__)
output['image_processor_type'] = self.__class__.__name__
return output
|
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance.
|
github-repos
|
def decode_jwt_payload(self, access_token=None):
c = self.get_credentials()
jwt = (access_token or c.access_token)
try:
(_, payload, _) = jwt.split('.')
rem = (len(payload) % 4)
if (rem > 0):
payload += ('=' * (4 - rem))
try:
decoded_jwt = b64decode(payload).decode('utf-8')
except TypeError as e:
raise PanCloudError(('Failed to base64 decode JWT: %s' % e))
else:
try:
x = loads(decoded_jwt)
except ValueError as e:
raise PanCloudError(('Invalid JSON: %s' % e))
except (AttributeError, ValueError) as e:
raise PanCloudError(('Invalid JWT: %s' % e))
return x
|
Extract payload field from JWT.
Args:
access_token (str): Access token to decode. Defaults to ``None``.
Returns:
dict: JSON object that contains the claims conveyed by the JWT.
|
codesearchnet
|
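The same payload-decoding step as a standalone sketch, using `urlsafe_b64decode` since JWT segments are base64url-encoded; `jwt_payload` is an illustrative helper, not part of the class above.
from base64 import urlsafe_b64decode
from json import loads

def jwt_payload(token):
    _, payload, _ = token.split('.')
    payload += '=' * (-len(payload) % 4)  # restore stripped padding
    return loads(urlsafe_b64decode(payload).decode('utf-8'))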
def assemble_common_meta(common_meta_dfs, fields_to_remove, sources, remove_all_metadata_fields, error_report_file):
all_meta_df, all_meta_df_with_dups = build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields)
if not all_meta_df.index.is_unique:
all_report_df = build_mismatched_common_meta_report([x.shape for x in common_meta_dfs],
sources, all_meta_df, all_meta_df_with_dups)
unique_duplicate_ids = all_report_df.index.unique()
if error_report_file is not None:
all_report_df.to_csv(error_report_file, sep="\t")
msg = 'There are inconsistencies in the common metadata between files. Duplicate ids: {}\nReport:\n{}'.format(unique_duplicate_ids, all_report_df)
raise MismatchCommonMetadataConcatException(msg)
all_meta_df_sorted = all_meta_df.sort_index(axis=0)
return all_meta_df_sorted
|
Assemble the common metadata dfs together. Both indices are sorted.
Fields that are not in all the dfs are dropped.
Args:
common_meta_dfs (list of pandas dfs)
fields_to_remove (list of strings): fields to be removed from the
common metadata because they don't agree across files
sources (list of strings): source names corresponding to the input dfs
remove_all_metadata_fields (bool): if True, remove all metadata fields
error_report_file (str or None): path to which a mismatch report is written
Returns:
all_meta_df_sorted (pandas df)
|
juraj-google-style
|
def __init__(self, origin):
super(ShellItemsParser, self).__init__()
self._origin = origin
self._path_segments = []
|
Initializes the parser.
Args:
origin (str): origin of the event.
|
juraj-google-style
|
def search_group_by_id(self, groupID) -> Group:
for g in self.groups:
if g.id == groupID:
return g
return None
|
Searches for a group by the given id.
Args:
groupID (str): id of the group to search for
Returns:
the group object, or None if no matching group is found
|
juraj-google-style
|
def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs):
if (max_procs is None):
max_procs = cpu_count()
kw_arr = saturate_kwargs(keys=keys, **kwargs)
if (len(kw_arr) == 0):
return
if isinstance(affinity, int):
win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity)
task_queue = queue.Queue()
while (len(kw_arr) > 0):
for _ in range(max_procs):
if (len(kw_arr) == 0):
break
kw = kw_arr.pop(0)
p = Process(target=func, kwargs=kw)
p.start()
sys.stdout.flush()
task_queue.put(p)
if show_proc:
signature = ', '.join([f'{k}={v}' for (k, v) in kw.items()])
print(f'[{func.__name__}] ({signature})')
while (not task_queue.empty()):
p = task_queue.get()
p.join()
|
Provide interface for multiprocessing
Args:
func: callable functions
keys: keys in kwargs to expand into separate process invocations
max_procs: maximum number of processes
show_proc: whether to print each process invocation
affinity: CPU affinity
**kwargs: kwargs for func
|
codesearchnet
|
def _get_feature_variable_for_type(self, feature_key, variable_key, variable_type, user_id, attributes):
if (not validator.is_non_empty_string(feature_key)):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))
return None
if (not validator.is_non_empty_string(variable_key)):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('variable_key'))
return None
if (not isinstance(user_id, string_types)):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
if (not self._validate_user_inputs(attributes)):
return None
feature_flag = self.config.get_feature_from_key(feature_key)
if (not feature_flag):
return None
variable = self.config.get_variable_for_feature(feature_key, variable_key)
if (not variable):
return None
if (variable.type != variable_type):
self.logger.warning(('Requested variable type "%s", but variable is of type "%s". Use correct API to retrieve value. Returning None.' % (variable_type, variable.type)))
return None
feature_enabled = False
source_info = {}
variable_value = variable.defaultValue
decision = self.decision_service.get_variation_for_feature(feature_flag, user_id, attributes)
if decision.variation:
feature_enabled = decision.variation.featureEnabled
if feature_enabled:
variable_value = self.config.get_variable_value_for_variation(variable, decision.variation)
self.logger.info(('Got variable value "%s" for variable "%s" of feature flag "%s".' % (variable_value, variable_key, feature_key)))
else:
self.logger.info(('Feature "%s" for variation "%s" is not enabled. Returning the default variable value "%s".' % (feature_key, decision.variation.key, variable_value)))
else:
self.logger.info(('User "%s" is not in any variation or rollout rule. Returning default value for variable "%s" of feature flag "%s".' % (user_id, variable_key, feature_key)))
if (decision.source == enums.DecisionSources.FEATURE_TEST):
source_info = {'experiment_key': decision.experiment.key, 'variation_key': decision.variation.key}
try:
actual_value = self.config.get_typecast_value(variable_value, variable_type)
except:
self.logger.error('Unable to cast value. Returning None.')
actual_value = None
self.notification_center.send_notifications(enums.NotificationTypes.DECISION, enums.DecisionNotificationTypes.FEATURE_VARIABLE, user_id, (attributes or {}), {'feature_key': feature_key, 'feature_enabled': feature_enabled, 'source': decision.source, 'variable_key': variable_key, 'variable_value': actual_value, 'variable_type': variable_type, 'source_info': source_info})
return actual_value
|
Helper method to determine value for a certain variable attached to a feature flag based on type of variable.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
variable_type: Type of variable which could be one of boolean/double/integer/string.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
|
codesearchnet
|
def OpenFile(self, windows_path):
path_spec = self._path_resolver.ResolvePath(windows_path)
if path_spec is None:
return None
return self._file_system.GetFileObjectByPathSpec(path_spec)
|
Opens the file specified by the Windows path.
Args:
windows_path (str): Windows path to the file.
Returns:
FileIO: file-like object or None if the file does not exist.
|
juraj-google-style
|
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `[src_lang_code] X [eos]`
- `labels`: (for decoder) `[tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
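An illustrative round trip with the Hugging Face tokenizer that uses this format; it needs the transformers package and network access to download the checkpoint, so treat it as a sketch.
from transformers import MBart50TokenizerFast

tok = MBart50TokenizerFast.from_pretrained('facebook/mbart-large-50', src_lang='en_XX')
ids = tok('Hello world').input_ids
# ids starts with the en_XX language code and ends with </s>,
# i.e. the `[src_lang_code] X [eos]` layout described above.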
def take_node_screenshot(self, element, screenshot_path):
from PIL import Image
temp_path = os.path.join(tempdir, screenshot_path)
el_x = int(element.location['x'])
el_y = int(element.location['y'])
el_height = int(element.size['height'])
el_width = int(element.size['width'])
if ((el_height == 0) or (el_width == 0)):
self.debug_log('take_node_screenshot cannot be taken because element width or height equal zero')
return False
bounding_box = (el_x, el_y, (el_x + el_width), (el_y + el_height))
self._driver.save_screenshot(temp_path)
base_image = Image.open(temp_path)
cropped_image = base_image.crop(bounding_box)
base_image = base_image.resize(cropped_image.size)
base_image.paste(cropped_image, (0, 0))
base_image.save(screenshot_path)
|
Take a screenshot of a node
Args:
element (object): the proxy_element
screenshot_path (str): the path where the screenshot will be saved
|
codesearchnet
|
def do_command_line(infile: typing.IO[str]) -> int:
lines = infile.readlines()
tree = ast.parse(''.join(lines))
checker = Checker(tree, lines, infile.name)
checker.load()
errors = []
for func in checker.all_funcs(skip_noqa=True):
try:
errors = list(func.check_all())
except ValidationError as error:
errors = [error.to_aaa()]
print(func.__str__(errors), end='')
return len(errors)
|
Currently a small stub to create an instance of Checker for the passed
``infile`` and run its test functions through linting.
Args:
infile
Returns:
int: Number of flake8 errors raised.
|
juraj-google-style
|
def generate(cls, strategy, **kwargs):
assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)
action = getattr(cls, strategy)
return action(**kwargs)
|
Generate a new instance.
The instance will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
Returns:
object: the generated instance
|
juraj-google-style
|
def ParseFileObject(self, parser_mediator, file_object):
file_header_map = self._GetDataTypeMap('systemd_journal_file_header')
try:
(file_header, _) = self._ReadStructureFromFileObject(file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile('Unable to parse file header with error: {0!s}'.format(exception))
if (file_header.signature != self._FILE_SIGNATURE):
raise errors.UnableToParseFile('Invalid file signature.')
if (file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES):
raise errors.UnableToParseFile('Unsupported file header size: {0:d}.'.format(file_header.header_size))
data_hash_table_end_offset = (file_header.data_hash_table_offset + file_header.data_hash_table_size)
field_hash_table_end_offset = (file_header.field_hash_table_offset + file_header.field_hash_table_size)
self._maximum_journal_file_offset = max(data_hash_table_end_offset, field_hash_table_end_offset)
entry_object_offsets = self._ParseEntryObjectOffsets(file_object, file_header.entry_array_offset)
for entry_object_offset in entry_object_offsets:
if (entry_object_offset == 0):
continue
try:
fields = self._ParseJournalEntry(file_object, entry_object_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning('Unable to parse journal entry at offset: 0x{0:08x} with error: {1!s}'.format(entry_object_offset, exception))
return
event_data = SystemdJournalEventData()
event_data.body = fields.get('MESSAGE', None)
event_data.hostname = fields.get('_HOSTNAME', None)
event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None)
if (event_data.reporter and (event_data.reporter != 'kernel')):
event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None))
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=fields['real_time'])
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a Systemd journal file-like object.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
|
codesearchnet
|
def load_dataset(file_path: str, model: NormalizedModel) -> Dataset:
xs = []
ys = array.array('B')
with open(file_path) as f:
for row in f:
cols = row.strip().split('\t')
if len(cols) < 2:
continue
ys.append(cols[0] == '1')
xs.append(tuple((k in set(cols[1:]) for k in model.features)))
X = jnp.array(xs) * 2 - 1
Y = jnp.array(ys)
return Dataset(X, Y)
|
Loads a dataset from the given file path.
Args:
file_path: A file path for the encoded data file.
model: A normalized model.
Returns:
A dataset of inputs (X) and outputs (Y).
|
github-repos
|
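A tiny sketch of the ±1 encoding the loader produces, with made-up feature names standing in for a real model and data file.
import jax.numpy as jnp

features = ('featA', 'featB', 'featC')
rows = [('1', {'featA', 'featC'}), ('0', {'featB'})]
X = jnp.array([[f in present for f in features] for _, present in rows]) * 2 - 1
Y = jnp.array([label == '1' for label, _ in rows])
print(X)  # [[ 1 -1  1] [-1  1 -1]]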