code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes) |
---|---|---|
def complete(self):
return ((self.header is not None) and (self.metadata is not None) and (self.content is not None) and (self.header.get('num_buffers', 0) == len(self._buffers)))
|
Returns whether all required parts of a message are present.
Returns:
bool : True if the message is complete, False otherwise
|
codesearchnet
|
def linear(m=1, b=0):
def f(i):
return m * i + b
return partial(force, sequence=_advance(f))
|
Return a driver function that can advance a sequence of linear values.
.. code-block:: none
value = m * i + b
Args:
m (float) : a slope for the linear driver
b (float) : an offset for the linear driver
|
juraj-google-style
|
def create_document(self, name='Test Document', owner_type=0, public=True):
payload = {
'name': name,
'ownerType': owner_type,
'isPublic': public
}
return self._api.request('post', '/api/documents', body=payload)
|
Create a new document.
Args:
- name (str, default='Test Document'): The doc name
- owner_type (int, default=0): 0 for user, 1 for company, 2 for team
- public (bool, default=True): Whether or not to make doc public
Returns:
- requests.Response: Onshape response data
|
juraj-google-style
|
def properties(self, var_or_nodeid, as_list=False):
props = []
if var_or_nodeid in self._vars:
props = self._vars[var_or_nodeid]['props']
elif var_or_nodeid in self._eps:
var = self._eps[var_or_nodeid][3].get(IVARG_ROLE)
props = self._vars.get(var, {}).get('props', [])
else:
raise KeyError(var_or_nodeid)
if not as_list:
props = dict(props)
return props
|
Return a dictionary of variable properties for *var_or_nodeid*.
Args:
var_or_nodeid: if a variable, return the properties
associated with the variable; if a nodeid, return the
properties associated with the intrinsic variable of the
predication given by the nodeid
|
juraj-google-style
|
def __init__(self, extensions):
super(ExtensionsFileEntryFilter, self).__init__()
self._extensions = extensions
|
Initializes an extensions-based file entry filter.
An extension is defined as "pdf" as in "document.pdf".
Args:
extensions (list[str]): a list of extension strings.
|
juraj-google-style
|
def perfcounters(infile):
measurements = []
with open(infile, 'r') as in_file:
read_struct(in_file)
for region_struct in read_structs(in_file):
region = region_struct["1"][1]
core_info = region_struct["Region Info"]
measurements += \
get_measurements(region, core_info, region_struct)
for table_struct in read_tables(in_file):
core_info = None
if "Event" in table_struct:
offset = 1
core_info = table_struct["Event"][offset:]
measurements += get_measurements(region, core_info,
table_struct, offset)
elif "Metric" in table_struct:
core_info = table_struct["Metric"]
measurements += get_measurements(region, core_info,
table_struct)
return measurements
|
Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream.
|
juraj-google-style
|
def index_file(self, f, overwrite=False):
if isinstance(f, six.string_types):
f = self.layout.get_file(f)
if f.path in self.file_index and not overwrite:
return
if 'suffix' not in f.entities:
return
md = self._get_metadata(f.path)
for md_key, md_val in md.items():
if md_key not in self.key_index:
self.key_index[md_key] = {}
self.key_index[md_key][f.path] = md_val
self.file_index[f.path][md_key] = md_val
|
Index metadata for the specified file.
Args:
f (BIDSFile, str): A BIDSFile or path to an indexed file.
overwrite (bool): If True, forces reindexing of the file even if
an entry already exists.
|
juraj-google-style
|
def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None) -> tf.Tensor:
if input_ids is None and inputs_embeds is None:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if inputs_embeds is None:
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
final_embeddings = inputs_embeds + position_embeds
return final_embeddings
|
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
|
github-repos
|
def GetRowCache(self, query):
query_hash = hash(query)
if (query_hash not in self._row_caches):
self._row_caches[query_hash] = set()
return self._row_caches[query_hash]
|
Retrieves the row cache for a specific query.
The row cache is a set that contains hashes of values in a row. The row
cache is used to find duplicate rows when a database and a database with
a WAL file are parsed.
Args:
query (str): query.
Returns:
set: hashes of the rows that have been parsed.
|
codesearchnet
|
def get_states(self):
stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle)
return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity(num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range'))
|
Returns states of the tree ensemble.
Returns:
stamp_token, num_trees, num_finalized_trees, num_attempted_layers and
range of the nodes in the latest layer.
|
github-repos
|
def __get_keywords(self):
txt = self.text
for line in txt:
for word in split_words(line):
(yield word)
|
Get all the keywords related to this page
Returns:
An array of strings
|
codesearchnet
|
def _CheckForRestartAndMaybePurge(self, event):
if (event.HasField('session_log') and (event.session_log.status == event_pb2.SessionLog.START)):
self._Purge(event, by_tags=False)
|
Check and discard expired events using SessionLog.START.
Check for a SessionLog.START event and purge all previously seen events
with larger steps, because they are out of date. Because of supervisor
threading, it is possible that this logic will cause the first few event
messages to be discarded since supervisor threading does not guarantee
that the START message is deterministically written first.
This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
can inadvertently discard events due to supervisor threading.
Args:
event: The event to use as reference. If the event is a START event, all
previously seen events with a greater event.step will be purged.
|
codesearchnet
|
def data_struct_array(sample, **vectors):
if (not len(sample)):
sample = np.zeros((0, 0), dtype=np.int8)
else:
sample = np.asarray(sample, dtype=np.int8)
if (sample.ndim < 2):
sample = np.expand_dims(sample, 0)
(num_samples, num_variables) = sample.shape
if ('num_occurrences' not in vectors):
vectors['num_occurrences'] = ([1] * num_samples)
datavectors = {}
datatypes = [('sample', np.dtype(np.int8), (num_variables,))]
for (kwarg, vector) in vectors.items():
dtype = (float if (kwarg == 'energy') else None)
datavectors[kwarg] = vector = np.asarray(vector, dtype)
if ((len(vector.shape) < 1) or (vector.shape[0] != num_samples)):
msg = '{} and sample have a mismatched shape {}, {}. They must have the same size in the first axis.'.format(kwarg, vector.shape, sample.shape)
raise ValueError(msg)
datatypes.append((kwarg, vector.dtype, vector.shape[1:]))
if ('energy' not in datavectors):
raise TypeError('data_struct_array() needs keyword-only argument energy')
elif (datavectors['energy'].shape != (num_samples,)):
raise ValueError('energy should be a vector of length {}'.format(num_samples))
data = np.rec.array(np.zeros(num_samples, dtype=datatypes))
data['sample'] = sample
for (kwarg, vector) in datavectors.items():
data[kwarg] = vector
return data
|
Combine samples and per-sample data into a numpy structured array.
Args:
sample (array_like):
Samples, in any form that can be converted into a numpy array.
energy (array_like, required):
Required keyword argument. Energies, in any form that can be converted into a numpy
1-dimensional array.
**kwargs (array_like):
Other per-sample data, in any form that can be converted into a numpy array.
Returns:
:obj:`~numpy.ndarray`: A numpy structured array. Has fields ['sample', 'energy', 'num_occurrences', **kwargs]
|
codesearchnet
|
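A minimal standalone sketch of the structured-array layout this helper builds, using only numpy; the field shapes and values below are illustrative, not taken from the entry above.
import numpy as np

# Fields mirror ['sample', 'energy', 'num_occurrences']; shapes/values are made up for illustration.
datatypes = [('sample', np.int8, (2,)), ('energy', float), ('num_occurrences', int)]
data = np.rec.array(np.zeros(3, dtype=datatypes))
data['sample'] = [[0, 1], [1, 0], [1, 1]]
data['energy'] = [-1.0, -1.0, 2.0]
data['num_occurrences'] = 1
print(data['energy'])   # [-1. -1.  2.]
print(data[0].sample)   # [0 1]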
def from_fortran_src(cls, fortran_src: str, dir: str = "."):
import tempfile
fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)
fp.writelines(fortran_src)
fp.close()
G = cls.from_fortran_file(fp.name, dir)
os.remove(fp.name)
return G
|
Create a GroundedFunctionNetwork instance from a string with raw
Fortran code.
Args:
fortran_src: A string with Fortran source code.
dir: (Optional) - the directory in which the temporary Fortran file
will be created (make sure you have write permission!) Defaults to
the current directory.
Returns:
A GroundedFunctionNetwork instance
|
juraj-google-style
|
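A standalone sketch of the write-temp-file/consume/remove pattern used above; the Fortran content is a placeholder and `from_fortran_file` itself is not reproduced.
import os
import tempfile

fp = tempfile.NamedTemporaryFile('w+t', delete=False, dir='.')
fp.writelines("      PROGRAM DEMO\n      END PROGRAM DEMO\n")
fp.close()
print(os.path.exists(fp.name))  # True -- a consumer such as from_fortran_file would read it here
os.remove(fp.name)
print(os.path.exists(fp.name))  # False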
def created(cls, data=None):
if cls.expose_status:
cls.response.content_type = 'application/json'
cls.response._status_line = '201 Created'
return cls(201, data=data).to_json
|
Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
|
juraj-google-style
|
def _generic_fit(fqdn, result, scorer, yP=None, *argl, **argd):
out = None
if len(argl) > 0:
machine = argl[0]
out = {}
if hasattr(machine, "best_score_"):
out["score"] = machine.best_score_
yL = _do_auto_predict(*argl[0:2])
yscore = scorer(fqdn, yL, yP, *argl, **argd)
if yscore is not None:
out.update(yscore)
return out
|
Performs the generic fit tests that are common to both classifier and
regressor; uses `scorer` to score the predicted values given by the machine
when tested against its training set.
Args:
scorer (function): called on the result of `machine.predict(Xtrain,
ytrain)`.
|
juraj-google-style
|
def default_metric_definitions(cls, toolkit):
if (toolkit is RLToolkit.COACH):
return [{'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'}, {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?),'}]
elif (toolkit is RLToolkit.RAY):
float_regex = '[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?'
return [{'Name': 'episode_reward_mean', 'Regex': ('episode_reward_mean: (%s)' % float_regex)}, {'Name': 'episode_reward_max', 'Regex': ('episode_reward_max: (%s)' % float_regex)}]
|
Provides default metric definitions based on provided toolkit.
Args:
toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training.
Returns:
list: metric definitions
|
codesearchnet
|
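A quick standalone check of the Coach reward regex against a plausible (made-up) training log line:
import re

line = 'Training> Name=main_level/agent, Episode=3, Total reward=42.5, Steps=120'
match = re.search(r'^Training>.*Total reward=(.*?),', line)
print(match.group(1))  # 42.5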
def calculate_stress(self, strain):
strain = np.array(strain)
if strain.shape == (6,):
strain = Strain.from_voigt(strain)
assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
stress_matrix = self.einsum_sequence([strain]*(self.order - 1)) \
/ factorial(self.order - 1)
return Stress(stress_matrix)
|
Calculates a given elastic tensor's contribution to the
stress using Einstein summation
Args:
strain (3x3 array-like): matrix corresponding to strain
|
juraj-google-style
|
def convert_response(check_response, project_id):
if ((not check_response) or (not check_response.checkErrors)):
return _IS_OK
theError = check_response.checkErrors[0]
error_tuple = _CHECK_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
if (error_tuple[1].find(u'{') == (- 1)):
return error_tuple
updated_msg = error_tuple[1].format(project_id=project_id, detail=(theError.detail or u''))
return (error_tuple[0], updated_msg, error_tuple[2])
|
Computes an HTTP status code and message from a `CheckResponse`.
The return value is a tuple (code, message, api_key_is_bad) where
code: is the http status code
message: is the message to return
api_key_is_bad: indicates that a given api_key is bad
Args:
check_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckResponse`):
the response from calling an api
Returns:
tuple(code, message, bool)
|
codesearchnet
|
def test_on_batch(self, x, y=None, sample_weight=None, return_dict=False):
raise NotImplementedError
|
Test the model on a single batch of samples.
Args:
x: Input data. Must be array-like.
y: Target data. Must be array-like.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape `(samples, sequence_length)`, to apply a different
weight to every timestep of every sample.
return_dict: If `True`, loss and metric results are returned as a
dict, with each key being the name of the metric. If `False`,
they are returned as a list.
Returns:
A scalar loss value (when no metrics and `return_dict=False`),
a list of loss and metric values
(if there are metrics and `return_dict=False`), or a dict of
metric and loss values (if `return_dict=True`).
|
github-repos
|
def _clone_sequential_model(model, clone_function, input_tensors=None):
if not isinstance(model, Sequential):
raise ValueError(f'Expected `model` argument to be a `Sequential` model instance. Received: model={model}')
if not callable(clone_function):
raise ValueError(f'Expected `clone_function` argument to be a callable. Received: clone_function={clone_function}')
new_layers = [clone_function(layer) for layer in model.layers]
if isinstance(model._layers[0], InputLayer):
ref_input_layer = model._layers[0]
input_name = ref_input_layer.name
input_batch_shape = ref_input_layer.batch_shape
input_dtype = ref_input_layer._dtype
else:
input_name = None
input_dtype = None
input_batch_shape = None
if input_tensors is not None:
if isinstance(input_tensors, (list, tuple)):
if len(input_tensors) != 1:
raise ValueError('Argument `input_tensors` must contain a single tensor.')
input_tensors = input_tensors[0]
if not isinstance(input_tensors, backend.KerasTensor):
raise ValueError(f'Argument `input_tensors` must be a KerasTensor. Received invalid value: input_tensors={input_tensors}')
inputs = Input(tensor=input_tensors, name=input_name)
new_layers = [inputs] + new_layers
elif input_batch_shape is not None:
inputs = Input(batch_shape=input_batch_shape, dtype=input_dtype, name=input_name)
new_layers = [inputs] + new_layers
cloned_model = Sequential(new_layers, name=model.name, trainable=model.trainable)
if model.compiled:
compiled_config = model.get_compile_config()
cloned_model.compile_from_config(compiled_config)
return cloned_model
|
Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Args:
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
clone_function: callable to be applied on non-input layers in the model.
By default, it clones the layer (without copying the weights).
Returns:
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
|
github-repos
|
def _FormatSourceShort(self, event):
(source_short, _) = self._output_mediator.GetFormattedSources(event)
if (source_short is None):
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound('Unable to find event formatter for: {0:s}.'.format(data_type))
return source_short
|
Formats the short source.
Args:
event (EventObject): event.
Returns:
str: short source field.
Raises:
NoFormatterFound: If no event formatter can be found to match the data
type in the event.
|
codesearchnet
|
def get_column_names(self, X):
if isinstance(X, pd.DataFrame):
return X.columns
return range(X.shape[1])
|
Return iterable containing columns for the given array X.
Args:
X: `numpy.ndarray` or `pandas.DataFrame`.
Returns:
iterable: columns for the given matrix.
|
juraj-google-style
|
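The function is short enough to reproduce in a standalone sketch that shows the DataFrame/ndarray dispatch:
import numpy as np
import pandas as pd

def get_column_names(X):
    # same logic as above, repeated here so the example runs on its own
    if isinstance(X, pd.DataFrame):
        return X.columns
    return range(X.shape[1])

print(list(get_column_names(pd.DataFrame({'a': [1], 'b': [2]}))))  # ['a', 'b']
print(list(get_column_names(np.zeros((3, 4)))))                    # [0, 1, 2, 3]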
def request(self, result_limit, result_start, filters=None, params=None):
return self.tc_requests.request(
self.api_type,
self.api_sub_type,
result_limit,
result_start,
owner=self.owner,
filters=filters,
params=params,
)
|
Gets the Indicator/Group/Victim or Security Labels
Args:
filters:
owner:
result_limit:
result_start:
params: parameters to pass in to get the objects
Returns:
|
juraj-google-style
|
def unit_is_related(self, location, worksheet):
same_worksheet = (worksheet == self.worksheet)
if isinstance(location, (tuple, list)):
return ((location[0] >= self.start[0]) and (location[0] < self.end[0]) and (location[1] >= self.start[1]) and (location[1] < self.end[1]) and same_worksheet)
else:
return same_worksheet
|
Checks for relationship between a unit location and this block.
Returns:
True if the location is related to this block.
|
codesearchnet
|
def find_first_file_with_ext(base_paths, prefix, exts):
for base_path in base_paths:
for ext in exts:
filename = os.path.join(base_path, ('%s%s' % (prefix, ext)))
if (os.path.exists(filename) and os.path.isfile(filename)):
logger.debug('Found first file with relevant extension: %s', filename)
return (base_path, ext)
logger.debug('No files found for prefix %s, extensions %s', prefix, ', '.join(exts))
return (None, None)
|
Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None).
|
codesearchnet
|
def pretokenized_t2t_dataset(dataset_name=gin.REQUIRED,
text2self=False,
data_dir=gin.REQUIRED,
dataset_split="train",
batch_size=gin.REQUIRED,
sequence_length=gin.REQUIRED,
vocabulary=None):
del vocabulary
filepattern = os.path.join(
data_dir, dataset_name + "-" + dataset_split + "-*")
filenames = tf.gfile.Glob(filepattern)
tf.logging.info("Found %s files matching %s" % (len(filenames), filepattern))
if not filenames:
raise ValueError("No matching files found")
dataset = pretokenized_tfrecord_dataset(
filenames=filenames,
text2self=text2self,
eos_included=True,
repeat=dataset_split == "train",
batch_size=batch_size,
sequence_length=sequence_length)
if dataset_split == "train":
dataset = dataset.shuffle(1000)
return dataset
|
Loads the Tensor2tensor dataset specified by dataset_name.
Args:
dataset_name: Tensor2tensor dataset name (used to find the pretokenized files in data_dir).
text2self: a boolean
data_dir: string, data_dir for TensorFlow Datasets
dataset_split: a string - "train" or "dev"
batch_size: an integer
sequence_length: an integer
vocabulary: ignored
Returns:
A tf.data.Dataset of batches
|
juraj-google-style
|
def enable_argscope_for_function(func, log_shape=True):
assert callable(func), 'func should be a callable'
@wraps(func)
def wrapped_func(*args, **kwargs):
actual_args = copy.copy(get_arg_scope()[func.__name__])
actual_args.update(kwargs)
out_tensor = func(*args, **actual_args)
in_tensor = args[0]
ctx = get_current_tower_context()
name = (func.__name__ if ('name' not in kwargs) else kwargs['name'])
if log_shape:
if (('tower' not in ctx.ns_name.lower()) or ctx.is_main_training_tower):
if isinstance(out_tensor, tuple):
out_tensor_descr = out_tensor[0]
else:
out_tensor_descr = out_tensor
logger.info(('%20s: %20s -> %20s' % (name, in_tensor.shape.as_list(), out_tensor_descr.shape.as_list())))
return out_tensor
wrapped_func.symbolic_function = None
return wrapped_func
|
Decorator for function to support argscope
Example:
.. code-block:: python
from mylib import myfunc
myfunc = enable_argscope_for_function(myfunc)
Args:
func: A function mapping one or multiple tensors to one or multiple
tensors.
log_shape (bool): Specify whether the first input resp. output tensor
shape should be printed once.
Remarks:
If the function ``func`` returns multiple input or output tensors,
only the first input/output tensor shape is displayed during logging.
Returns:
The decorated function.
|
codesearchnet
|
def console_set_default_background(
con: tcod.console.Console, col: Tuple[int, int, int]
) -> None:
lib.TCOD_console_set_default_background(_console(con), col)
|
Change the default background color for a console.
Args:
con (Console): Any Console instance.
col (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
.. deprecated:: 8.5
Use :any:`Console.default_bg` instead.
|
juraj-google-style
|
def marginalize_out(node_indices, tpm):
return tpm.sum(tuple(node_indices), keepdims=True) / (
np.array(tpm.shape)[list(node_indices)].prod())
|
Marginalize out nodes from a TPM.
Args:
node_indices (list[int]): The indices of nodes to be marginalized out.
tpm (np.ndarray): The TPM to marginalize the node out of.
Returns:
np.ndarray: A TPM with the same number of dimensions, with the nodes
marginalized out.
|
juraj-google-style
|
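A standalone numpy check of the formula: sum over the node's axis, then divide by that node's number of states. The 2x2x2 TPM below is arbitrary.
import numpy as np

tpm = np.arange(8, dtype=float).reshape(2, 2, 2)
node_indices = (0,)
result = tpm.sum(node_indices, keepdims=True) / np.array(tpm.shape)[list(node_indices)].prod()
print(result.shape)     # (1, 2, 2) -- same number of dimensions, node 0 marginalized out
print(result[0, 0, 0])  # (tpm[0, 0, 0] + tpm[1, 0, 0]) / 2 = (0 + 4) / 2 = 2.0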
def angle_to_name(angle, segments=8, abbr=False):
if (segments == 4):
string = COMPASS_NAMES[((int(((angle + 45) / 90)) % 4) * 2)]
elif (segments == 8):
string = COMPASS_NAMES[((int(((angle + 22.5) / 45)) % 8) * 2)]
elif (segments == 16):
string = COMPASS_NAMES[(int(((angle + 11.25) / 22.5)) % 16)]
else:
raise ValueError(('Segments parameter must be 4, 8 or 16 not %r' % segments))
if abbr:
return ''.join((i[0].capitalize() for i in string.split('-')))
else:
return string
|
Convert angle in to direction name.
Args:
angle (float): Angle in degrees to convert to direction name
segments (int): Number of segments to split compass in to
abbr (bool): Whether to return abbreviated direction string
Returns:
str: Direction name for ``angle``
|
codesearchnet
|
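COMPASS_NAMES is not shown above, so here is a minimal standalone sketch of the same quantization idea for the 8-segment case, with a hypothetical names list:
NAMES_8 = ['north', 'northeast', 'east', 'southeast',
           'south', 'southwest', 'west', 'northwest']  # hypothetical, not the real COMPASS_NAMES

def angle_to_name_8(angle):
    return NAMES_8[int((angle + 22.5) / 45) % 8]

print(angle_to_name_8(0))    # north
print(angle_to_name_8(100))  # east
print(angle_to_name_8(350))  # north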
def verify_gmt_integrity(gmt):
set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt]
assert (len(set(set_ids)) == len(set_ids)), 'Set identifiers should be unique. set_ids: {}'.format(set_ids)
|
Make sure that set ids are unique.
Args:
gmt (GMT object): list of dicts
Returns:
None
|
codesearchnet
|
def get_optimizer_experimental_options(self):
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = attr == rewriter_config_pb2.RewriterConfig.ON
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle('layout_optimizer')
rewriter_toggle('constant_folding')
rewriter_toggle('shape_optimization')
rewriter_toggle('remapping')
rewriter_toggle('arithmetic_optimization')
rewriter_toggle('dependency_optimization')
rewriter_toggle('loop_optimization')
rewriter_toggle('function_optimization')
rewriter_toggle('debug_stripper')
rewriter_bool('disable_model_pruning')
rewriter_toggle('scoped_allocator_optimization')
rewriter_toggle('pin_to_host_optimization')
rewriter_toggle('implementation_selector')
rewriter_toggle('auto_mixed_precision')
rewriter_toggle('use_plugin_optimizers')
rewriter_bool('disable_meta_optimizer')
rewriter_toggle('auto_mixed_precision_onednn_bfloat16')
rewriter_toggle('auto_mixed_precision_mkl')
if rewrite_options.min_graph_nodes != 0:
options['min_graph_nodes'] = rewrite_options.min_graph_nodes
return options
|
Get experimental options for the optimizer.
Returns:
Dictionary of current option values
|
github-repos
|
def tan(cls, x: 'TensorFluent') -> 'TensorFluent':
return cls._unary_op(x, tf.tan, tf.float32)
|
Returns a TensorFluent for the tan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the tan function.
|
codesearchnet
|
def _populate(cls, as_of=None, delete=False):
billing_cycle_helper = get_billing_cycle()
billing_cycles_exist = BillingCycle.objects.exists()
try:
current_billing_cycle = BillingCycle.objects.as_of(date=as_of)
except BillingCycle.DoesNotExist:
current_billing_cycle = None
if not billing_cycles_exist:
delete = False
if billing_cycles_exist and not current_billing_cycle:
raise CannotPopulateForDateOutsideExistingCycles()
omit_current = (current_billing_cycle and delete)
stop_date = as_of + relativedelta(years=settings.SWIFTWIND_BILLING_CYCLE_YEARS)
date_ranges = billing_cycle_helper.generate_date_ranges(as_of, stop_date=stop_date, omit_current=omit_current)
date_ranges = list(date_ranges)
beginning_date = date_ranges[0][0]
with db_transaction.atomic():
if delete:
cls.objects.filter(start_date__gte=beginning_date).delete()
for start_date, end_date in date_ranges:
exists = BillingCycle.objects.filter(date_range=(start_date, end_date)).exists()
if exists:
if delete:
raise Exception(
'It should not be possible to get here as future billing cycles have just been deleted'
)
else:
pass
else:
BillingCycle.objects.create(
date_range=(start_date, end_date),
)
|
Populate the table with billing cycles starting from `as_of`
Args:
as_of (date): The date at which to begin the populating
delete (bool): Should future billing cycles be deleted?
|
juraj-google-style
|
def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str], input_channel_dim: Optional[Union[ChannelDimension, str]]=None) -> np.ndarray:
if not isinstance(image, np.ndarray):
raise ValueError(f'Input image must be of type np.ndarray, got {type(image)}')
if input_channel_dim is None:
input_channel_dim = infer_channel_dimension_format(image)
target_channel_dim = ChannelDimension(channel_dim)
if input_channel_dim == target_channel_dim:
return image
if target_channel_dim == ChannelDimension.FIRST:
image = image.transpose((2, 0, 1))
elif target_channel_dim == ChannelDimension.LAST:
image = image.transpose((1, 2, 0))
else:
raise ValueError('Unsupported channel dimension format: {}'.format(channel_dim))
return image
|
Converts `image` to the channel dimension format specified by `channel_dim`.
Args:
image (`numpy.ndarray`):
The image to have its channel dimension set.
channel_dim (`ChannelDimension`):
The channel dimension format to use.
input_channel_dim (`ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input image.
Returns:
`np.ndarray`:
The image with the channel dimension set to `channel_dim`.
|
github-repos
|
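A standalone numpy sketch of the two transposes used above (channels-last to channels-first and back):
import numpy as np

image_hwc = np.zeros((224, 224, 3))          # channels-last (LAST)
image_chw = image_hwc.transpose((2, 0, 1))   # to channels-first (FIRST)
back = image_chw.transpose((1, 2, 0))        # back to channels-last
print(image_chw.shape, back.shape)           # (3, 224, 224) (224, 224, 3)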
def constant(x: A) -> Callable[(..., A)]:
def constanted(*args, **kwargs):
return x
return constanted
|
Produce a function that always returns a supplied value.
Args:
x: Any object.
Returns:
A function that accepts any number of positional and keyword arguments, discards them, and returns ``x``.
|
codesearchnet
|
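The function is small enough to reproduce with a usage example; the conventional `Callable[..., A]` spelling of the return annotation is used here.
from typing import Callable, TypeVar

A = TypeVar('A')

def constant(x: A) -> Callable[..., A]:
    def constanted(*args, **kwargs):
        return x
    return constanted

five = constant(5)
print(five())           # 5
print(five(1, 2, k=3))  # 5 -- all arguments are discarded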
def add_hparam(self, name, value):
if getattr(self, name, None) is not None:
raise ValueError('Hyperparameter name is reserved: %s' % name)
if isinstance(value, (list, tuple)):
if not value:
raise ValueError(
'Multi-valued hyperparameters cannot be empty: %s' % name)
self._hparam_types[name] = (type(value[0]), True)
else:
self._hparam_types[name] = (type(value), False)
setattr(self, name, value)
|
Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
|
juraj-google-style
|
async def openurl(url, **opts):
    if (url.find('://') == -1):
        newurl = alias(url)
        if (newurl is None):
            raise s_exc.BadUrl(f':// not found in [{url}] and no aliases found!')
url = newurl
info = s_urlhelp.chopurl(url)
info.update(opts)
host = info.get('host')
port = info.get('port')
auth = None
user = info.get('user')
if (user is not None):
passwd = info.get('passwd')
auth = (user, {'passwd': passwd})
scheme = info.get('scheme')
if (scheme == 'cell'):
path = info.get('path')
name = info.get('name', '*')
host = info.get('host')
if host:
path = path.strip('/')
path = os.path.join(host, path)
if (':' in path):
(path, name) = path.split(':')
full = os.path.join(path, 'sock')
link = (await s_link.unixconnect(full))
elif (scheme == 'unix'):
(path, name) = info.get('path').split(':')
link = (await s_link.unixconnect(path))
else:
path = info.get('path')
name = info.get('name', path[1:])
sslctx = None
if (scheme == 'ssl'):
certpath = info.get('certdir')
certdir = s_certdir.CertDir(certpath)
sslctx = certdir.getClientSSLContext()
link = (await s_link.connect(host, port, ssl=sslctx))
prox = (await Proxy.anit(link, name))
prox.onfini(link)
try:
(await prox.handshake(auth=auth))
except Exception:
(await prox.fini())
raise
return prox
|
Open a URL to a remote telepath object.
Args:
url (str): A telepath URL.
**opts (dict): Telepath connect options.
Returns:
(synapse.telepath.Proxy): A telepath proxy object.
The telepath proxy may then be used for sync or async calls:
proxy = openurl(url)
value = proxy.getFooThing()
... or ...
proxy = await openurl(url)
valu = await proxy.getFooThing()
... or ...
async with await openurl(url) as proxy:
valu = await proxy.getFooThing()
|
codesearchnet
|
def set_all_curriculums_to_lesson_num(self, lesson_num):
for (_, curriculum) in self.brains_to_curriculums.items():
curriculum.lesson_num = lesson_num
|
Sets all the curriculums in this meta curriculum to a specified
lesson number.
Args:
lesson_num (int): The lesson number which all the curriculums will
be set to.
|
codesearchnet
|
def get_compile_flags():
flags = []
flags.append('-I%s' % get_include())
flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)
cxx_version_flag = None
if _CXX_VERSION == 201103:
cxx_version_flag = '--std=c++11'
elif _CXX_VERSION == 201402:
cxx_version_flag = '--std=c++14'
elif _CXX_VERSION == 201703:
cxx_version_flag = '--std=c++17'
elif _CXX_VERSION == 202002:
cxx_version_flag = '--std=c++20'
if cxx_version_flag:
flags.append(cxx_version_flag)
flags.append('-DEIGEN_MAX_ALIGN_BYTES=%d' % pywrap_tf_session.get_eigen_max_align_bytes())
return flags
|
Returns the compilation flags for compiling with TensorFlow.
The returned list of arguments can be passed to the compiler for compiling
against TensorFlow headers. The result is platform dependent.
For example, on a typical Linux system with Python 3.7 the following command
prints `['-I/usr/local/lib/python3.7/dist-packages/tensorflow/include',
'-D_GLIBCXX_USE_CXX11_ABI=1', '-DEIGEN_MAX_ALIGN_BYTES=64']`
>>> print(tf.sysconfig.get_compile_flags())
Returns:
A list of strings for the compiler flags.
|
github-repos
|
def release_port(upnp, external_port):
mapping = upnp.getspecificportmapping(external_port, 'UDP')
if (mapping is None):
log.error('could not find a port mapping', external=external_port)
return False
else:
log.debug('found existing port mapping', mapping=mapping)
if upnp.deleteportmapping(external_port, 'UDP'):
log.info('successfully released port mapping', external=external_port)
return True
log.warning('could not release port mapping, check your router for stale mappings')
return False
|
Try to release the port mapping for `external_port`.
Args:
external_port (int): the port that was previously forwarded to.
Returns:
success (boolean): if the release was successful.
|
codesearchnet
|
def extract_element_internationalized_comment(element):
element_entry_comment = get_element_attribute_or_empty(element, 'userLabel')
if (element_entry_comment == ''):
try:
element_entry_comment = element.getElementsByTagName('string')[0].firstChild.nodeValue
except Exception:
element_entry_comment = ''
if (not element_entry_comment.lower().startswith(JT_INTERNATIONALIZED_COMMENT_PREFIX)):
return None
else:
return element_entry_comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]
|
Extracts the xib element's comment, if the element has been internationalized.
Args:
element (element): The element from which to extract the comment.
Returns:
The element's internationalized comment, None if it does not exist, or hasn't been internationalized (according
to the JTLocalize definitions).
|
codesearchnet
|
def xarrayfunc(func):
@wraps(func)
def wrapper(*args, **kwargs):
if any((isinstance(arg, xr.DataArray) for arg in args)):
newargs = []
for arg in args:
if isinstance(arg, xr.DataArray):
newargs.append(arg.values)
else:
newargs.append(arg)
return dc.full_like(args[0], func(*newargs, **kwargs))
else:
return func(*args, **kwargs)
return wrapper
|
Make a function compatible with xarray.DataArray.
This function is intended to be used as a decorator like::
>>> @dc.xarrayfunc
>>> def func(array):
... # do something
... return newarray
>>>
>>> result = func(array)
Args:
func (function): Function to be wrapped. The first argument
of the function must be an array to be processed.
Returns:
wrapper (function): Wrapped function.
|
codesearchnet
|
def get_decoder_self_attention_bias(length):
with tf.name_scope('decoder_self_attention_bias'):
valid_locs = tf.matrix_band_part(tf.ones([length, length]), (- 1), 0)
valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
decoder_bias = (_NEG_INF * (1.0 - valid_locs))
return decoder_bias
|
Calculate bias for decoder that maintains model's autoregressive property.
Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.
Args:
length: int length of sequences in batch.
Returns:
float tensor of shape [1, 1, length, length]
|
codesearchnet
|
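A standalone numpy sketch of the same causal bias construction: a lower-triangular mask turned into a large negative additive bias. `_NEG_INF` is assumed to be around -1e9.
import numpy as np

_NEG_INF = -1e9  # assumed value
length = 4
valid_locs = np.tril(np.ones((length, length)))          # 1 where attention is allowed
bias = _NEG_INF * (1.0 - valid_locs).reshape(1, 1, length, length)
print(bias[0, 0])
# row i has 0.0 up to column i and -1e9 afterwards, so position i cannot attend to the future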
def update_x(self, x, indices=None):
x = _make_np_bool(x)
if (indices is None):
if (len(self._x) != len(x)):
raise QiskitError('During updating whole x, you can not change the number of qubits.')
self._x = x
else:
if ((not isinstance(indices, list)) and (not isinstance(indices, np.ndarray))):
indices = [indices]
for (p, idx) in enumerate(indices):
self._x[idx] = x[p]
return self
|
Update partial or entire x.
Args:
x (numpy.ndarray or list): to-be-updated x
indices (numpy.ndarray or list or optional): to-be-updated qubit indices
Returns:
Pauli: self
Raises:
QiskitError: when updating whole x, the number of qubits must be the same.
|
codesearchnet
|
def set_stderrthreshold(s):
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
'set_stderrthreshold only accepts integer absl logging level '
'from -3 to 1, or case-insensitive string values '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'But found "{}" ({}).'.format(s, type(s)))
|
Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
|
juraj-google-style
|
class XGBoostModelHandlerSciPy(XGBoostModelHandler[scipy.sparse.csr_matrix, PredictionResult, Union[xgboost.Booster, xgboost.XGBModel]]):
def run_inference(self, batch: Sequence[scipy.sparse.csr_matrix], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
return self._inference_fn(batch, model, inference_args)
def get_num_bytes(self, batch: Sequence[scipy.sparse.csr_matrix]) -> int:
return sum((sys.getsizeof(element) for element in batch))
|
Implementation of the ModelHandler interface for XGBoost
using scipy matrices as input.
Example Usage::
pcoll | RunInference(
XGBoostModelHandlerSciPy(
model_class="XGBoost Model Class",
model_state="my_model_state.json"))
Args:
model_class: class of the XGBoost model that defines the model
structure.
model_state: path to a json file that contains the model's
configuration.
inference_fn: the inference function to use during RunInference.
default=default_xgboost_inference_fn
|
github-repos
|
def extract_model_metrics(model):
if getattr(model, '_compile_metrics', None):
return {m.name: m for m in model._compile_metric_functions}
return None
|
Convert metrics from a Keras model `compile` API to dictionary.
This is used for converting Keras models to SavedModels.
Args:
model: A `tf.keras.Model` object.
Returns:
Dictionary mapping metric names to metric instances. May return `None` if
the model does not contain any metrics.
|
github-repos
|
def download_file_by_name(url, target_folder, file_name, mkdir=False):
__hdr__ = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
if (not os.path.isdir(target_folder)):
if mkdir:
preparedir(target_folder)
else:
created = preparedir(target_folder, False)
if (not created):
raise ValueError(('Failed to find %s.' % target_folder))
file_path = os.path.join(target_folder, file_name)
if (sys.version_info < (3, 0)):
_download_py2(url, file_path, __hdr__)
else:
_download_py3(url, file_path, __hdr__)
|
Download a file to a directory.
Args:
url: A string to a valid URL.
target_folder: Target folder for download (e.g. c:/ladybug)
file_name: File name (e.g. testPts.zip).
mkdir: Set to True to create the directory if it doesn't exist (Default: False)
|
codesearchnet
|
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
tokenizer.build_vocab(X_train)
process_save(X_train, y_train, tokenizer, path.join(
proc_data_dir, 'train.bin'), train=True, **kwargs)
process_save(X_val, y_val, tokenizer, path.join(
proc_data_dir, 'val.bin'), **kwargs)
process_save(X_test, y_test, tokenizer, path.join(
proc_data_dir, 'test.bin'), **kwargs)
|
Setup data while splitting into a training, validation, and test set.
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_dir: Directory for the split and processed data
|
juraj-google-style
|
def _map_seqprop_resnums_to_structprop_chain_index(self, resnums, seqprop=None, structprop=None, chain_id=None, use_representatives=False):
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if (not structprop):
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
elif ((not seqprop) or (not structprop) or (not chain_id)):
raise ValueError('Please specify sequence, structure, and chain ID')
if self.representative_structure:
if (structprop.id == self.representative_structure.id):
full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
access_key = '{}_chain_index'.format(aln_id)
if (access_key not in seqprop.letter_annotations):
raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))
chain_index_mapping = seqprop.letter_annotations[access_key]
resnum_to_chain_index = {}
for x in resnums:
ix = (chain_index_mapping[(x - 1)] - 1)
if np.isnan(ix):
log.warning('{}-{}, {}: no equivalent residue found in structure sequence'.format(structprop.id, chain_id, x))
else:
resnum_to_chain_index[int(x)] = int(ix)
return resnum_to_chain_index
|
Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID. This does not provide
a mapping to residue number, only a mapping to the index which then can be mapped to the structure resnum!
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to index
use_representatives (bool): If representative sequence/structure/chain should be used in mapping
Returns:
dict: Mapping of resnums to indices
|
codesearchnet
|
def cleanup(pin=None, assert_exists=False):
if pin is None:
for pin in list(_open):
cleanup(pin)
return
if not isinstance(pin, int):
raise TypeError("pin must be an int, got: {}".format(pin))
state = _open.get(pin)
if state is None:
if assert_exists:
raise ValueError("pin {} was not setup".format(pin))
return
state.value.close()
state.direction.close()
if os.path.exists(gpiopath(pin)):
log.debug("Unexporting pin {0}".format(pin))
with _export_lock:
with open(pjoin(gpio_root, 'unexport'), 'w') as f:
_write(f, pin)
del _open[pin]
|
Cleanup the pin by closing and unexporting it.
Args:
pin (int, optional): either the pin to clean up or None (default).
If None, clean up all pins.
assert_exists: if True, raise a ValueError if the pin was not
setup. Otherwise, this function is a NOOP.
|
juraj-google-style
|
def get_name(principal):
if isinstance(principal, pywintypes.SIDType):
sid_obj = principal
else:
if (principal is None):
principal = 'S-1-0-0'
try:
sid_obj = win32security.ConvertStringSidToSid(principal)
except pywintypes.error:
try:
sid_obj = win32security.LookupAccountName(None, principal)[0]
except pywintypes.error:
sid_obj = principal
try:
return win32security.LookupAccountSid(None, sid_obj)[0]
except (pywintypes.error, TypeError) as exc:
message = 'Error resolving "{0}"'.format(principal)
if (type(exc) == pywintypes.error):
win_error = win32api.FormatMessage(exc.winerror).rstrip('\n')
message = '{0}: {1}'.format(message, win_error)
log.exception(message)
raise CommandExecutionError(message, exc)
|
Gets the name from the specified principal.
Args:
principal (str):
Find the Normalized name based on this. Can be a PySID object, a SID
string, or a user name in any capitalization.
.. note::
Searching based on the user name can be slow on hosts connected
to large Active Directory domains.
Returns:
str: The name that corresponds to the passed principal
Usage:
.. code-block:: python
salt.utils.win_dacl.get_name('S-1-5-32-544')
salt.utils.win_dacl.get_name('adminisTrators')
|
codesearchnet
|
def cholesky(self, name: str='cholesky') -> 'LinearOperator':
if not self._can_use_cholesky():
raise ValueError('Cannot take the Cholesky decomposition: Not a positive definite self adjoint matrix.')
with self._name_scope(name):
return self._linop_cholesky()
|
Returns a Cholesky factor as a `LinearOperator`.
Given `A` representing this `LinearOperator`, if `A` is positive definite
self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky
decomposition.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the lower triangular matrix
in the Cholesky decomposition.
Raises:
ValueError: When the `LinearOperator` is not hinted to be positive
definite and self adjoint.
|
github-repos
|
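A small usage sketch, assuming TensorFlow 2.x and the public `tf.linalg.LinearOperatorFullMatrix`, with the positive-definite/self-adjoint hints the method requires:
import tensorflow as tf

A = tf.constant([[4.0, 2.0], [2.0, 3.0]])
op = tf.linalg.LinearOperatorFullMatrix(
    A, is_self_adjoint=True, is_positive_definite=True)
chol = op.cholesky()            # a LinearOperator wrapping the lower-triangular factor L
print(chol.to_dense().numpy())  # L such that L @ L^T == A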
def to_list(self):
if not isinstance(self.row_splits, ops.EagerTensor):
raise ValueError('to_list can only be used in eager mode.')
row_splits = self.row_splits.numpy().tolist()
values = self.values
if isinstance(values, RaggedTensor):
return [values[row_splits[i]:row_splits[i + 1]].to_list() for i in range(len(row_splits) - 1)]
else:
if hasattr(values, 'numpy'):
values_as_list = values.numpy().tolist()
elif hasattr(values, 'to_list'):
values_as_list = values.to_list()
else:
raise ValueError('values must be convertible to a list')
return [values_as_list[row_splits[i]:row_splits[i + 1]] for i in range(len(row_splits) - 1)]
|
Returns a nested Python `list` with the values for this `RaggedTensor`.
Requires that `rt` was constructed in eager execution mode.
Returns:
A nested Python `list`.
|
github-repos
|
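A quick usage sketch, assuming TensorFlow 2.x in its default eager mode (the eager requirement is exactly what the `ValueError` above guards):
import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [], [4]])
print(rt.to_list())  # [[1, 2, 3], [], [4]]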
def _get_events_data(object_key: str) -> List[dict]:
events_data = []
key = _keys.events_data(object_key)
for event_id in _get_events_list(object_key):
event_dict = literal_eval(DB.get_hash_value(key, event_id))
events_data.append(event_dict)
return events_data
|
Get the list of event data for the object with the specified key.
Args:
object_key (str): Key of an object in the database.
|
codesearchnet
|
def get_arp_table(self, switch_ip, ip=None, mac=None, interf=None, arp_type=None):
node = natlas_node(switch_ip)
if (node.try_snmp_creds(self.config.snmp_creds) == 0):
return []
arp = node.get_arp_table()
if (arp == None):
return []
if ((((ip == None) & (mac == None)) & (interf == None)) & (arp_type == None)):
return arp
interf = (str(interf) if (interf != None) else None)
ret = []
for a in arp:
if (ip != None):
if (re.match(ip, a.ip) == None):
continue
if (mac != None):
if (re.match(mac, a.mac) == None):
continue
if (interf != None):
if (re.match(interf, str(a.interf)) == None):
continue
if (arp_type != None):
if (re.match(arp_type, a.arp_type) == None):
continue
ret.append(a)
return ret
|
Get the ARP table from a switch.
Args:
switch_ip: IP address of the device
ip: Filter results by IP (regex)
mac: Filter results by MAC (regex)
interf: Filter results by INTERFACE (regex)
arp_type: Filter results by ARP Type
Returns:
Array of natlas_arp objects
|
codesearchnet
|
def get_int_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH, min_int=_MIN_INT, max_int=_MAX_INT):
length = self.get_int(min_length, max_length)
return self.fdp.ConsumeIntListInRange(length, min_int, max_int)
|
Consume a signed integer list with given constraints.
Args:
min_length: The minimum length of the list.
max_length: The maximum length of the list.
min_int: Minimum allowed integer.
max_int: Maximum allowed integer.
Returns:
Consumed integer list based on input bytes and constraints.
|
github-repos
|
def match_regex(self, regex: Pattern, required: bool = False,
meaning: str = "") -> str:
mo = regex.match(self.input, self.offset)
if mo:
self.offset = mo.end()
return mo.group()
if required:
raise UnexpectedInput(self, meaning)
|
Parse input based on a regular expression.
Args:
regex: Compiled regular expression object.
required: Should the exception be raised on unexpected input?
meaning: Meaning of `regex` (for use in error messages).
Raises:
UnexpectedInput: If no syntactically correct keyword is found.
|
juraj-google-style
|
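A standalone sketch of the underlying pattern: anchor a compiled regex at the current offset and advance the offset only on a match (the parser class itself is not reproduced).
import re

text = 'leaf-name { value 7 }'
offset = 0
ident = re.compile(r'[A-Za-z_][A-Za-z0-9_.-]*')
mo = ident.match(text, offset)   # like regex.match(self.input, self.offset)
if mo:
    offset = mo.end()
print(mo.group(), offset)        # leaf-name 9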
def getline(self, lnum=None):
return self._vim.current.buffer[lnum] if lnum else self._vim.current.line
|
Get a line from the current buffer.
Args:
lnum (Optional[str]): Number of the line to get, current if ``None``.
Todo:
- Give this more behavior of Vim ``getline()``?
- ``buffer[index]`` is zero-based, this is probably too confusing
|
juraj-google-style
|
def get_config():
cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*'
try:
raw_config = _pshell(cmd, ignore_retcode=True)
except CommandExecutionError as exc:
if ('Current configuration does not exist' in exc.info['stderr']):
raise CommandExecutionError('Not Configured')
raise
config = dict()
if raw_config:
if ('ConfigurationName' in raw_config[0]):
config[raw_config[0]['ConfigurationName']] = {}
for item in raw_config:
config[item['ConfigurationName']][item['ResourceId']] = {}
for key in item:
if (key not in ['ConfigurationName', 'ResourceId']):
config[item['ConfigurationName']][item['ResourceId']][key] = item[key]
return config
|
Get the current DSC Configuration
Returns:
dict: A dictionary representing the DSC Configuration on the machine
Raises:
CommandExecutionError: On failure
CLI Example:
.. code-block:: bash
salt '*' dsc.get_config
|
codesearchnet
|
def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):
grid_h = tf.range(grid_size, dtype=tf.float32)
grid_w = tf.range(grid_size, dtype=tf.float32)
grid = tf.meshgrid(grid_w, grid_h)
grid = tf.stack(grid, axis=0)
grid = tf.reshape(grid, [2, 1, grid_size, grid_size])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if add_cls_token:
pos_embed = tf.concat([tf.zeros((1, embed_dim)), pos_embed], axis=0)
return pos_embed
|
Create 2D sin/cos positional embeddings.
Args:
embed_dim (`int`):
Embedding dimension.
grid_size (`int`):
The grid height and width.
add_cls_token (`bool`, *optional*, defaults to `False`):
Whether or not to add a classification (CLS) token.
Returns:
(`tf.Tensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim)): the position
embeddings (with or without classification token)
|
github-repos
|
def remove_node(self, node):
if node not in self.node_list:
return
self.node_list.remove(node)
for n in self.node_list:
n.link_list = [link for link in n.link_list if
link.target != node]
|
Remove a node from ``self.node_list`` and links pointing to it.
If ``node`` is not in the graph, do nothing.
Args:
node (Node): The node to be removed
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.remove_node(node_1)
>>> len(graph.node_list)
0
|
juraj-google-style
|
def get_help_data(filepath):
try:
with open(filepath, 'r') as file:
return _json.load(file, object_pairs_hook=OrderedDict)
except Exception as e:
logger.error('Could not load file {}'.format(filepath))
logger.exception(e)
return {}
|
Get the json data from a help file
Args:
filepath (str): The file path for the help file
Returns:
data: The json data from a help file
|
codesearchnet
|
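A standalone sketch of the read path, assuming a small JSON help file on disk (written here just for the demo):
import json
import os
import tempfile
from collections import OrderedDict

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fh:
    json.dump({'copy': 'Copy a file', 'move': 'Move a file'}, fh)

with open(fh.name, 'r') as file:
    data = json.load(file, object_pairs_hook=OrderedDict)  # keeps key order, as above
print(data)  # OrderedDict([('copy', 'Copy a file'), ('move', 'Move a file')])
os.remove(fh.name)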
def _get_data_iterator_from_dataset(dataset, dataset_type_spec):
if dataset_type_spec is list:
if len(dataset) == 0:
raise ValueError('Received an empty list dataset. Please provide a non-empty list of arrays.')
expected_shape = None
for i, element in enumerate(dataset):
if not isinstance(element, np.ndarray):
raise ValueError(f'Expected a list of `numpy.ndarray` objects,Received: {type(element)} at index {i}.')
if expected_shape is None:
expected_shape = element.shape
elif element.shape[0] != expected_shape[0]:
raise ValueError(f'Received a list of NumPy arrays with different lengths.Mismatch found at index {i}, Expected shape={expected_shape} Received shape={np.array(element).shape}.Please provide a list of NumPy arrays of the same length.')
return iter(zip(*dataset))
elif dataset_type_spec is tuple:
if len(dataset) == 0:
raise ValueError('Received an empty list dataset.Please provide a non-empty tuple of arrays.')
expected_shape = None
for i, element in enumerate(dataset):
if not isinstance(element, np.ndarray):
raise ValueError(f'Expected a tuple of `numpy.ndarray` objects,Received: {type(element)} at index {i}.')
if expected_shape is None:
expected_shape = element.shape
elif element.shape[0] != expected_shape[0]:
raise ValueError(f'Received a tuple of NumPy arrays with different lengths.Mismatch found at index {i}, Expected shape={expected_shape} Received shape={np.array(element).shape}.Please provide a tuple of NumPy arrays of the same length.')
return iter(zip(*dataset))
elif dataset_type_spec is tf.data.Dataset:
if is_batched(dataset):
dataset = dataset.unbatch()
return iter(dataset)
elif is_torch_dataset(dataset):
return iter(dataset)
elif dataset_type_spec is np.ndarray:
return iter(dataset)
raise ValueError(f'Invalid dataset_type_spec: {dataset_type_spec}')
|
Get the iterator from a dataset.
Args:
dataset: A `tf.data.Dataset`, a `torch.utils.data.Dataset` object,
or a list/tuple of arrays.
dataset_type_spec: The type of the dataset.
Returns:
iterator: An `iterator` object.
|
github-repos
|
def add(self, message):
if (not isinstance(message, ValidationMessage)):
raise TypeError('Argument must be of type ValidationMessage')
self.messages.append(message)
|
Add a new validation message to this instance.
Args:
message (ValidationMessage): A validation message to add to this instance's list of messages.
|
codesearchnet
|
def to_json(self):
(d, ps) = self._to_json()
if (len(ps) == 0):
return {'name': d}
else:
return {'name': d, 'args': ps}
|
Convert to json serializable dictionary.
Returns:
dict: dictionary of descriptor
|
codesearchnet
|
def get_labels(self, plt, label_fontsize=10):
if len(self.slab_regions) > 1:
label_in_vac = (self.slab_regions[0][1] + self.slab_regions[1][0])/2
if abs(self.slab_regions[0][0]-self.slab_regions[0][1]) > \
abs(self.slab_regions[1][0]-self.slab_regions[1][1]):
label_in_bulk = self.slab_regions[0][1]/2
else:
label_in_bulk = (self.slab_regions[1][1] + self.slab_regions[1][0]) / 2
else:
label_in_bulk = (self.slab_regions[0][0] + self.slab_regions[0][1])/2
if self.slab_regions[0][0] > 1-self.slab_regions[0][1]:
label_in_vac = self.slab_regions[0][0] / 2
else:
label_in_vac = (1 + self.slab_regions[0][1]) / 2
plt.plot([0, 1], [self.vacuum_locpot]*2, 'b--', zorder=-5, linewidth=1)
xy = [label_in_bulk, self.vacuum_locpot+self.ave_locpot*0.05]
plt.annotate(r"$V_{vac}=%.2f$" %(self.vacuum_locpot), xy=xy,
xytext=xy, color='b', fontsize=label_fontsize)
plt.plot([0, 1], [self.efermi]*2, 'g--',
zorder=-5, linewidth=3)
xy = [label_in_bulk, self.efermi+self.ave_locpot*0.05]
plt.annotate(r"$E_F=%.2f$" %(self.efermi), xytext=xy,
xy=xy, fontsize=label_fontsize, color='g')
plt.plot([0, 1], [self.ave_bulk_p]*2, 'r--', linewidth=1., zorder=-1)
xy = [label_in_vac, self.ave_bulk_p + self.ave_locpot * 0.05]
plt.annotate(r"$V^{interior}_{slab}=%.2f$" % (self.ave_bulk_p),
xy=xy, xytext=xy, color='r', fontsize=label_fontsize)
plt.plot([label_in_vac]*2, [self.efermi, self.vacuum_locpot],
'k--', zorder=-5, linewidth=2)
xy = [label_in_vac, self.efermi + self.ave_locpot * 0.05]
plt.annotate(r"$\Phi=%.2f$" %(self.work_function),
xy=xy, xytext=xy, fontsize=label_fontsize)
return plt
|
Handles the optional labelling of the plot with relevant quantities
Args:
plt (plt): Plot of the locpot vs c axis
label_fontsize (float): Fontsize of labels
Returns Labelled plt
|
juraj-google-style
|
def conv3d(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
out = nn_ops.conv3d(input_tensor, self.filters, strides=[1, 1, 2, 1, 1], dilations=[1, 1, 1, 1, 1], padding=padding, data_format='NDHWC')
if has_bias:
out = nn_ops.bias_add(out, self.bias)
if activation_fn is not None:
out = activation_fn(out)
return {'output': out}
|
Performs a 3D convolution operation.
Args:
input_tensor: Input tensor to perform convolution on.
Returns:
A map of: output key -> output result.
|
github-repos
|
def execute(source, optimize=True, output=sys.stdout, input=sys.stdin, steps=(- 1)):
from crianza import compiler
code = compiler.compile(parser.parse(source), optimize=optimize)
machine = Machine(code, output=output, input=input)
return machine.run(steps)
|
Compiles and runs program, returning the machine used to execute the
code.
Args:
source: The source code of the program to compile and run.
optimize: Whether to optimize the code after parsing it.
output: Stream which program can write output to.
input: Stream which program can read input from.
steps: An optional maximum number of instructions to execute on the
virtual machine. Set to -1 for no limit.
Returns:
A Machine instance.
|
codesearchnet
|
def stoichiometry( self ):
return Counter( { label: number for label, number in zip( self.atoms, self.atom_numbers ) } )
|
Stoichiometry for this POSCAR, as a Counter.
e.g. AB_2O_4 -> Counter( { 'A': 1, 'B': 2, 'O': 4 } )
Args:
None
Returns:
Counter: atom labels mapped to the number of each atom in this POSCAR.
|
juraj-google-style
|
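A standalone check of the Counter construction, with placeholder atom labels:
from collections import Counter

atoms = ['A', 'B', 'O']        # placeholder labels
atom_numbers = [1, 2, 4]
stoich = Counter({label: number for label, number in zip(atoms, atom_numbers)})
print(stoich)                  # Counter({'O': 4, 'B': 2, 'A': 1})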
def sdk_version(self, value):
if value == self._defaults['ai.internal.sdkVersion'] and 'ai.internal.sdkVersion' in self._values:
del self._values['ai.internal.sdkVersion']
else:
self._values['ai.internal.sdkVersion'] = value
|
The sdk_version property.
Args:
value (string). the property value.
|
juraj-google-style
|
def restore_from_checkpoint(self, session, inception_checkpoint_file, trained_checkpoint_file):
inception_exclude_scopes = ['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step', 'final_ops']
reader = tf.train.NewCheckpointReader(inception_checkpoint_file)
var_to_shape_map = reader.get_variable_to_shape_map()
all_vars = tf.contrib.slim.get_variables_to_restore(exclude=inception_exclude_scopes)
inception_vars = {var.op.name: var for var in all_vars if (var.op.name in var_to_shape_map)}
inception_saver = tf.train.Saver(inception_vars)
inception_saver.restore(session, inception_checkpoint_file)
trained_vars = tf.contrib.slim.get_variables_to_restore(exclude=(inception_exclude_scopes + inception_vars.keys()))
trained_saver = tf.train.Saver(trained_vars)
trained_saver.restore(session, trained_checkpoint_file)
|
To restore model variables from the checkpoint file.
The graph is assumed to consist of an inception model and other
layers including a softmax and a fully connected layer. The former is
pre-trained and the latter is trained using the pre-processed data. So
we restore this from two checkpoint files.
Args:
session: The session to be used for restoring from checkpoint.
inception_checkpoint_file: Path to the checkpoint file for the Inception
graph.
trained_checkpoint_file: path to the trained checkpoint for the other
layers.
|
codesearchnet
|
def __init__(self, encoder=None, encoder_config=None):
if encoder and encoder_config:
raise ValueError("If encoder is provided, encoder_config must be None.")
if encoder:
encoder_config = text_lib.TextEncoderConfig(
encoder_cls=type(encoder),
vocab_size=encoder.vocab_size)
elif encoder_config:
encoder = encoder_config.encoder
self._encoder = encoder
self._encoder_config = encoder_config
|
Constructs a Text FeatureConnector.
Args:
encoder: `tfds.features.text.TextEncoder`, an encoder that can convert
text to integers. If None, the text will be utf-8 byte-encoded.
encoder_config: `tfds.features.text.TextEncoderConfig`, needed if
restoring from a file with `load_metadata`.
|
juraj-google-style
|
def profile_write(self, profile, outfile=None):
if (outfile is None):
outfile = '{}.json'.format(profile.get('profile_name').replace(' ', '_').lower())
fqpn = os.path.join(self.profile_dir, outfile)
if os.path.isfile(fqpn):
print('Append to File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn))
with open(fqpn, 'r+') as fh:
try:
data = json.load(fh, object_pairs_hook=OrderedDict)
except ValueError as e:
self.handle_error('Can not parse JSON data ({}).'.format(e))
data.append(profile)
fh.seek(0)
fh.write(json.dumps(data, indent=2, sort_keys=True))
fh.truncate()
else:
print('Create File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, fqpn))
with open(fqpn, 'w') as fh:
data = [profile]
fh.write(json.dumps(data, indent=2, sort_keys=True))
|
Write the profile to the output directory.
Args:
profile (dict): The dictionary containing the profile settings.
outfile (str, optional): Defaults to None. The filename for the profile.
|
codesearchnet
|
def _is_in_targets(self, site, targets):
elems = self._get_elements(site)
for elem in elems:
if (elem not in targets):
return False
return True
|
Test whether all elements at a site are contained in the target list
Args:
site (Site): Site to assess
targets ([Element]): List of target elements
Returns:
(boolean) Whether every element at this site is in the target list
|
codesearchnet
|
def __init__(self, model=None, env=None, options=None):
self.event = Event.create(__name__)
self.options = options
self.model = {} if not isinstance(model, dict) else model
self.data = PipelineData()
self.data.env_list[0].update([] if env is None else env)
self.logger = Logger.get_logger(__name__)
self.variables = {}
|
Initializing pipeline with definition (loaded from a yaml file).
Args:
model (dict): if you have a model defined in your pipeline definition (yaml)
env (dict): the environment variables as defined (if any) per matrix entry
options (dict): command line options for spline
|
juraj-google-style
|
def __init__(self, x: int, y: int=1, **kwargs):
self.z = x + y + sum(kwargs.values())
|
Class A.
Args:
x: The first integer.
y: The second integer.
**kwargs: Other arguments.
|
github-repos
|
def capture_by_value(self, graph: Any, tensor: core.Tensor, name: Optional[str]=None) -> core.Tensor:
if isinstance(tensor, core.Value):
if name is None:
name = str(pywrap_tfe.TFE_Py_UID())
if tensor.dtype in dtypes.TF_VALUE_DTYPES and functools.reduce(lambda a, b: a * b, tensor.shape, 1) <= _EAGER_CONST_THRESHOLD:
graph_const = self.by_val_internal.get(id(tensor))
if graph_const is None:
graph_const = tensor._capture_as_const(name)
if graph_const is None:
graph_const = self._create_placeholder_helper(graph, tensor, name)
self.add_or_replace(key=id(tensor), external=tensor, internal=graph_const, is_by_ref=False)
graph.inputs.append(graph_const)
graph_const._record_tape(tensor)
return graph_const
return self._create_placeholder_helper(graph, tensor, name)
if tensor.graph is not graph:
graph._validate_in_scope(tensor)
if name is None:
assert tensor.op is not None, (tensor.__class__, dir(tensor), tensor.__class__.__name__)
name = tensor.op.name
return graph._capture_helper(tensor, name)
return tensor
|
Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
graph: The FuncGraph that captures this tensor.
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
Raises:
InaccessibleTensorError: if any tensors are accessed in a manner that
bypasses the mechanisms required for the data dependencies to be correctly
wired.
|
github-repos
|
def __init__(self, version: str = None, api_url: str = None) -> None:
bel_versions = bel_specification.get_bel_versions()
if not version:
self.version = config["bel"]["lang"]["default_bel_version"]
else:
self.version = version
if self.version not in bel_versions:
log.warning(
f"Cannot validate with invalid version: {self.version} in BEL Versions: {bel_versions}"
)
if not api_url:
self.api_url = config["bel_api"]["servers"]["api_url"]
else:
self.api_url = api_url
self.validation_messages = []
self.spec = bel_specification.get_specification(self.version)
try:
parser_fn = self.spec["admin"]["parser_fn"]
parser_name = os.path.basename(parser_fn).replace(".py", "")
module_spec = importlib.util.spec_from_file_location(parser_name, parser_fn)
imported_parser = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(imported_parser)
self.parser = imported_parser.BELParser()
except Exception as e:
raise bel_ex.NoParserFound(f"Version: {self.version} Msg: {e}")
|
Initialize BEL object used for validating/processing/etc BEL statements
Args:
version (str): BEL Version, defaults to config['bel']['lang']['default_bel_version']
api_url (str): BEL API endpoint, defaults to config['bel_api']['servers']['api_url']
|
juraj-google-style
|
def CompileReport(self, mediator):
path_specs_per_labels_counter = collections.Counter()
tags = []
while self._ContinueReportCompilation():
try:
self._LogProgressUpdateIfReasonable()
hash_analysis = self.hash_analysis_queue.get(
timeout=self._analysis_queue_timeout)
except Queue.Empty:
continue
pathspecs, labels, new_tags = self._HandleHashAnalysis(
hash_analysis)
tags.extend(new_tags)
for label in labels:
path_specs_per_labels_counter[label] += len(pathspecs)
self._analyzer.SignalAbort()
lines_of_text = ['{0:s} hash tagging results'.format(self.NAME)]
for label, count in sorted(path_specs_per_labels_counter.items()):
line_of_text = (
'{0:d} path specifications tagged with label: {1:s}'.format(
count, label))
lines_of_text.append(line_of_text)
lines_of_text.append('')
report_text = '\n'.join(lines_of_text)
for event_tag in tags:
mediator.ProduceEventTag(event_tag)
return reports.AnalysisReport(
plugin_name=self.NAME, text=report_text)
|
Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
|
juraj-google-style
|
def watch(static_root, watch_paths=None, on_reload=None, host='localhost', port=5555, server_base_path='/', watcher_interval=1.0, recursive=True, open_browser=True, open_browser_delay=1.0):
server = httpwatcher.HttpWatcherServer(static_root, watch_paths=watch_paths, on_reload=on_reload, host=host, port=port, server_base_path=server_base_path, watcher_interval=watcher_interval, recursive=recursive, open_browser=open_browser, open_browser_delay=open_browser_delay)
server.listen()
try:
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
server.shutdown()
|
Initialises an HttpWatcherServer to watch the given path for changes. Watches until the IO loop
is terminated, or a keyboard interrupt is intercepted.
Args:
static_root: The path whose contents are to be served and watched.
watch_paths: The paths to be watched for changes. If not supplied, this defaults to the static root.
on_reload: An optional callback to pass to the watcher server that will be executed just before the
server triggers a reload in connected clients.
host: The host to which to bind our server.
port: The port to which to bind our server.
server_base_path: If the content is to be served from a non-standard base path, specify it here.
watcher_interval: The maximum refresh rate of the watcher server.
recursive: Whether to monitor the watch path recursively.
open_browser: Whether or not to automatically attempt to open the user's browser at the root URL of
the project (default: True).
open_browser_delay: The number of seconds to wait before attempting to open the user's browser.
|
codesearchnet
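A hedged call sketch for watch(); it assumes httpwatcher and tornado are installed, and the ./site directory and port are illustrative:

# Serve and live-reload ./site on http://localhost:8000 until interrupted.
watch('./site',
      host='localhost',
      port=8000,
      watcher_interval=1.0,
      open_browser=False)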
|
def most_visited_pages_stats():
stats = {'more_than_10': [], 'less_than_10': {}}
counter = Counter(list(RequestLog.objects.values_list('url', flat=True)))
most_visited_pages = counter.most_common()
bounds = (10000, 1000, 100, 10)
subsets = [[] for _ in bounds]
for u, c in most_visited_pages:
if url_is_ignored(u):
continue
if c >= bounds[0]:
subsets[0].append([u, c])
elif c < bounds[-1]:
subsets[-1].append([u, c])
else:
for i, bound in enumerate(bounds[:-1]):
if bound > c >= bounds[i+1]:
subsets[i+1].append([u, c])
break
stats['more_than_10'] = [
{'bound': bound, 'subset': subset}
for bound, subset in zip(bounds[:-1], subsets[:-1])]
for subset in subsets[:-1]:
for uc in subset:
if url_is_project(uc[0]):
if url_is_asset(uc[0]):
uc.append(ASSET)
else:
uc.append(PROJECT)
else:
if url_is_asset(uc[0]):
uc.append(OLD_ASSET)
elif url_is_common_asset(uc[0]):
uc.append(COMMON_ASSET)
elif url_is_old_project(uc[0]):
uc.append(OLD_PROJECT)
elif url_is_false_negative(uc[0]):
uc.append(FALSE_NEGATIVE)
else:
uc.append(SUSPICIOUS)
occurrences = {name: {'distinct': 0, 'total': 0}
for name in set(URL_TYPE.keys()) - {IGNORED}}
for u, c in subsets[-1]:
if url_is_project(u):
if url_is_asset(u):
occurrences[ASSET]['distinct'] += 1
occurrences[ASSET]['total'] += c
else:
occurrences[PROJECT]['distinct'] += 1
occurrences[PROJECT]['total'] += c
else:
if url_is_asset(u):
occurrences[OLD_ASSET]['distinct'] += 1
occurrences[OLD_ASSET]['total'] += c
elif url_is_common_asset(u):
occurrences[COMMON_ASSET]['distinct'] += 1
occurrences[COMMON_ASSET]['total'] += c
elif url_is_old_project(u):
occurrences[OLD_PROJECT]['distinct'] += 1
occurrences[OLD_PROJECT]['total'] += c
elif url_is_false_negative(u):
occurrences[FALSE_NEGATIVE]['distinct'] += 1
occurrences[FALSE_NEGATIVE]['total'] += c
else:
occurrences[SUSPICIOUS]['distinct'] += 1
occurrences[SUSPICIOUS]['total'] += c
stats['less_than_10'] = occurrences
return stats
|
Get stats for most visited pages.
Args:
None. Request log data is queried directly from RequestLog.
Returns:
dict: more_than_10 and less_than_10: list of dict (bound + url list).
|
juraj-google-style
|
def save_json(dictionary, path, pretty=False, sortkeys=False):
with open(path, 'w') as f:
if pretty:
indent = 2
separators = (',', ': ')
else:
indent = None
separators = (', ', ': ')
json.dump(dictionary, f, indent=indent, sort_keys=sortkeys, separators=separators)
|
Save dictionary to JSON file preserving order if it is an OrderedDict
Args:
dictionary (Dict): Python dictionary to save
path (str): Path to JSON file
pretty (bool): Whether to pretty print. Defaults to False.
sortkeys (bool): Whether to sort dictionary keys. Defaults to False.
Returns:
None
|
juraj-google-style
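A quick usage sketch of save_json, showing both insertion-order and sorted-key output (file names are illustrative):

from collections import OrderedDict

data = OrderedDict([('b', 2), ('a', 1)])
save_json(data, 'ordered.json', pretty=True)                # keys kept in insertion order: b, a
save_json(data, 'sorted.json', pretty=True, sortkeys=True)  # keys written alphabetically: a, b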
|
def set_of_vars(lovs):
return set((var for pvars in lovs for svars in pvars for var in svars))
|
Build set of variables from list.
Args:
lovs: nested lists of variables such as the one produced by
:func:`list_of_vars`.
Returns:
set of str: flattened set of all the variables present in the
nested lists.
|
codesearchnet
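A small example of the flattening performed by set_of_vars; the three nesting levels below mirror the list_of_vars structure mentioned in the docstring:

lovs = [
    [['T', 'v'], ['eta']],   # first figure: two panels
    [['T', 'rho']],          # second figure: one panel
]
print(set_of_vars(lovs))     # {'T', 'v', 'eta', 'rho'} (set order may vary)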
|
def __init__(self, daily_req_limit=None, dup_interval=None):
self.daily_req_limit = daily_req_limit
self.dup_interval = dup_interval
|
Create flow throttler object.
Args:
daily_req_limit: Number of flows allow per user per client. Integer.
dup_interval: rdfvalue.Duration time during which duplicate flows will be
blocked.
|
juraj-google-style
|
def __init__(self, exprs):
self.exprs = exprs
|
Initialize a conjunction.
Args:
exprs: A set. The subterms.
|
github-repos
|
def build(self):
if self.colour:
embed = discord.Embed(title=self.title, type='rich', description=self.description, colour=self.colour)
else:
embed = discord.Embed(title=self.title, type='rich', description=self.description)
if self.thumbnail:
embed.set_thumbnail(url=self.thumbnail)
if self.image:
embed.set_image(url=self.image)
embed.set_author(name='Modis', url='https:
for pack in self.datapacks:
embed.add_field(name=pack[0], value=pack[1], inline=pack[2])
return embed
|
Builds Discord embed GUI
Returns:
discord.Embed: Built GUI
|
codesearchnet
|
def ensure_dir_path(self, path, relative=False):
if (not relative):
rel_path = self.relpath(path)
else:
rel_path = path
if self.is_locator(rel_path, relative=True):
path = path.rstrip('/')
elif rel_path:
path = (path.rstrip('/') + '/')
return path
|
Ensure the path is a dir path.
Should end with '/' except for schemes and locators.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
Returns:
str: The directory path, ending with '/' except for schemes and locators.
|
codesearchnet
|
def fields_to_string(fields, values={}):
items = [repr(field['name']) + ':' + repr(values.get(field['name'], field.get('default', ''))) + ',' + (' # ' + field['description'] if 'description' in field else '') for field in fields]  # 'description' key assumed for the inline field comments shown in the docstring
return '{\n %s\n}' % '\n '.join(items) if items else '{}'
|
Converts fields to a dictionary of parameters as a string.
Used to generate input blocks in generated code.
For example:
{
'auth':'user', # authentication to use for call.
'name':'value', # parameter to pass to function.
}
Args:
- fields: (list) Contains {"field":{...}} objects to be rendered as inputs.
- values: (dict) Default values to use for each field.
Returns:
String representing python code to be written to a generated file.
|
github-repos
|
def impulse_noise(x, severity=1):
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
|
Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
|
juraj-google-style
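A hedged usage sketch; it assumes tensorflow-datasets (with its lazy scikit-image import) and numpy are available, and feeds a random uint8 image:

import numpy as np

image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)  # fake RGB image
noisy = impulse_noise(image, severity=3)
print(noisy.shape, noisy.min(), noisy.max())  # same shape, pixel values clipped to [0, 255]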
|
def repr(self, changed_widgets=None):
if changed_widgets is None:
changed_widgets = {}
local_changed_widgets = {}
_innerHTML = self.innerHTML(local_changed_widgets)
if self._ischanged() or ( len(local_changed_widgets) > 0 ):
self._backup_repr = ''.join(('<', self.type, ' ', self._repr_attributes, '>',
_innerHTML, '</', self.type, '>'))
if self._ischanged():
changed_widgets[self] = self._backup_repr
self._set_updated()
else:
changed_widgets.update(local_changed_widgets)
return self._backup_repr
|
Represents the object in HTML format, automatically packing all the
attributes, children and so on.
Args:
changed_widgets (dict): A dictionary containing a collection of tags that have to be updated.
The tag that have to be updated is the key, and the value is its textual repr.
|
juraj-google-style
|
def diff_bisectSplit(self, text1, text2, x, y, deadline):
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
diffs = self.diff_main(text1a, text2a, False, deadline)
diffsb = self.diff_main(text1b, text2b, False, deadline)
return (diffs + diffsb)
|
Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
|
codesearchnet
|
def modify_model_backprop(model, backprop_modifier):
modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier))
if modified_model is not None:
return modified_model
model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
try:
model.save(model_path)
modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier)
if modifier_fn is None:
raise ValueError("'{}' modifier is not supported".format(backprop_modifier))
modifier_fn(backprop_modifier)
with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}):
modified_model = load_model(model_path)
_MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model
return modified_model
finally:
os.remove(model_path)
|
Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior.
Args:
model: The `keras.models.Model` instance.
backprop_modifier: One of `{'guided', 'rectified'}`
Returns:
A copy of model with modified activations for backwards pass.
|
juraj-google-style
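A hedged sketch of the call; `model` is assumed to be a compiled Keras model with ReLU activations, running under the TF 1.x-style graph/session this helper relies on:

# Copies of the model whose ReLU gradients are overridden for saliency work.
guided_model = modify_model_backprop(model, 'guided')
rectified_model = modify_model_backprop(model, 'rectified')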
|
def get_facets(qhull_data, joggle=False):
if joggle:
return ConvexHull(qhull_data, qhull_options='QJ i').simplices
else:
return ConvexHull(qhull_data, qhull_options='Qt i').simplices
|
Get the simplex facets for the Convex hull.
Args:
qhull_data (np.ndarray): The data from which to construct the convex
hull as a Nxd array (N being number of data points and d being the
dimension)
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
List of simplices of the Convex Hull.
|
codesearchnet
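A runnable sketch on a tiny 2-D point set, assuming scipy.spatial.ConvexHull is the ConvexHull used above:

import numpy as np
from scipy.spatial import ConvexHull  # assumed import backing get_facets

points = np.array([[0, 0], [1, 0], [0, 1], [1, 1], [0.5, 0.5]])
facets = get_facets(points)           # each row: indices of the points forming one hull facet
print(facets)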
|
def pull_reply(self, param=None, must=[APIKEY]):
param = {} if param is None else param
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V1:rsp[SMS_REPLY] if SMS_REPLY in rsp else None, VERSION_V2:rsp}[self.version()])
return self.path('pull_reply.json').post(param, h, r)
|
Fetch SMS replies.
Parameter  Type  Required  Description  Example
apikey  String  Yes  Unique user identifier  9b11127a9701975c734b8aee81ee3526
page_size  Integer  No  Items per page, max 100, default 20  20
Args:
param: dict of request parameters (see the table above).
Results:
Result
|
juraj-google-style
|
def survey_basis(self, keys=None, alias=None, step=None):
if (keys is None):
keys = [k for (k, v) in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
(starts, stops, steps) = ([], [], [])
for k in keys:
d = self.get_curve(k, alias=alias)
if (keys and (d is None)):
continue
try:
starts.append(d.basis[0])
stops.append(d.basis[(- 1)])
steps.append((d.basis[1] - d.basis[0]))
except Exception as e:
pass
if (starts and stops and steps):
step = (step or min(steps))
return np.arange(min(starts), (max(stops) + 1e-09), step)
else:
return None
|
Look at the basis of all the curves in ``well.data`` and return a
basis with the minimum start, maximum depth, and minimum step.
Args:
keys (list): List of strings: the keys of the data items to
survey, if not all of them.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
step (float): a new step, if you want to change it.
Returns:
ndarray. The most complete common basis.
|
codesearchnet
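A hedged call sketch; `w` is assumed to be a welly Well whose data dict holds Curve objects, and the mnemonics are illustrative:

basis = w.survey_basis(keys=['GR', 'DT'], step=0.5)  # common basis resampled to a 0.5-unit step
if basis is not None:
    print(basis[0], basis[-1], basis.size)           # start depth, stop depth, sample count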
|
def get_url_preview(self, url, ts=None):
params = {'url': url}
if ts:
params['ts'] = ts
return self._send(
"GET", "",
query_params=params,
api_path="/_matrix/media/r0/preview_url"
)
|
Get preview for URL.
Args:
url (str): URL to get a preview
ts (double): The preferred point in time to return
a preview for. The server may return a newer
version if it does not have the requested
version available.
|
juraj-google-style
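A hedged sketch; `api` is assumed to be an authenticated matrix-client API object exposing get_url_preview(), and the URL is illustrative:

preview = api.get_url_preview('https://example.org', ts=0)
print(preview.get('og:title'), preview.get('og:image'))  # OpenGraph fields from the preview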
|
def __write_to_fil_heavy(self, filename_out, *args, **kwargs):
chunk_dim = self.__get_chunk_dimensions()
blob_dim = self.__get_blob_dimensions(chunk_dim)
n_blobs = self.container.calc_n_blobs(blob_dim)
n_bytes = self.header[b'nbits'] / 8
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self))
logger.info('Using %i n_blobs to write the data.'% n_blobs)
for ii in range(0, n_blobs):
logger.info('Reading %i of %i' % (ii + 1, n_blobs))
bob = self.container.read_blob(blob_dim,n_blob=ii)
with open(filename_out, "a") as fileh:
j = bob
if n_bytes == 4:
np.float32(j.ravel()).tofile(fileh)
elif n_bytes == 2:
np.int16(j.ravel()).tofile(fileh)
elif n_bytes == 1:
np.int8(j.ravel()).tofile(fileh)
|
Write data to .fil file.
Args:
filename_out (str): Name of output file
|
juraj-google-style
|