code | docstring | source |
---|---|---|
def flatten(sequence):
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [item.flow if isinstance(item, tensor_array_ops.TensorArray) else item for item in flat_sequence]
|
Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.
Args:
sequence: A nested structure of Tensors, CompositeTensors, and TensorArrays.
Returns:
A list of tensors.
|
github-repos
|
def AddWatchOnly(self, script_hash):
if script_hash in self._contracts:
logger.error("Address already in contracts")
return
self._watch_only.append(script_hash)
|
Add a watch only address to the wallet.
Args:
script_hash (UInt160): a bytearray (len 20) representing the public key.
Note:
Prints a warning to the console if the address already exists in the wallet.
|
juraj-google-style
|
def check_annotation_type_mismatch(self, node, name, typ, value, stack, allow_none, details=None):
if not typ or not value:
return
if value.data == [self.convert.ellipsis] or (allow_none and value.data == [self.convert.none]):
return
contained_type = abstract_utils.match_type_container(typ, ('typing.ClassVar', 'dataclasses.InitVar'))
if contained_type:
typ = contained_type
bad = self.matcher(node).compute_one_match(value, typ).bad_matches
for match in bad:
self.errorlog.annotation_type_mismatch(stack, match.expected.typ, match.actual_binding, name, match.error_details, details)
|
Checks for a mismatch between a variable's annotation and value.
Args:
node: node
name: variable name
typ: variable annotation
value: variable value
stack: a frame stack for error reporting
allow_none: whether a value of None is allowed for any type
details: any additional details to add to the error message
|
github-repos
|
def _case_create_default_action(predicates, actions):
k = len(predicates) - 1
predicate, action = (predicates[k], actions[k])
other_predicates, other_actions = (predicates[:k], actions[:k])
def default_action():
others_msg = 'Implementation error: selected default action'
default_msg = ('Input error: None of conditions evaluated as True:', array_ops_stack.stack(predicates, name='preds_c'))
with ops.control_dependencies([_assert_at_most_n_true(other_predicates, n=0, msg=others_msg), control_flow_assert.Assert(predicate, data=default_msg)]):
return action()
return (default_action, other_predicates, other_actions)
|
Creates default action for a list of actions and their predicates.
It uses the input actions to select an arbitrary one as the default and makes sure
that corresponding predicates have valid values.
Args:
predicates: a list of bool scalar tensors
actions: a list of callable objects which return tensors.
Returns:
a callable
|
github-repos
|
def RemoveConnectedPeer(self, peer):
if (peer in self.Peers):
self.Peers.remove(peer)
|
Remove a connected peer from the known peers list.
Args:
peer (NeoNode): instance.
|
codesearchnet
|
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
for processor in self:
function_args = inspect.signature(processor.__call__).parameters
if len(function_args) > 2:
if not all((arg in kwargs for arg in list(function_args.keys())[2:])):
raise ValueError(f'Make sure that all the required parameters: {list(function_args.keys())} for {processor.__class__} are passed to the logits processor.')
scores = processor(input_ids, scores, **kwargs)
else:
scores = processor(input_ids, scores)
return scores
|
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
beam search or log softmax for each vocabulary token when using beam search.
kwargs (`Dict[str, Any]`, *optional*):
Additional kwargs that are specific to a logits processor.
Return:
`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:
The processed prediction scores.
|
github-repos
|
def parquet_to_df(filename, use_threads=1):
try:
return pq.read_table(filename, use_threads=use_threads).to_pandas()
except pa.lib.ArrowIOError:
print('Could not read parquet file {:s}'.format(filename))
return None
|
parquet_to_df: Reads a Parquet file into a Pandas DataFrame
Args:
filename (string): The full path to the filename for the Parquet file
use_threads (int): The number of threads to use (defaults to 1)
|
juraj-google-style
|
def resumeProducing(self):
self._running = True
for consumer in self._consumers.values():
(queue_object, _) = (yield consumer.channel.basic_consume(queue=consumer.queue, consumer_tag=consumer.tag))
deferred = self._read(queue_object, consumer)
deferred.addErrback((lambda f: _legacy_twisted_log.msg), '_read failed on consumer {c}', c=consumer, logLevel=logging.ERROR)
_legacy_twisted_log.msg('AMQP connection successfully established')
|
Starts or resumes the retrieval of messages from the server queue.
This method starts receiving messages from the server; they will be
passed to the consumer callback.
.. note:: This is called automatically when :meth:`.consume` is called,
so users should not need to call this unless :meth:`.pauseProducing`
has been called.
Returns:
defer.Deferred: fired when the production is ready to start
|
codesearchnet
|
def create_pipeline_box(self, pipeline_key, name, **kwargs):
if not (pipeline_key and name):
return requests.codes.bad_request, None
uri = '/'.join([
self.api_uri,
self.pipelines_suffix,
pipeline_key,
self.boxes_suffix
])
kwargs.update({'name':name})
new_box = StreakBox(**kwargs)
code, data = self._req('put', uri, new_box.to_dict(rw = True))
return code, data
|
Creates a box in the specified pipeline with the provided attributes.
Args:
name required name string
kwargs {...} see StreakBox object for details
return (status code, box dict)
|
juraj-google-style
|
def display_arr(screen, arr, video_size, transpose):
if transpose:
pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1))
else:
pyg_img = arr
pyg_img = pygame.transform.scale(pyg_img, video_size)
screen.blit(pyg_img, (0, 0))
|
Display an image to the pygame screen.
Args:
screen (pygame.Surface): the pygame surface to write frames to
arr (np.ndarray): numpy array representing a single frame of gameplay
video_size (tuple): the size to render the frame as
transpose (bool): whether to transpose the frame before displaying
Returns:
None
|
codesearchnet
|
async def export_image(self, name: str):
response = await self.docker._query(
"images/{name}/get".format(name=name), "GET"
)
return response.content
|
Get a tarball of an image by name or id.
Args:
name: name/id of the image to be exported
Returns:
Streamreader of tarball image
|
juraj-google-style
|
def execute_no_wait(self, cmd, walltime=2, envs={}):
(stdin, stdout, stderr) = self.ssh_client.exec_command(self.prepend_envs(cmd, envs), bufsize=(- 1), timeout=walltime)
return (None, stdout, stderr)
|
Execute asynchronously without waiting for the exit code
Args:
- cmd (string): Commandline string to be executed on the remote side
- walltime (int): timeout to exec_command
KWargs:
- envs (dict): A dictionary of env variables
Returns:
- None, stdout (readable stream), stderr (readable stream)
Raises:
- ChannelExecFailed (reason)
|
codesearchnet
|
def run(self, *args, backend=None, **kwargs):
if (backend is None):
if (self._default_backend is None):
backend = self.__get_backend(DEFAULT_BACKEND_NAME)
else:
backend = self.__get_backend(self._default_backend)
elif isinstance(backend, str):
backend = self.__get_backend(backend)
return backend.run(self.ops, self.n_qubits, *args, **kwargs)
|
Run the circuit.
`Circuit` has several backends. When the `backend` parameter is specified,
the specified backend is used; otherwise, the default backend is used.
Other parameters are passed to the backend.
The meaning of these parameters depends on the backend specification.
However, following parameters are commonly used.
Commonly used args (Depends on backend):
shots (int, optional): The number of times to sample the circuit.
returns (str, optional): The category of the returned value.
e.g. "statevector" returns the state vector after running the circuit.
"shots" returns the counter of measured values.
token, url (str, optional): The token and URL for cloud resource.
Returns:
Depends on backend.
Raises:
Depends on backend.
|
codesearchnet
|
def split(self, grouper):
data = self.to_df(condition=True, entities=True)
data = data.drop('condition', axis=1)
subsets = []
for i, (name, g) in enumerate(data.groupby(grouper)):
name = '%s.%s' % (self.name, name)
col = self.__class__(name=name, data=g, source=self.source,
run_info=getattr(self, 'run_info', None))
subsets.append(col)
return subsets
|
Split the current SparseRunVariable into multiple columns.
Args:
grouper (iterable): list to groupby, where each unique value will
be taken as the name of the resulting column.
Returns:
A list of SparseRunVariables, one per unique value in the
grouper.
|
juraj-google-style
|
def shapes_match(a, b):
if (isinstance(a, (tuple, list)) and isinstance(b, (tuple, list))):
if (len(a) != len(b)):
return False
return all([shapes_match(ia, ib) for (ia, ib) in zip(a, b)])
elif (isinstance(a, dict) and isinstance(b, dict)):
if (len(a) != len(b)):
return False
match = True
for ((ak, av), (bk, bv)) in zip(a.items(), b.items()):
match = (match and all([((ak == bk) and shapes_match(av, bv))]))
return match
else:
shape_checker = shape_checkers[(type(a), type(b))]
return shape_checker(a, b)
|
Recursively check if shapes of object `a` and `b` match.
Will walk lists, tuples and dicts.
Args:
a: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict)
to check for matching shapes against `b`.
b: object to check for matching shape against `a`.
Returns:
A boolean indicating whether the shapes of `a` and `b` match.
|
codesearchnet
|
def get_connected_client(self):
if (self.__sem is not None):
(yield self.__sem.acquire())
client = None
(newly_created, client) = self._get_client_from_pool_or_make_it()
if newly_created:
res = (yield client.connect())
if (not res):
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(ClientError(("can't connect to %s" % client.title)))
raise tornado.gen.Return(client)
|
Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
|
codesearchnet
|
def get_bucket(self, key, rate=None, capacity=None, **kwargs):
return buckets.Bucket(key=key, rate=(rate or self.rate), capacity=(capacity or self.capacity), storate=self.storate, **kwargs)
|
Fetch a Bucket for the given key.
rate and capacity might be overridden from the Throttler defaults.
Args:
key: The key identifying the bucket to fetch.
rate (float): Units regenerated per second, or None to keep
Throttler defaults
capacity (int): Maximum units available, or None to keep Throttler
defaults
|
codesearchnet
|
def _recurse(self, matrix, m_list, indices, output_m_list=[]):
if self._finished:
return
while (m_list[(- 1)][1] == 0):
m_list = copy(m_list)
m_list.pop()
if (not m_list):
matrix_sum = np.sum(matrix)
if (matrix_sum < self._current_minimum):
self.add_m_list(matrix_sum, output_m_list)
return
if (m_list[(- 1)][1] > len(indices.intersection(m_list[(- 1)][2]))):
return
if ((len(m_list) == 1) or (m_list[(- 1)][1] > 1)):
if (self.best_case(matrix, m_list, indices) > self._current_minimum):
return
index = self.get_next_index(matrix, m_list[(- 1)], indices)
m_list[(- 1)][2].remove(index)
matrix2 = np.copy(matrix)
m_list2 = deepcopy(m_list)
output_m_list2 = copy(output_m_list)
matrix2[index, :] *= m_list[(- 1)][0]
matrix2[:, index] *= m_list[(- 1)][0]
output_m_list2.append([index, m_list[(- 1)][3]])
indices2 = copy(indices)
indices2.remove(index)
m_list2[(- 1)][1] -= 1
self._recurse(matrix2, m_list2, indices2, output_m_list2)
self._recurse(matrix, m_list, indices, output_m_list)
|
This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them.
|
codesearchnet
|
def __init__(self, d: Dict, nlp) -> None:
self.dependencies = d["dependencies"] if "dependencies" in d else []
self.description = d["description"] if "description" in d else ""
self.active = tf_transfer(d["is_active"])
self.identifier = d["identifier"]
self.output_format = d["output_format"]
self.polarity = tf_transfer(d["polarity"])
self.patterns = []
for pattern_idx, a_pattern in enumerate(d["pattern"]):
this_pattern = Pattern(a_pattern, nlp)
self.patterns.append(this_pattern)
|
Store information for each Rule and create the list of Pattern objects for the rule.
Args:
d: Dict
nlp: the NLP object used to construct each Pattern
Returns:
|
juraj-google-style
|
def select_rows(self, rows):
self.values = self.values.iloc[rows]
self.index = self.index.iloc[rows, :]
for prop in self._property_columns:
vals = getattr(self, prop)[rows]
setattr(self, prop, vals)
|
Truncate internal arrays to keep only the specified rows.
Args:
rows (array): An integer or boolean array identifying the indices
of rows to keep.
|
juraj-google-style
|
def serialize(self, datas):
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references'])
|
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
|
juraj-google-style
|
def __mul__(self, right: torch.Tensor) -> Rigid:
if not isinstance(right, torch.Tensor):
raise TypeError('The other multiplicand must be a Tensor')
new_rots = self._rots * right
new_trans = self._trans * right[..., None]
return Rigid(new_rots, new_trans)
|
Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.
Args:
right:
The tensor multiplicand
Returns:
The product
|
github-repos
|
def _maybe_cast_inputs(self, inputs, input_list=None):
if not input_list:
input_list = nest.flatten(inputs)
compute_dtype_object = self._compute_dtype_object
should_autocast = self._autocast and compute_dtype_object and compute_dtype_object.is_floating
if should_autocast and any(map(self._should_cast_single_input, input_list)):
return nest.map_structure(self._cast_single_input, inputs)
else:
return inputs
|
Maybe casts the inputs to the compute dtype.
If self._compute_dtype is floating-point, and self._autocast is True,
floating-point inputs are cast to self._compute_dtype.
Args:
inputs: Input tensor, or structure of input tensors.
input_list: Flat list of input tensors.
Returns:
`inputs`, but tensors may have been cast to self._compute_dtype
|
github-repos
|
def _shannon_radii_from_cn(species_list, cn_roman, radius_to_compare=0):
shannon_radii = []
for s in species_list:
try:
radius = s.get_shannon_radius(cn_roman)
shannon_radii.append({'species': s, 'radius': radius, 'radii_diff': (radius - radius_to_compare)})
except KeyError:
pass
return shannon_radii
|
Utility func to get Shannon radii for a particular coordination number.
As the Shannon radii depend on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Specie.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument.
|
codesearchnet
|
def _merge_with(self, other: 'DynamicRaggedShape') -> 'DynamicRaggedShape':
max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)
a = self._with_num_row_partitions(max_num_row_partitions)
b = other._with_num_row_partitions(max_num_row_partitions)
new_row_partitions = [rp_a._merge_precomputed_encodings(rp_b) for rp_a, rp_b in zip(a._row_partitions, b._row_partitions)]
new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64
new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)
new_inner_shape = a._inner_shape
return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, True, new_static_inner_shape)
|
Merge two shapes that are equal modulo num_row_partitions.
The resulting num_row_partitions is the maximum of the two
num_row_partitions.
Args:
other: a DynamicRaggedShape representing the same shape with a possibly
different number of row partitions.
Returns:
A DynamicRaggedShape with the same shape and the maximum of the
num_row_partitions of the two shapes.
|
github-repos
|
def get_interpolated_value(self, x):
if (len(self.ydim) == 1):
return get_linear_interpolated_value(self.x, self.y, x)
else:
return [get_linear_interpolated_value(self.x, self.y[:, k], x) for k in range(self.ydim[1])]
|
Returns an interpolated y value for a particular x value.
Args:
x: x value to return the y value for
Returns:
Value of y at x
|
codesearchnet
|
def _MakeTimestamp(self, start=None, end=None):
mysql_unsigned_bigint_max = 18446744073709551615
ts_start = int(start or 0)
if end is None:
ts_end = mysql_unsigned_bigint_max
else:
ts_end = int(end)
if ts_start == 0 and ts_end == mysql_unsigned_bigint_max:
return None
else:
return (ts_start, ts_end)
|
Create a timestamp using a start and end time.
Args:
start: Start timestamp.
end: End timestamp.
Returns:
A tuple (start, end) of converted timestamps or None for all time.
|
juraj-google-style
|
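A quick behavior sketch, calling the _MakeTimestamp snippet above as a plain function since `self` is unused in the body shown:
print(_MakeTimestamp(None))              # None -> match all time
print(_MakeTimestamp(None, start=10))    # (10, 18446744073709551615)
print(_MakeTimestamp(None, 0, 500))      # (0, 500)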
def run_bidirectional_blast(reference, other_genome, dbtype, outdir=''):
if (dbtype == 'nucl'):
command = 'blastn'
elif (dbtype == 'prot'):
command = 'blastp'
else:
raise ValueError('dbtype must be "nucl" or "prot"')
(r_folder, r_name, r_ext) = utils.split_folder_and_path(reference)
(g_folder, g_name, g_ext) = utils.split_folder_and_path(other_genome)
run_makeblastdb(infile=reference, dbtype=dbtype, outdir=r_folder)
run_makeblastdb(infile=other_genome, dbtype=dbtype, outdir=g_folder)
r_vs_g = (((r_name + '_vs_') + g_name) + '_blast.out')
r_vs_g = op.join(outdir, r_vs_g)
if (op.exists(r_vs_g) and (os.stat(r_vs_g).st_size != 0)):
log.debug('{} vs {} BLAST already run'.format(r_name, g_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, op.join(g_folder, g_name), r_vs_g)
log.debug('Running: {}'.format(cmd))
retval = subprocess.call(cmd, shell=True)
if (retval == 0):
log.debug('BLASTed {} vs {}'.format(g_name, r_name))
else:
log.error('Error running {}, exit code {}'.format(command, retval))
g_vs_r = (((g_name + '_vs_') + r_name) + '_blast.out')
g_vs_r = op.join(outdir, g_vs_r)
if (op.exists(g_vs_r) and (os.stat(g_vs_r).st_size != 0)):
log.debug('{} vs {} BLAST already run'.format(g_name, r_name))
else:
cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, op.join(r_folder, r_name), g_vs_r)
log.debug('Running: {}'.format(cmd))
retval = subprocess.call(cmd, shell=True)
if (retval == 0):
log.debug('BLASTed {} vs {}'.format(g_name, r_name))
else:
log.error('Error running {}, exit code {}'.format(command, retval))
return (r_vs_g, g_vs_r)
|
BLAST a genome against another, and vice versa.
This function requires BLAST to be installed; install it by running:
sudo apt install ncbi-blast+
Args:
reference (str): path to "reference" genome, aka your "base strain"
other_genome (str): path to other genome which will be BLASTed to the reference
dbtype (str): "nucl" or "prot" - what format your genome files are in
outdir (str): path to folder where BLAST outputs should be placed
Returns:
Paths to BLAST output files.
(reference_vs_othergenome.out, othergenome_vs_reference.out)
|
codesearchnet
|
def gill_king(mat, eps=1e-16):
if (not scipy.sparse.issparse(mat)):
mat = numpy.asfarray(mat)
assert numpy.allclose(mat, mat.T)
size = mat.shape[0]
mat_diag = mat.diagonal()
gamma = abs(mat_diag).max()
off_diag = abs((mat - numpy.diag(mat_diag))).max()
delta = (eps * max((gamma + off_diag), 1))
beta = numpy.sqrt(max(gamma, (off_diag / size), eps))
lowtri = _gill_king(mat, beta, delta)
return lowtri
|
Gill-King algorithm for modified cholesky decomposition.
Args:
mat (numpy.ndarray):
Must be a non-singular and symmetric matrix. If sparse, the result
will also be sparse.
eps (float):
Error tolerance used in algorithm.
Returns:
(numpy.ndarray):
Lower triangular Cholesky factor.
Examples:
>>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]
>>> lowtri = gill_king(mat)
>>> print(numpy.around(lowtri, 4))
[[2. 0. 0. ]
[1. 2.2361 0. ]
[0.5 1.118 1.2264]]
>>> print(numpy.around(numpy.dot(lowtri, lowtri.T), 4))
[[4. 2. 1. ]
[2. 6. 3. ]
[1. 3. 3.004]]
|
codesearchnet
|
def _CreateOutputFileHandles(self, output_type):
gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type)
gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, "wb",
self.GZIP_COMPRESSION_LEVEL,
gzip_filehandle_parent)
self.temp_output_trackers[output_type] = TempOutputTracker(
output_type=output_type,
gzip_filehandle=gzip_filehandle,
gzip_filehandle_parent=gzip_filehandle_parent)
return self.temp_output_trackers[output_type]
|
Creates a new gzipped output tempfile for the output type.
We write JSON data to gzip_filehandle to get compressed data. We hold a
reference to the original filehandle (gzip_filehandle_parent) so we can pass
the gzip data to bigquery.
Args:
output_type: string of export type to be used in filename. e.g.
ExportedFile
Returns:
A TempOutputTracker object
|
juraj-google-style
|
async def info(self):
stat = self._items.stat()
return {'indx': self._items.index(), 'metrics': self._metrics.index(), 'stat': stat}
|
Returns information about the CryoTank instance.
Returns:
dict: A dict containing items and metrics indexes.
|
codesearchnet
|
def compute(self, x_arr, y_arr):
return np.linalg.norm(x_arr - y_arr, axis=-1)
|
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances.
|
juraj-google-style
|
def _unary_op(cls, x: 'TensorFluent', op: Callable[[tf.Tensor], tf.Tensor], dtype: tf.DType) -> 'TensorFluent':
x = x.cast(dtype)
t = op(x.tensor)
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(t, scope, batch=batch)
|
Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output.
|
codesearchnet
|
def DownloadPqlResultToList(self, pql_query, values=None):
results = []
self._PageThroughPqlSet(pql_query, results.append, values)
return results
|
Downloads the results of a PQL query to a list.
Args:
pql_query: str a statement filter to apply (the query should not include
the limit or the offset)
[optional]
values: A dict of python objects or a list of raw SOAP values to bind
to the pql_query.
Returns:
a list of lists with the first being the header row and each subsequent
list being a row of results.
|
codesearchnet
|
def is_unknown(input, model_file=None, model_proto=None, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(
input, model_file=model_file, model_proto=model_proto, name=name,
piece_type=0)
|
Returns true if input id is unknown piece.
Args:
input: An arbitrary tensor of int32.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A tensor of bool with the same shape as input.
|
juraj-google-style
|
def get_labels_encoder(self, data_dir):
label_filepath = os.path.join(data_dir, self.vocab_filename)
return text_encoder.TokenTextEncoder(label_filepath)
|
Builds encoder for the given class labels.
Args:
data_dir: data directory
Returns:
An encoder for class labels.
|
juraj-google-style
|
def define_simulation_graph(batch_env, algo_cls, config):
step = tf.Variable(0, False, dtype=tf.int32, name='global_step')
is_training = tf.placeholder(tf.bool, name='is_training')
should_log = tf.placeholder(tf.bool, name='should_log')
do_report = tf.placeholder(tf.bool, name='do_report')
force_reset = tf.placeholder(tf.bool, name='force_reset')
algo = algo_cls(batch_env, step, is_training, should_log, config)
(done, score, summary) = tools.simulate(batch_env, algo, should_log, force_reset)
message = 'Graph contains {} trainable variables.'
tf.logging.info(message.format(tools.count_weights()))
return tools.AttrDict(locals())
|
Define the algorithm and environment interaction.
Args:
batch_env: In-graph environments object.
algo_cls: Constructor of a batch algorithm.
config: Configuration object for the algorithm.
Returns:
Object providing graph elements via attributes.
|
codesearchnet
|
def _build(self, inputs):
if (self._axis is None):
axis = list(range(1, inputs.shape.ndims))
else:
axis = self._axis
original_dtype = inputs.dtype
if (original_dtype in [tf.float16, tf.bfloat16]):
inputs = tf.cast(inputs, tf.float32)
if (inputs.get_shape().ndims < 2):
raise base.NotSupportedError('Layer normalization expects inputs of at least rank 2. Got inputs of rank {}.'.format(inputs.get_shape().ndims))
params_shape = inputs.get_shape()[(- 1):]
if self._scale:
if (self.GAMMA not in self._initializers):
self._initializers[self.GAMMA] = create_gamma_initializer()
self._gamma = tf.get_variable(self.GAMMA, shape=params_shape, dtype=inputs.dtype, initializer=self._initializers[self.GAMMA], partitioner=self._partitioners.get(self.GAMMA), regularizer=self._regularizers.get(self.GAMMA))
else:
self._gamma = None
if self._offset:
if (self.BETA not in self._initializers):
self._initializers[self.BETA] = create_beta_initializer()
self._beta = tf.get_variable(self.BETA, shape=params_shape, dtype=inputs.dtype, initializer=self._initializers[self.BETA], partitioner=self._partitioners.get(self.BETA), regularizer=self._regularizers.get(self.BETA))
else:
self._beta = None
(mean, var) = tf.nn.moments(inputs, axis, keep_dims=True)
normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta, self._gamma, self._eps)
if (original_dtype in [tf.float16, tf.bfloat16]):
normalized = tf.cast(normalized, dtype=original_dtype)
return normalized
|
Connects the LayerNorm module into the graph.
Args:
inputs: a Tensor of dimensionality >= 2.
Returns:
normalized: layer normalized outputs with same shape as inputs.
Raises:
base.NotSupportedError: If `inputs` has less than 2 dimensions.
|
codesearchnet
|
def __init__(self, augmented_graph_view: _AugmentedGraphView, options: save_options.SaveOptions):
self.augmented_graph_view = augmented_graph_view
self.options = options
self._trackable_objects, self.node_paths, self.node_ids, self._slot_variables, self.object_names = checkpoint_util.objects_ids_and_slot_variables_and_paths(self.augmented_graph_view)
untraced_functions = self.augmented_graph_view.untraced_functions
if untraced_functions:
logging.info('Found untraced functions such as %s while saving (showing %d of %d). These functions will not be directly callable after loading.', ', '.join(untraced_functions[:_NUM_DISPLAY_UNTRACED_FUNCTIONS]), min(_NUM_DISPLAY_UNTRACED_FUNCTIONS, len(untraced_functions)), len(untraced_functions))
self._initialize_save_and_restore_functions()
self._initialize_nodes_and_concrete_functions()
self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary()
|
Initializes a SaveableView.
Args:
augmented_graph_view: A GraphView object.
options: A SaveOptions instance.
|
github-repos
|
def _get_trainable_state(self):
trainable_state = weakref.WeakKeyDictionary()
for layer in self._flatten_layers():
trainable_state[layer] = layer.trainable
return trainable_state
|
Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
|
github-repos
|
def n_choose_k(n, k):
if n == 0:
return 0
return reduce(lambda x, y: x * y[0] / y[1],
zip(range(n - k + 1, n + 1),
range(1, k + 1)), 1)
|
Return the number of combinations for n choose k.
Args:
n (int): the total number of options.
k (int): The number of elements.
Returns:
int: returns the binomial coefficient
|
juraj-google-style
|
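A quick usage sketch, assuming the n_choose_k snippet above is importable (its reduce comes from functools):
print(n_choose_k(5, 2))   # 10.0 -- true division inside the reduce yields a float
print(n_choose_k(0, 3))   # 0, by the early-return convention above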
def check_secret(self, secret):
try:
return hmac.compare_digest(secret, self.secret)
except AttributeError:
return secret == self.secret
|
Checks if the secret string used in the authentication attempt
matches the "known" secret string. Some mechanisms will override this
method to control how this comparison is made.
Args:
secret: The secret string to compare against what was used in the
authentication attempt.
Returns:
True if the given secret matches the authentication attempt.
|
juraj-google-style
|
def _compute_sum_image(features, max_area_width, max_area_height=1, height=1,
name=None):
with tf.name_scope(name, default_name="compute_sum_image"):
feature_shape = common_layers.shape_list(features)
batch_size = feature_shape[0]
length = feature_shape[-2]
depth = feature_shape[-1]
width = length
features_2d = tf.reshape(features, [batch_size, height, width, depth])
width_cum = tf.cumsum(features_2d, axis=-2, name="compute_integral_h")
integral_image = tf.cumsum(width_cum, axis=-3, name="compute_integral_v")
padded_image = tf.pad(
integral_image, [[0, 0], [1, 0], [1, 0], [0, 0]], constant_values=0)
height_list = []
width_list = []
dst_images = []
src_images_diag = []
src_images_h = []
src_images_v = []
size_tensor = tf.ones_like(padded_image[:, :, :, 0],
dtype=tf.int32)
for area_height in range(max_area_height):
for area_width in range(max_area_width):
dst_images.append(
tf.reshape(
padded_image[:, area_height + 1:, area_width + 1:, :],
[batch_size, -1, depth]))
src_images_diag.append(
tf.reshape(
padded_image[:, :-area_height - 1, :-area_width - 1, :],
[batch_size, -1, depth]))
src_images_h.append(
tf.reshape(
padded_image[:, area_height + 1:, :-area_width - 1, :],
[batch_size, -1, depth]))
src_images_v.append(
tf.reshape(
padded_image[:, :-area_height - 1, area_width + 1:, :],
[batch_size, -1, depth]))
height_list.append(
tf.reshape(
size_tensor[:, area_height + 1:, area_width + 1:] *\
(area_height + 1), [batch_size, -1]))
width_list.append(
tf.reshape(
size_tensor[:, area_height + 1:, area_width + 1:] *\
(area_width + 1), [batch_size, -1]))
sum_image = tf.subtract(
tf.concat(dst_images, axis=1) + tf.concat(src_images_diag, axis=1),
tf.concat(src_images_v, axis=1) + tf.concat(src_images_h, axis=1))
area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2)
area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2)
return sum_image, area_heights, area_widths
|
Computes area sums for features.
Args:
features: a Tensor in a shape of [batch_size, height * width, depth].
max_area_width: the max width allowed for an area.
max_area_height: the max height allowed for an area.
height: the height of the image.
name: the namescope.
Returns:
sum_image: A Tensor of shape [batch_size, num_areas, depth]
area_heights: A Tensor of shape [batch_size, num_areas, 1]
area_widths: A Tensor of shape [batch_size, num_areas, 1]
|
juraj-google-style
|
def labels_to_dataset(labels, label_mode, num_classes):
label_ds = tf.data.Dataset.from_tensor_slices(labels)
if label_mode == 'binary':
label_ds = label_ds.map(lambda x: tf.expand_dims(tf.cast(x, 'float32'), axis=-1), num_parallel_calls=tf.data.AUTOTUNE)
elif label_mode == 'categorical':
label_ds = label_ds.map(lambda x: tf.one_hot(x, num_classes), num_parallel_calls=tf.data.AUTOTUNE)
return label_ds
|
Create a `tf.data.Dataset` from the list/tuple of labels.
Args:
labels: list/tuple of labels to be converted into a `tf.data.Dataset`.
label_mode: String describing the encoding of `labels`. Options are:
- `"binary"` indicates that the labels (there can be only 2) are encoded
as `float32` scalars with values 0 or 1
(e.g. for `binary_crossentropy`).
- `"categorical"` means that the labels are mapped into a categorical
vector. (e.g. for `categorical_crossentropy` loss).
num_classes: number of classes of labels.
Returns:
A `tf.data.Dataset` instance.
|
github-repos
|
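A minimal usage sketch for labels_to_dataset above, assuming TensorFlow is installed; the labels and class count are invented for illustration:
import tensorflow as tf

labels = [0, 1, 2]
ds = labels_to_dataset(labels, label_mode='categorical', num_classes=3)
for one_hot in ds:
    print(one_hot.numpy())   # [1. 0. 0.], then [0. 1. 0.], then [0. 0. 1.]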
def _value_to_pb(value, proto_type):
data_type_pb = getattr(google_dot_protobuf_dot_wrappers__pb2, proto_type)()
ParseDict(value, data_type_pb)
return data_type_pb
|
Convert a value to protobuf. e.g. BoolValue, Int32Value.
Args:
value (dict): A dict that needs to be converted to protobuf.
proto_type (str): The type of the Protobuf.
Returns:
An instance of the specified protobuf.
|
juraj-google-style
|
def __init__(self, value=True, tag=enums.Tags.DEFAULT):
super(Boolean, self).__init__(tag, type=enums.Types.BOOLEAN)
self.logger = logging.getLogger(__name__)
self.value = value
self.length = self.LENGTH
self.validate()
|
Create a Boolean object.
Args:
value (bool): The value of the Boolean. Optional, defaults to True.
tag (Tags): An enumeration defining the tag of the Boolean object.
Optional, defaults to Tags.DEFAULT.
|
juraj-google-style
|
def from_json_file(cls, file_name):
with open(file_name) as json_data:
config = json.load(json_data)
return cls(config)
|
Construct OneViewClient using a json file.
Args:
file_name: json full path.
Returns:
OneViewClient:
|
codesearchnet
|
def _ReadUnionDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
return self._ReadDataTypeDefinitionWithMembers(definitions_registry, definition_values, data_types.UnionDefinition, definition_name, supports_conditions=False)
|
Reads a union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
|
codesearchnet
|
def reserve_ids(self, token, channel, quantity):
quantity = str(quantity)
url = self.url('{}/{}/reserve/{}/'.format(token, channel, quantity))
req = self.remote_utils.get_url(url)
if (req.status_code != 200):
raise RemoteDataNotFoundError('Invalid req: ' + str(req.status_code))
out = req.json()
return [(out[0] + i) for i in range(out[1])]
|
Requests a list of next-available-IDs from the server.
Arguments:
token (str): The project token
channel (str): The channel to reserve IDs in
quantity (int): The number of IDs to reserve
Returns:
int[quantity]: List of IDs you've been granted
|
codesearchnet
|
def metrics(self):
collected_metrics = []
for layer in self._flatten_layers():
with layer._metrics_lock:
collected_metrics.extend(layer._metrics)
return collected_metrics
|
List of metrics added using the `add_metric()` API.
Example:
>>> input = tf.keras.layers.Input(shape=(3,))
>>> d = tf.keras.layers.Dense(2)
>>> output = d(input)
>>> d.add_metric(tf.reduce_max(output), name='max')
>>> d.add_metric(tf.reduce_min(output), name='min')
>>> [m.name for m in d.metrics]
['max', 'min']
Returns:
A list of `Metric` objects.
|
github-repos
|
def drop_scored_calls(self, names):
def _remove(calls, names):
d = dict([(k, v) for (k, v) in calls.items() if (k not in names)])
return d
if isinstance(names, str):
names = [names]
output = self.copy()
output['scored_calls'] = output['scored_calls'].apply((lambda x: _remove(x, names)))
return output
|
Take a name or list of scored call names and drop those from the scored calls
Args:
names (list): list of names to drop or a single string name to drop
Returns:
CellDataFrame: The CellDataFrame modified.
|
codesearchnet
|
def find_invalid_filenames(filenames, repository_root):
errors = []
for filename in filenames:
if not os.path.abspath(filename).startswith(repository_root):
errors.append((filename, 'Error: File %s does not belong to '
'repository %s' % (filename, repository_root)))
if not os.path.exists(filename):
errors.append((filename,
'Error: File %s does not exist' % (filename, )))
if os.path.isdir(filename):
errors.append((filename,
'Error: %s is a directory. Directories are'
' not yet supported' % (filename, )))
return errors
|
Find files that do not exist, are not in the repository, or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
|
juraj-google-style
|
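A hypothetical usage sketch, assuming the find_invalid_filenames snippet above is importable; the paths are invented for illustration:
import os
import tempfile

repo_root = tempfile.mkdtemp()
tracked = os.path.join(repo_root, 'module.py')
open(tracked, 'w').close()

# '/outside/missing.py' is outside the repository and does not exist,
# so it yields two errors; the tracked file yields none.
for _, message in find_invalid_filenames([tracked, '/outside/missing.py'], repo_root):
    print(message)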
def get_collection(self, uri=None, filter='', path=''):
if not uri:
uri = self._base_uri
if filter:
filter = self.make_query_filter(filter)
filter = "?" + filter[1:]
uri = "{uri}{path}{filter}".format(uri=uri, path=path, filter=filter)
logger.debug('Get resource collection (uri = %s)' % uri)
response = self._connection.get(uri)
return self.get_members(response)
|
Retrieves a collection of resources.
Use this function when the 'start' and 'count' parameters are not allowed in the GET call.
Otherwise, use get_all instead.
Optional filtering criteria may be specified.
Args:
filter (list or str): General filter/query string.
path (str): path to be added with base URI
Returns:
Collection of the requested resource.
|
juraj-google-style
|
def join(self, basepath, *paths):
if not basepath.startswith(S3FileSystem.S3_PREFIX):
raise ValueError('Basepath %r must be S3 path.' % basepath)
path = basepath
for p in paths:
path = path.rstrip('/') + '/' + p.lstrip('/')
return path
|
Join two or more pathname components for the filesystem
Args:
basepath: string path of the first component of the path
paths: path components to be added
Returns: full path after combining all of the path components
|
github-repos
|
def truncate(text, length=255):
lines = []
i = 0
while (i < len(text)):
try:
lines.append(text[i:(i + length)])
i += length
except IndexError as e:
lines.append(text[i:])
return lines
|
Splits the message into a list of strings of length `length`
Args:
text (str): The text to be divided
length (int, optional): The length of the chunks of text.
Defaults to 255.
Returns:
list: Text divided into chunks of length `length`
|
codesearchnet
|
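A usage sketch, assuming the truncate snippet above is importable:
text = 'abcdefghij' * 30            # 300 characters
chunks = truncate(text, length=255)
print([len(c) for c in chunks])     # [255, 45]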
def deprecated_endpoints(*args):
def deprecated_wrapper(func):
if '_tf_deprecated_api_names' in func.__dict__:
raise DeprecatedNamesAlreadySetError(f'Cannot set deprecated names for {func.__name__} to {args}. Deprecated names are already set to {func._tf_deprecated_api_names}.')
func._tf_deprecated_api_names = args
return func
return deprecated_wrapper
|
Decorator for marking endpoints deprecated.
This decorator does not print deprecation messages.
TODO(annarev): eventually start printing deprecation warnings when
@deprecation_endpoints decorator is added.
Args:
*args: Deprecated endpoint names.
Returns:
A function that takes symbol as an argument and adds
_tf_deprecated_api_names to that symbol.
_tf_deprecated_api_names would be set to a list of deprecated
endpoint names for the symbol.
|
github-repos
|
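A usage sketch, assuming the deprecated_endpoints snippet above is importable; the endpoint names here are hypothetical:
@deprecated_endpoints('math.old_divide', 'old_divide')
def divide(a, b):
    return a / b

print(divide._tf_deprecated_api_names)   # ('math.old_divide', 'old_divide')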
def all_downstreams(self, node):
nodes = [node]
nodes_seen = set()
i = 0
while i < len(nodes):
downstreams = self.downstream(nodes[i])
for downstream_node in downstreams:
if downstream_node not in nodes_seen:
nodes_seen.add(downstream_node)
nodes.append(downstream_node)
i += 1
return [
node_ for node_ in self.topological_sort() if node_ in nodes_seen
]
|
Returns a list of all nodes ultimately downstream
of the given node in the dependency graph, in
topological order.
Args:
node (str): The node whose downstream nodes you want to find.
Returns:
list: A list of nodes that are downstream from the node.
|
juraj-google-style
|
def get_pipeline_path(pipeline_name, working_directory):
logger.debug("starting")
logger.debug(f"current directory is {working_directory}")
pipeline_path = os.path.abspath(os.path.join(
working_directory,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
logger.debug(f"{pipeline_name} not found in current "
"directory/pipelines folder. Looking in pypyr install "
"directory instead.")
pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logger.debug(f"pypyr installation directory is: {pypyr_dir}")
pipeline_path = os.path.abspath(os.path.join(
pypyr_dir,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
raise PipelineNotFoundError(f"{pipeline_name}.yaml not found in "
f"either "
f"{working_directory}/pipelines "
f"or {pypyr_dir}/pipelines")
logger.debug("done")
return pipeline_path
|
Look for the pipeline in the various places it could be.
First checks the cwd. Then checks pypyr/pipelines dir.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines.
|
juraj-google-style
|
def edit(self, customer_id, data={}, **kwargs):
url = '{}/{}'.format(self.base_url, customer_id)
return self.put_url(url, data, **kwargs)
|
Edit Customer information from given dict
Returns:
Customer Dict which was edited
|
codesearchnet
|
def __init__(self, filepath, eps=10, max_rows=None):
self.eps_timer = itertools.cycle([max(0, delta) for delta in np.random.normal(1.0/float(eps), .5/float(eps), size=1000)])
self.log_reader = bro_log_reader.BroLogReader(filepath, tail=False)
self.max_rows = max_rows
|
Initialization for the LiveSimulator Class
Args:
filepath (str): Path to the Bro/Zeek log file to replay
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
|
juraj-google-style
|
def show_fields(self, block=None):
mapping = self._mapping()
if (block is None):
return mapping
elif (block == 'top'):
blocks = set()
for key in mapping.keys():
blocks.add(key.split('.')[0])
block_map = {}
for b in blocks:
block_map[b] = 'object'
else:
block_map = {}
for (key, value) in mapping.items():
if key.startswith(block):
block_map[key] = value
return block_map
|
Retrieve and return the mapping for the given metadata block.
Arguments:
block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``),
or the special values ``None`` for everything or ``"top"`` for just the
top-level fields.
**Default:** ``None``.
index (str): The Search index to map. **Default:** The current index.
Returns:
dict: ``field:datatype`` pairs.
|
codesearchnet
|
def fn(x: list[Union[int, float]], y: Optional[Union[int, str]]=None):
return x
|
Test function
Args:
x: The input
y: Also the input
|
github-repos
|
def add_tile(self, address, tile):
if (address in self._tiles):
raise ArgumentError('Tried to add two tiles at the same address', address=address)
self._tiles[address] = tile
|
Add a tile to handle all RPCs at a given address.
Args:
address (int): The address of the tile
tile (RPCDispatcher): A tile object that inherits from RPCDispatcher
|
codesearchnet
|
def delete_endpoint_config(self, endpoint_config_name):
LOGGER.info('Deleting endpoint configuration with name: {}'.format(endpoint_config_name))
self.sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
|
Delete an Amazon SageMaker endpoint configuration.
Args:
endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to delete.
|
juraj-google-style
|
def get_schedule_distribution(schedule, global_step=None):
interpolation, steps, pmfs = schedule
if len(pmfs) == 1:
return pmfs[0]
if global_step is None:
global_step = tf.train.get_or_create_global_step()
if interpolation == 'step':
interpolation_fn = step_interpolation
elif interpolation == 'linear':
interpolation_fn = linear_interpolation
else:
raise ValueError('Invalid interpolation strategy: %s' % interpolation)
return tf.reshape(
tf.py_func(
func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)),
inp=[global_step], Tout=tf.float32), [len(pmfs[0])])
|
Computes the pmf of a schedule given the global_step.
Args:
schedule: A schedule tuple, see encode_schedule for details.
global_step: A scalar tensor, the step to query the schedule.
Returns:
A 1-D tensor of probs, the sampling distribution of the global_step.
|
juraj-google-style
|
def rationalize(flt: float, denominators: Set[int] = None) -> Fraction:
if denominators is None:
denominators = _DENOMINATORS
frac = Fraction.from_float(flt).limit_denominator()
if frac.denominator not in denominators:
raise ValueError('Cannot rationalize')
return frac
|
Convert a floating point number to a Fraction with a small
denominator.
Args:
flt: A floating point number
denominators: Collection of standard denominators. Default is
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512,
1024, 2048, 4096, 8192
Raises:
ValueError: If cannot rationalize float
|
juraj-google-style
|
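A usage sketch for rationalize above; denominators are passed explicitly because the module-level _DENOMINATORS default is not shown here:
print(rationalize(0.375, denominators={1, 2, 4, 8}))   # 3/8
try:
    rationalize(0.1234, denominators={1, 2, 4, 8})
except ValueError:
    print('cannot rationalize')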
def __call__(self, name, value):
super(ObjectTypeChecker, self).__call__(name, value)
|
Call method.
Args:
name (str): the value's name.
value (str): the value to check.
Raises:
ValueError: if value is not type str.
|
juraj-google-style
|
def history(self, samples=500, pandas=True, stream="default"):
node = "history" if stream == "default" else "events"
query = gql( % node)
response = self._exec(query, samples=samples)
lines = [json.loads(line)
for line in response['project']['run'][node]]
if pandas:
pandas = util.get_module("pandas")
if pandas:
lines = pandas.DataFrame.from_records(lines)
else:
print("Unable to load pandas, call history with pandas=False")
return lines
|
Return history metrics for a run
Args:
samples (int, optional): The number of samples to return
pandas (bool, optional): Return a pandas dataframe
stream (str, optional): "default" for metrics, "system" for machine metrics
|
juraj-google-style
|
def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'):
if not path_specification:
return None
file_entry = self._file_system.GetFileEntryByPathSpec(path_specification)
if file_entry is None:
return None
file_object = file_entry.GetFileObject()
if file_object is None:
return None
registry_file = dfwinreg_regf.REGFWinRegistryFile(
ascii_codepage=ascii_codepage)
try:
registry_file.Open(file_object)
except IOError as exception:
logger.warning(
'Unable to open Windows Registry file with error: {0!s}'.format(
exception))
file_object.close()
return None
return registry_file
|
Opens the Windows Registry file specified by the path specification.
Args:
path_specification (dfvfs.PathSpec): path specification.
ascii_codepage (Optional[str]): ASCII string codepage.
Returns:
WinRegistryFile: Windows Registry file or None.
|
juraj-google-style
|
def unwrap(tensor):
while isinstance(tensor, (PrettyTensor, Loss)):
tensor = tensor.tensor
return tensor
|
Returns the underlying tensor if tensor is wrapped or tensor.
Args:
tensor: The tensor to unwrap.
Returns:
Tensor or if it is a pretty tensor, the unwrapped version.
Raises:
ValueError: if tensor holds a sequence.
|
codesearchnet
|
def to_dict(self):
return self._base(((key, (value.to_dict() if isinstance(value, AutoDict) else value)) for (key, value) in self.items()))
|
Recursively casts an AutoDict into a regular dictionary. All nested
AutoDict values are also converted.
Returns:
dict: a copy of this dict without autovivification
Example:
>>> from ubelt.util_dict import AutoDict
>>> auto = AutoDict()
>>> auto[1] = 1
>>> auto['n1'] = AutoDict()
>>> static = auto.to_dict()
>>> assert not isinstance(static, AutoDict)
>>> assert not isinstance(static['n1'], AutoDict)
|
codesearchnet
|
def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None):
course_videos = (CourseVideo.objects.select_related('video')
.prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')
.filter(video__encoded_videos__profile__profile_name='youtube')
.order_by('id')
.distinct())
if course_ids:
course_videos = course_videos.filter(course_id__in=course_ids)
course_videos = course_videos.values_list('course_id', 'video__edx_video_id')
if limit is not None and offset is not None:
course_videos = course_videos[offset: offset+limit]
course_videos_with_yt_profile = []
for course_id, edx_video_id in course_videos:
yt_profile = EncodedVideo.objects.filter(
video__edx_video_id=edx_video_id,
profile__profile_name='youtube'
).first()
if yt_profile:
course_videos_with_yt_profile.append((
course_id, edx_video_id, yt_profile.url
))
return course_videos_with_yt_profile
|
Returns a list that contains all the course ids and video ids with the youtube profile
Args:
course_ids (list): valid course ids
limit (int): batch records limit
offset (int): an offset for selecting a batch
Returns:
(list): Tuples of course_id, edx_video_id and youtube video url
|
juraj-google-style
|
def push(self,message,message_type):
super(Producer,self).send(message,message_type)
|
Send a reply message of the given type
Args:
- message: the message to publish
- message_type: the type of message being sent
|
juraj-google-style
|
def constant(value, dtype=None, shape=None, name=None):
if dtype is None:
dtype = floatx()
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
|
Creates a constant tensor.
Args:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
|
github-repos
|
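A usage sketch for the Keras backend constant above, assuming TensorFlow is installed; with dtype=None the floatx default (typically float32) is used:
import tensorflow as tf

c = tf.keras.backend.constant([[1, 2], [3, 4]])
print(c.dtype, c.shape)   # <dtype: 'float32'> (2, 2)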
def InitializeDownload(self, http_request, http=None, client=None):
self.EnsureUninitialized()
if ((http is None) and (client is None)):
raise exceptions.UserError('Must provide client or http.')
http = (http or client.http)
if (client is not None):
http_request.url = client.FinalizeTransferUrl(http_request.url)
url = http_request.url
if self.auto_transfer:
end_byte = self.__ComputeEndByte(0)
self.__SetRangeHeader(http_request, 0, end_byte)
response = http_wrapper.MakeRequest((self.bytes_http or http), http_request)
if (response.status_code not in self._ACCEPTABLE_STATUSES):
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if (client is not None):
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
if self.auto_transfer:
self.StreamInChunks()
|
Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
|
codesearchnet
|
def render(self,
trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],
batch: Optional[int] = None) -> None:
non_fluents, initial_state, states, actions, interms, rewards = trajectories
non_fluents = dict(non_fluents)
states = dict((name, fluent[0]) for name, fluent in states)
actions = dict((name, fluent[0]) for name, fluent in actions)
rewards = rewards[0]
idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1')
start = initial_state[idx][0]
g = non_fluents['GOAL/1']
path = states['location/1']
deltas = actions['move/1']
centers = non_fluents['DECELERATION_ZONE_CENTER/2']
decays = non_fluents['DECELERATION_ZONE_DECAY/1']
zones = [(x, y, d) for (x, y), d in zip(centers, decays)]
self._ax1 = plt.gca()
self._render_state_space()
self._render_start_and_goal_positions(start, g)
self._render_deceleration_zones(zones)
self._render_state_action_trajectory(start, path, deltas)
plt.title('Navigation', fontweight='bold')
plt.legend(loc='lower right')
plt.show()
|
Render the simulated state-action `trajectories` for Navigation domain.
Args:
stats: Performance statistics.
trajectories: NonFluents, states, actions, interms and rewards.
batch: Number of batches to render.
|
juraj-google-style
|
def from_frozen_graph(cls, graph_def_file, input_arrays, output_arrays, input_shapes=None):
TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_GRAPH_DEF)
with _ops.Graph().as_default():
with _session.Session() as sess:
if not gfile.Exists(graph_def_file):
raise IOError("File '{0}' does not exist.".format(graph_def_file))
with gfile.GFile(graph_def_file, 'rb') as f:
file_content = f.read()
try:
graph_def = _graph_pb2.GraphDef()
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
try:
print("Ignore 'tcmalloc: large alloc' warnings.")
if not isinstance(file_content, str):
file_content = file_content.decode('utf-8')
graph_def = _graph_pb2.GraphDef()
_text_format.Merge(file_content, graph_def)
except (_text_format.ParseError, DecodeError):
raise IOError("Unable to parse input file '{}'.".format(graph_def_file))
if sys.byteorder == 'big':
bst.swap_tensor_content_in_graph_node(graph_def, 'little', 'big')
load_model_in_session = True
try:
_import_graph_def(graph_def, name='')
except _NotFoundError:
load_model_in_session = False
if load_model_in_session:
if not _is_frozen_graph(sess):
raise ValueError('Please freeze the graph using freeze_graph.py.')
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
_set_tensor_shapes(input_tensors, input_shapes)
return cls(sess.graph_def, input_tensors, output_tensors)
else:
if not input_shapes:
raise ValueError('input_shapes must be defined for this model.')
if set(input_arrays) != set(input_shapes.keys()):
raise ValueError('input_shapes must contain a value for each item in input_array.')
input_arrays_with_shape = [(name, input_shapes[name]) for name in input_arrays]
return cls(graph_def, input_tensors=None, output_tensors=None, input_arrays_with_shape=input_arrays_with_shape, output_arrays=output_arrays)
|
Creates a TFLiteConverter class from a file containing a frozen GraphDef.
Args:
graph_def_file: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
Returns:
TFLiteConverter class.
Raises:
IOError:
File not found.
Unable to parse input file.
ValueError:
The graph is not frozen.
input_arrays or output_arrays contains an invalid tensor name.
input_shapes is not correctly defined when required
|
github-repos
|
def convert_argument(self, arg_name, arg_value):
self._ensure_loaded()
type_name = self.param_type(arg_name)
if (type_name is None):
return arg_value
val = typeinfo.type_system.convert_to_type(arg_value, type_name)
validators = self.annotated_params[arg_name].validators
if (len(validators) == 0):
return val
type_obj = typeinfo.type_system.get_type(type_name)
try:
for (validator_name, extra_args) in validators:
if (not hasattr(type_obj, validator_name)):
raise ValidationError('Could not find validator specified for argument', argument=arg_name, validator_name=validator_name, type=str(type_obj), method=dir(type_obj))
validator = getattr(type_obj, validator_name)
validator(val, *extra_args)
except (ValueError, TypeError) as exc:
raise ValidationError(exc.args[0], argument=arg_name, arg_value=val)
return val
|
Given a parameter with type information, convert and validate it.
Args:
arg_name (str): The name of the argument to convert and validate
arg_value (object): The value to convert and validate
Returns:
object: The converted value.
|
codesearchnet
|
def _compress_url(link):
comment_re = re.compile(r'/comments/([A-Za-z\d]{2,})(?:/[^\s]+/([A-Za-z\d]+))?')
message_re = re.compile(r'/message/messages/([A-Za-z\d]+)')
matches = re.findall(comment_re, link)
if len(matches) == 0:
matches = re.findall(message_re, link)
if len(matches) == 0:
return None
else:
return 'm,' + matches[0]
else:
if matches[0][1] == '':
return 'l,' + matches[0][0]
else:
return 'l,' + matches[0][0] + ',' + matches[0][1]
|
Convert a reddit URL into the short-hand used by usernotes.
Arguments:
link: a link to a comment, submission, or message (str)
Returns a String of the shorthand URL
|
juraj-google-style
|
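A usage sketch, assuming the _compress_url snippet above is importable (it needs `import re`); the links are invented for illustration:
comment = 'https://www.reddit.com/r/example/comments/abc123/some_title/def456'
print(_compress_url(comment))                                           # l,abc123,def456
print(_compress_url('https://www.reddit.com/message/messages/xyz78'))   # m,xyz78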
def rms(x):
try:
return (np.array(x) ** 2).mean() ** 0.5
except:
x = np.array(dropna(x))
invN = 1.0 / len(x)
return (sum(invN * (x_i ** 2) for x_i in x)) ** .5
|
Root Mean Square
Arguments:
x (seq of float): A sequence of numerical values
Returns:
The square root of the average of the squares of the values
math.sqrt(sum(x_i**2 for x_i in x) / len(x))
or
return (np.array(x) ** 2).mean() ** 0.5
>>> rms([0, 2, 4, 4])
3.0
|
juraj-google-style
|
def set(self, key, val):
self._create_file_if_none_exists()
with open(self.filename, 'r+b') as file_object:
cache_pickle = pickle.load(file_object)
cache_pickle[key] = val
file_object.seek(0)
pickle.dump(cache_pickle, file_object)
|
Sets a value in a key.
Args:
key (str): Key for the value.
val: Value to set.
Returns:
Retrieved value.
|
codesearchnet
|
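A minimal sketch of the surrounding cache class assumed by `set` above; the class name and the empty-dict seeding in _create_file_if_none_exists are assumptions, not part of the original entry.
import os
import pickle

class PickleCache:
    # set(key, val) is the method shown in the entry above.
    def __init__(self, filename):
        self.filename = filename

    def _create_file_if_none_exists(self):
        # Seed the file with an empty pickled dict so set() can load it.
        if not os.path.exists(self.filename):
            with open(self.filename, 'wb') as f:
                pickle.dump({}, f)

    def get(self, key):
        with open(self.filename, 'rb') as f:
            return pickle.load(f)[key]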
def isinf(x):
if any_symbolic_tensors((x,)):
return Isinf().symbolic_call(x)
return backend.numpy.isinf(x)
|
Test element-wise for positive or negative infinity.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
|
github-repos
|
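Hedged usage sketch, assuming the Keras 3 ops namespace.
import numpy as np
from keras import ops

ops.isinf(np.array([1.0, np.inf, -np.inf, np.nan]))
# -> [False, True, True, False]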
def _broadcast_shape_helper(shape_x, shape_y):
broadcasted_dims = reversed(list(itertools.zip_longest(reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=tensor_shape.Dimension(1))))
return_dims = []
for dim_x, dim_y in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
return_dims.append(dim_y)
elif dim_y.value == 1:
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
return_dims.append(dim_x.merge_with(dim_y))
else:
return None
return return_dims
|
Helper functions for is_broadcast_compatible and broadcast_shape.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
Returns None if the shapes are not broadcast compatible,
a list of the broadcast dimensions otherwise.
|
github-repos
|
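Illustrative calls to the helper above, assuming NumPy-style broadcasting over tf.TensorShape dims (None marks an unknown dimension).
import tensorflow as tf

_broadcast_shape_helper(tf.TensorShape([2, 1, 3]), tf.TensorShape([5, 3]))
# -> dims equivalent to [2, 5, 3]
_broadcast_shape_helper(tf.TensorShape([None, 3]), tf.TensorShape([4, 1]))
# -> dims equivalent to [4, 3]
_broadcast_shape_helper(tf.TensorShape([2, 3]), tf.TensorShape([4, 3]))
# -> None (2 and 4 are not broadcast compatible)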
def create_table(cls, table_name, schema_fields, table_data):
table_schema = bigquery.TableSchema()
for field_def in schema_fields:
field = bigquery.TableFieldSchema()
field.name = field_def[0]
field.type = field_def[1]
if len(field_def) > 2:
field.mode = field_def[2]
if len(field_def) > 3:
for subfield_def in field_def[3]:
subfield = bigquery.TableFieldSchema()
subfield.name = subfield_def[0]
subfield.type = subfield_def[1]
field.fields.append(subfield)
table_schema.fields.append(field)
table = bigquery.Table(tableReference=bigquery.TableReference(projectId=cls.project, datasetId=cls.dataset_id, tableId=table_name), schema=table_schema)
request = bigquery.BigqueryTablesInsertRequest(projectId=cls.project, datasetId=cls.dataset_id, table=table)
cls.bigquery_client.client.tables.Insert(request)
cls.bigquery_client.insert_rows(cls.project, cls.dataset_id, table_name, table_data)
return f'{cls.project}.{cls.dataset_id}.{table_name}'
|
Create a BigQuery table with the specified schema and data.
Args:
table_name: Name of the table to create
schema_fields: List of field definitions in the format:
(name, type, [mode, [subfields]])
table_data: List of dictionaries containing the data to insert
Returns:
Fully qualified table name (project.dataset.table)
|
github-repos
|
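Illustrative schema and data for create_table above, following the (name, type, [mode, [subfields]]) format; the field names, project, and dataset are made up.
schema_fields = [
    ('id', 'INTEGER'),
    ('name', 'STRING', 'NULLABLE'),
    ('scores', 'RECORD', 'REPEATED', [('metric', 'STRING'), ('value', 'FLOAT')]),
]
table_data = [
    {'id': 1, 'name': 'alpha', 'scores': [{'metric': 'acc', 'value': 0.9}]},
]
# cls.create_table('results', schema_fields, table_data)
# -> 'my-project.my_dataset.results'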
def resolve(self, host: str) -> ResolveResult:
_logger.debug(__('Lookup address {0}.', host))
try:
host = self.hook_dispatcher.call(PluginFunctions.resolve_dns, host) or host
except HookDisconnected:
pass
cache_key = (host, self._family)
if self._cache and cache_key in self._cache:
resolve_result = self._cache[cache_key]
_logger.debug(__('Return by cache {0}.', resolve_result))
if self._rotate:
resolve_result.rotate()
return resolve_result
address_infos = []
dns_infos = []
if not self.dns_python_enabled:
families = ()
elif self._family == IPFamilyPreference.any:
families = (socket.AF_INET, socket.AF_INET6)
elif self._family == IPFamilyPreference.ipv4_only:
families = (socket.AF_INET, )
else:
families = (socket.AF_INET6, )
for family in families:
datetime_now = datetime.datetime.utcnow()
try:
answer = yield from self._query_dns(host, family)
except DNSNotFound:
continue
else:
dns_infos.append(DNSInfo(datetime_now, answer.response.answer))
address_infos.extend(self._convert_dns_answer(answer))
if not address_infos:
if self._family == IPFamilyPreference.any:
family = socket.AF_UNSPEC
elif self._family == IPFamilyPreference.ipv4_only:
family = socket.AF_INET
else:
family = socket.AF_INET6
results = yield from self._getaddrinfo(host, family)
address_infos.extend(self._convert_addrinfo(results))
_logger.debug(__('Resolved addresses: {0}.', address_infos))
resolve_result = ResolveResult(address_infos, dns_infos)
if self._cache:
self._cache[cache_key] = resolve_result
self.event_dispatcher.notify(PluginFunctions.resolve_dns_result, host, resolve_result)
if self._rotate:
resolve_result.shuffle()
return resolve_result
|
Resolve hostname.
Args:
host: Hostname.
Returns:
Resolved IP addresses.
Raises:
DNSNotFound if the hostname could not be resolved or
NetworkError if there was an error connecting to DNS servers.
Coroutine.
|
juraj-google-style
|
def __init__(self, option_strings, dest, copyright_text=None, nargs=None,
**kwargs):
if nargs is not None:
raise ValueError('nargs not allowed for CopyRight')
self.copyright = copyright_text
super(CopyRight, self).__init__(option_strings, dest, nargs=0,
**kwargs)
|
Initialize class and spawn self as Base Class w/o nargs
Args:
option_strings (list): list of str giving command line flags that
call this action
dest (str): namespace reference to value
copyright_text (str): str to print
nargs (str): number of args as special char or int
**kwargs (various): optional arguments to pass to super call
|
juraj-google-style
|
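A hedged wiring sketch for the CopyRight action above. The flag name and text are illustrative, and it assumes the class also defines __call__ elsewhere to print self.copyright.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--copyright', action=CopyRight,
                    copyright_text='(c) 2024 Example Corp',
                    help='print copyright information and exit')
# argparse forwards the extra copyright_text kwarg to CopyRight.__init__,
# and nargs=0 means the flag consumes no command-line arguments.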
def __init__(self, manager):
self.manager = manager
self._var_cache = dict((k, EnvironmentVariable(k, self))
for k in manager.parent_environ.iterkeys())
|
Creates an `EnvironmentDict`.
Args:
manager: Owning manager whose `parent_environ` mapping is used to seed
    the per-variable cache.
|
juraj-google-style
|
def circle(y_true, y_pred, ref_labels=None, ref_embeddings=None, remove_diagonal=True, gamma=80, margin=0.4):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, 'int32')
ref_embeddings = y_pred if ref_embeddings is None else ops.convert_to_tensor(ref_embeddings)
ref_labels = y_true if ref_labels is None else ops.cast(ref_labels, 'int32')
optim_pos = margin
optim_neg = 1 + margin
delta_pos = margin
delta_neg = 1 - margin
pairwise_cosine_distances = 1 - ops.matmul(y_pred, ops.transpose(ref_embeddings))
pairwise_cosine_distances = ops.maximum(pairwise_cosine_distances, 0.0)
positive_mask, negative_mask = build_pos_neg_masks(y_true, ref_labels, remove_diagonal=remove_diagonal)
positive_mask = ops.cast(positive_mask, dtype=pairwise_cosine_distances.dtype)
negative_mask = ops.cast(negative_mask, dtype=pairwise_cosine_distances.dtype)
pos_weights = optim_pos + pairwise_cosine_distances
pos_weights = pos_weights * positive_mask
pos_weights = ops.maximum(pos_weights, 0.0)
neg_weights = optim_neg - pairwise_cosine_distances
neg_weights = neg_weights * negative_mask
neg_weights = ops.maximum(neg_weights, 0.0)
pos_dists = delta_pos - pairwise_cosine_distances
neg_dists = delta_neg - pairwise_cosine_distances
pos_wdists = -1 * gamma * pos_weights * pos_dists
neg_wdists = gamma * neg_weights * neg_dists
p_loss = ops.logsumexp(ops.where(positive_mask, pos_wdists, float('-inf')), axis=1)
n_loss = ops.logsumexp(ops.where(negative_mask, neg_wdists, float('-inf')), axis=1)
circle_loss = ops.softplus(p_loss + n_loss)
backend.set_keras_mask(circle_loss, circle_loss > 0)
return circle_loss
|
Computes the Circle loss.
It is designed to minimize within-class distances and maximize between-class
distances in L2 normalized embedding space.
Args:
y_true: Tensor with ground truth labels in integer format.
y_pred: Tensor with predicted L2 normalized embeddings.
ref_labels: Optional integer tensor with labels for reference
embeddings. If `None`, defaults to `y_true`.
ref_embeddings: Optional tensor with L2 normalized reference embeddings.
If `None`, defaults to `y_pred`.
remove_diagonal: Boolean, whether to remove self-similarities from
positive mask. Defaults to `True`.
gamma: Float, scaling factor for the loss. Defaults to `80`.
margin: Float, relaxation factor for the loss. Defaults to `0.4`.
Returns:
Circle loss value.
|
github-repos
|
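Hedged usage sketch for the circle loss above; the shapes are illustrative, the embeddings are L2-normalized first as the docstring requires, and the helpers referenced in the entry (build_pos_neg_masks, backend) are assumed importable from the same module.
import numpy as np

labels = np.array([0, 0, 1, 1], dtype='int32')
emb = np.random.normal(size=(4, 8)).astype('float32')
emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)  # L2 normalize
per_sample_loss = circle(labels, emb, gamma=80, margin=0.4)  # shape (4,)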
def draw_points(self, *points):
point_array = ffi.new('SDL_Point[]', len(points))
for (i, p) in enumerate(points):
point_array[i] = p._ptr[0]
check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points)))
|
Draw multiple points on the current rendering target.
Args:
*points (Point): The points to draw.
Raises:
SDLError: If an error is encountered.
|
codesearchnet
|
def launch(self):
server = self._make_server()
thread = threading.Thread(target=server.serve_forever, name='TensorBoard')
thread.daemon = True
thread.start()
return server.get_url()
|
Python API for launching TensorBoard.
This method is the same as main() except it launches TensorBoard in
a separate permanent thread. The configure() method must be called
first.
Returns:
The URL of the TensorBoard web server.
:rtype: str
|
codesearchnet
|
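Hedged usage sketch; assumes the tensorboard.program API and a hypothetical log directory.
from tensorboard import program

tb = program.TensorBoard()
tb.configure(argv=[None, '--logdir', '/tmp/logs'])  # hypothetical logdir
url = tb.launch()
print('TensorBoard listening on', url)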
def _get_parsed_args(command_name, doc, argv):
_LOGGER.debug('Parsing docstring: %s with arguments %s.', doc, argv)
args = docopt(doc, argv=argv)
if command_name == settings.command:
args[command_name] = True
return args
|
Parse the docstring with docopt.
Args:
command_name: The name of the subcommand to parse.
doc: A docopt-parseable string.
argv: The list of arguments to pass to docopt during parsing.
Returns:
The docopt results dictionary. If the subcommand has the same name as
the primary command, the subcommand value will be added to the
dictionary.
|
juraj-google-style
|
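An illustrative call to _get_parsed_args above; the docstring, subcommand, and argv are made up, and settings.command is assumed to differ from the subcommand name.
doc = """Usage: mycli greet [--name=<name>]

Options:
  --name=<name>  Who to greet [default: world].
"""
_get_parsed_args('greet', doc, ['greet', '--name', 'Ada'])
# -> {'greet': True, '--name': 'Ada'}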
def is_tensor_on_canonical_device(self, tensor_name):
device = self.get_tensor_device(tensor_name)
return not device or device == self.canonical_device
|
Whether the tensor is on the first (canonical) device.
Tensors not assigned to a device are assumed to be on all devices, including
the canonical device.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor is on the first device.
|
juraj-google-style
|
def enroll_user(cls, enterprise_customer, user, course_mode, *course_ids):
(enterprise_customer_user, __) = EnterpriseCustomerUser.objects.get_or_create(enterprise_customer=enterprise_customer, user_id=user.id)
enrollment_client = EnrollmentApiClient()
succeeded = True
for course_id in course_ids:
try:
enrollment_client.enroll_user_in_course(user.username, course_id, course_mode)
except HttpClientError as exc:
if cls.is_user_enrolled(user, course_id, course_mode):
succeeded = True
else:
succeeded = False
default_message = 'No error message provided'
try:
error_message = json.loads(exc.content.decode()).get('message', default_message)
except ValueError:
error_message = default_message
logging.error('Error while enrolling user %(user)s: %(message)s', dict(user=user.username, message=error_message))
if succeeded:
(__, created) = EnterpriseCourseEnrollment.objects.get_or_create(enterprise_customer_user=enterprise_customer_user, course_id=course_id)
if created:
track_enrollment('admin-enrollment', user.id, course_id)
return succeeded
|
Enroll a single user in any number of courses using a particular course mode.
Args:
enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment
user: The user who needs to be enrolled in the course
course_mode: The mode with which the enrollment should be created
*course_ids: An iterable containing any number of course IDs to eventually enroll the user in.
Returns:
Boolean: Whether or not enrollment succeeded for all courses specified
|
codesearchnet
|
def contextual_override(*, cascade: bool=False, override_attrs: bool=False, **variables) -> ContextManager[dict[str, ContextualOverride]]:
vs = {}
for k, v in variables.items():
if not isinstance(v, ContextualOverride):
v = ContextualOverride(v, cascade, override_attrs)
vs[k] = v
return contextual_scope(_global_contextual_overrides, **vs)
|
Context manager to provide contextual values under a scope.
Please be aware that contextual value override are per-thread. If you want
to propagate the contextual value override to other threads, please obtain
a wrapper function for a user function using
`pg.with_contextual_override(func)`.
Args:
cascade: If True, this override will apply to both current scope and nested
scope, meaning that this `pg.contextual_override` will take precedence
over all nested `pg.contextual_override` on the overriden variables.
override_attrs: If True, this override will apply to attributes that already
have values. Otherwise overridden variables will only be used for
contextual attributes whose values are not present.
**variables: Key/values as override for contextual attributes.
Returns:
A dict of attribute names to their contextual overrides.
|
github-repos
|
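Hedged usage sketch, assuming the helper is exposed publicly as pg.contextual_override (as the docstring above suggests); the variable name is illustrative.
import pyglove as pg

with pg.contextual_override(temperature=0.7, cascade=True):
    # Within this scope, contextual attributes named `temperature` that
    # have no explicit value resolve to 0.7; cascade=True makes this
    # override also win over nested pg.contextual_override scopes.
    pass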
def get_shared_file(self, sharekey=None):
if not sharekey:
raise Exception("You must specify a sharekey.")
endpoint = '/api/sharedfile/{0}'.format(sharekey)
data = self._make_request('GET', endpoint)
return SharedFile.NewFromJSON(data)
|
Returns a SharedFile object given by the sharekey.
Args:
sharekey (str): Sharekey of the SharedFile you want to retrieve.
Returns:
SharedFile
|
juraj-google-style
|
def verify_ed25519_signature_cmdln(args=None, exception=SystemExit):
args = args or sys.argv[1:]
parser = argparse.ArgumentParser(
    description='Verify an ed25519 signature.')  # original description text was lost in extraction; generic placeholder restored
parser.add_argument('--pubkey', help='path to a base64-encoded ed25519 pubkey, optional')
parser.add_argument('file_path')
parser.add_argument('sig_path')
opts = parser.parse_args(args)
log = logging.getLogger('scriptworker')
log.setLevel(logging.DEBUG)
logging.basicConfig()
pubkeys = {}
if opts.pubkey:
pubkeys['cmdln'] = [read_from_file(opts.pubkey)]
pubkeys.update(dict(DEFAULT_CONFIG['ed25519_public_keys']))
contents = read_from_file(opts.file_path, file_type='binary')
signature = read_from_file(opts.sig_path, file_type='binary')
for key_type, seeds in pubkeys.items():
for seed in seeds:
try:
verify_ed25519_signature(
ed25519_public_key_from_string(seed), contents, signature,
"didn't work with {}".format(seed)
)
log.info("Verified good with {} seed {} !".format(
key_type, seed
))
sys.exit(0)
except ScriptWorkerEd25519Error:
pass
raise exception("This is not a valid signature!")
|
Verify an ed25519 signature from the command line.
Args:
args (list, optional): the commandline args to parse. If ``None``, use
``sys.argv[1:]``. Defaults to ``None``.
exception (Exception, optional): the exception to raise on failure.
Defaults to ``SystemExit``.
|
juraj-google-style
|
def visit_indexer(self, indexer: _evaluation.IndexerNode) -> _sql_data_types.Select:
collection_result = self.visit(indexer.collection)
index_result = self.visit(indexer.index)
sql_alias = f'indexed_{collection_result.sql_alias}'
return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(f'element_at(COLLECT_LIST({collection_result.sql_alias}),{index_result.as_operand()} + 1)', collection_result.sql_data_type, _sql_alias=sql_alias), from_part=f'{collection_result.to_subquery()}', sql_dialect=_sql_data_types.SqlDialect.SPARK)
|
Translates a FHIRPath indexer expression to Spark SQL.
Args:
indexer: The `_Indexer` Expression node.
Returns:
A compiled Spark SQL expression.
|
github-repos
|
def Patch(self, request, global_params=None):
config = self.GetMethodConfig('Patch')
return self._RunMethod(config, request, global_params=global_params)
|
Updates a `WorkerPool`.
Args:
request: (CloudbuildProjectsLocationsWorkerPoolsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
|
github-repos
|
def solve(self):
if self.assignments:
return self.assignments
self._complete()
assignments = {var: self._get_nonfalse_values(var) for var in self.variables}
ground_pivots = self.ground_truth.simplify(assignments).extract_pivots(assignments)
for pivot, possible_values in ground_pivots.items():
if pivot in assignments:
assignments[pivot] &= set(possible_values)
something_changed = True
while something_changed:
something_changed = False
and_terms = []
for var in self.variables:
or_terms = []
for value in assignments[var].copy():
implication = self.implications[var][value].simplify(assignments)
if implication is FALSE:
assignments[var].remove(value)
something_changed = True
else:
or_terms.append(implication)
self.implications[var][value] = implication
and_terms.append(Or(or_terms))
d = And(and_terms)
for pivot, possible_values in d.extract_pivots(assignments).items():
if pivot in assignments:
length_before = len(assignments[pivot])
assignments[pivot] &= set(possible_values)
length_after = len(assignments[pivot])
something_changed |= length_before != length_after
self.register_variable = pytd_utils.disabled_function
self.implies = pytd_utils.disabled_function
self.assignments = assignments
return assignments
|
Solve the system of equations.
Returns:
An assignment, mapping strings (variables) to sets of strings (values).
|
github-repos
|
def delete_vm(access_token, subscription_id, resource_group, vm_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachines/', vm_name,
'?api-version=', COMP_API])
return do_delete(endpoint, access_token)
|
Delete a virtual machine.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vm_name (str): Name of the virtual machine.
Returns:
HTTP response.
|
juraj-google-style
|
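Hedged usage sketch for delete_vm above; all identifiers are placeholders and the token is assumed to come from the library's own auth helper.
response = delete_vm(
    access_token='<token>',                                  # placeholder
    subscription_id='11111111-2222-3333-4444-555555555555',  # placeholder
    resource_group='my-resource-group',
    vm_name='my-vm')
print(response.status_code)  # typically 202 while Azure processes the delete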